text stringlengths 4 1.02M | meta dict |
|---|---|
import sqlite3
class db():
    """Thin wrapper around an SQLite connection/cursor pair."""

    def __init__(self, db_path):
        """Open the database at *db_path* and keep a cursor handy."""
        self.cnx = sqlite3.connect(db_path)
        self.cur = self.cnx.cursor()

    def consultaSimp(self, sql):
        """Execute a parameterless statement and return every row."""
        cursor = self.cur
        cursor.execute(sql)
        return cursor.fetchall()

    def consultaDat(self, sql, datos):
        """Execute a parameterized statement and return every row."""
        cursor = self.cur
        cursor.execute(sql, datos)
        return cursor.fetchall()

    def insertarDatos(self, sql, datos):
        """Run *sql* once per parameter row in *datos*, then commit."""
        self.cur.executemany(sql, datos)
        self.cnx.commit()

    def cnxClose(self):
        """Close the underlying connection."""
        self.cnx.close()
"content_hash": "e2cae3e09967fb27cb142c09c81ff0e1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 44,
"avg_line_length": 18.4,
"alnum_prop": 0.6508152173913043,
"repo_name": "seertha/WSN_XBee",
"id": "467868d1e91478b97b8611e8c653056d02233891",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Software/RPI/Display_lcd/base_datos.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "12518"
},
{
"name": "Python",
"bytes": "46715"
}
],
"symlink_target": ""
} |
import sys
sys.path = ['../../'] + sys.path # Prepend
from codejam import CodeJam, parsers
from codejam.helpers import memoize
import unittest
from itertools import repeat
# Python 3 difference
# Python 2/3 difference: io.StringIO only accepts unicode text.  On Python 3
# the probe succeeds; on Python 2 either the probe raises TypeError (str is
# not unicode) or the fallback StringIO module must be used instead.  Catch
# only those two outcomes rather than swallowing every exception.
try:
    import io
    io.StringIO('not unicode')
    StringIO = io.StringIO
except (ImportError, TypeError):
    from StringIO import StringIO
def pickleable_solve(line):
    """Module-level solver (must be picklable for the multiprocessing test)."""
    if line != "1 2 3":
        raise AssertionError('Input to solve() not as expected: "%s"' % line)
    return 1
class TestCodeJam(unittest.TestCase):
    """End-to-end tests for CodeJam.run with the stock parsers and custom ones."""

    # Two cases, one line per case.
    input1 = '''2
1 2 3
1 2 3'''

    # Two cases, two lines per case.
    input2 = '''2
1 2 3
4 5
1 2 3
4 5'''

    # Two count-prefixed cases with an irregular number of lines each.
    irregular_input = '''2
3
1
2
3
4
1
2
3
4'''

    def test_basic(self):
        """parsers.lines hands each raw input line to solve()."""
        def solve(line):
            assert line == "1 2 3", 'Input to solve() not as expected: "%s"' % line
            return 1
        inf = StringIO(self.input1)
        outf = StringIO()
        CodeJam(parsers.lines, solve).run(inf, outf, silent=True)
        expout = '\n'.join(['Case #%d: 1'%i for i in [1,2]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_ints(self):
        """parsers.ints converts each line of a case to a list of ints."""
        def solve(line1, line2):
            assert line1 == [1,2,3], 'Input to solve() not as expected: "%s"' % str(line1)
            assert line2 == [4,5], 'Input to solve() not as expected: "%s"' % str(line2)
            return 2
        inf = StringIO(self.input2)
        outf = StringIO()
        CodeJam(parsers.ints, solve).run(inf, outf, silent=True)
        expout = '\n'.join(['Case #%d: 2'%i for i in [1,2]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_floats(self):
        """parsers.floats yields float lists; float results print with 6 decimals."""
        def solve(line1, line2):
            assert line1 == [1.,2.,3.], 'Input to solve() not as expected: "%s"' % str(line1)
            assert line2 == [4.,5.], 'Input to solve() not as expected: "%s"' % str(line2)
            assert type(line1[0]) == float
            return 3.5
        inf = StringIO(self.input2)
        outf = StringIO()
        CodeJam(parsers.floats, solve).run(inf, outf, silent=True)
        expout = '\n'.join(['Case #%d: 3.500000'%i for i in [1,2]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_words(self):
        """parsers.words splits each line into a list of token strings."""
        def solve(line):
            assert line == ["1","2","3"], 'Input to solve() not as expected: "%s"' % line
            return 4
        inf = StringIO(self.input1)
        outf = StringIO()
        CodeJam(parsers.words, solve).run(inf, outf, silent=True)
        expout = '\n'.join(['Case #%d: 4'%i for i in [1,2]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_bad_simple_input(self):
        """A line count that does not split evenly across cases is rejected."""
        def solve(*args):
            raise Exception('shouldn\'t get here')
        inf = StringIO(self.irregular_input)
        outf = StringIO()
        try:
            CodeJam(parsers.lines, solve).run(inf, outf, silent=True)
            assert False, 'should have failed here'
        except AssertionError as err:
            assert str(err).startswith('The number of lines in')

    def test_custom_parser(self):
        """Any generator of argument tuples may serve as the parser."""
        def solve(a, b, c):
            assert a == 1
            assert b == 2
            assert c == (1,2,3)
            return "FIVE"
        def parse(f):
            for i in range(3):
                yield (1,2,(1,2,3))
        inf = StringIO()
        outf = StringIO()
        CodeJam(parse, solve).run(inf, outf, silent=True)
        expout = '\n'.join(['Case #%d: FIVE'%i for i in [1,2,3]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_iter_parser(self):
        """iter_parser feeds input tokens one at a time via the nxt() callable."""
        def solve(n, l):
            assert len(l) == n
            return 6.1
        @parsers.iter_parser
        def parse(nxt):
            n = int(nxt())
            l = [int(nxt()) for unused in range(n)]
            return (n, l)
        inf = StringIO(self.irregular_input)
        outf = StringIO()
        CodeJam(parse, solve).run(inf, outf, silent=True)
        expout = '\n'.join(['Case #%d: 6.100000'%i for i in [1,2]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_bad_iter_parser(self):
        """A parser that consumes too many tokens must raise during the run."""
        def solve(n, l):
            assert len(l) == n
            return 6.1
        @parsers.iter_parser
        def parse(nxt):
            n = int(nxt())
            nxt()  # deliberately consume one token too many
            l = [int(nxt()) for unused in range(n)]
            return (n, l)
        inf = StringIO(self.irregular_input)
        outf = StringIO()
        try:
            CodeJam(parse, solve).run(inf, outf, silent=True)
            assert False, "Should have errored on parse!"
        except:
            pass

    def test_multiproc(self):
        """run_multiproc yields the same output as run (solver must pickle)."""
        inf = StringIO(self.input1)
        outf = StringIO()
        CodeJam(parsers.lines, pickleable_solve).run_multiproc(inf, outf, silent=True, workers=1)
        expout = '\n'.join(['Case #%d: 1'%i for i in [1,2]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_floating_accuracy(self):
        """floating_accuracy controls the number of printed decimal places."""
        def solve(line1, line2):
            return 3.128
        inf = StringIO(self.input2)
        outf = StringIO()
        CodeJam(parsers.floats, solve, floating_accuracy=2).run(inf, outf, silent=True)
        expout = '\n'.join(['Case #%d: 3.13'%i for i in [1,2]]) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)

    def test_include_case(self):
        """include_case=False omits the 'Case #N:' prefix from each output line."""
        def solve(line1, line2):
            return 3
        inf = StringIO(self.input2)
        outf = StringIO()
        CodeJam(parsers.floats, solve, include_case=False).run(inf, outf, silent=True)
        expout = '\n'.join(repeat('3',2)) + '\n'
        assert outf.getvalue() == expout, "unexpected output: '%s' vs '%s'" % (outf.getvalue(), expout)
class TestCodeJamMain(unittest.TestCase):
def solve(self, *lines):
assert lines == [[1,2,3]]
return 1
def run_basic(self, inf, outf, debug=False, silent=False):
self.h = {'fn':'run','inf':inf,'outf':outf,'debug':debug,'silent':silent,'workers':None}
def run_multiproc(self, inf, outf, debug=False, silent=False, workers=4):
self.h = {'fn':'run_multiproc','inf':inf,'outf':outf,'debug':debug,'silent':silent,'workers':workers}
def setUp(self):
self.cj = CodeJam(parsers.ints, self.solve)
self.cj.run = self.run_basic
self.cj.run_multiproc = self.run_multiproc
self.h = {s:None for s in 'fn inf outf debug silent workers'.split()}
inf = open('test.in','w')
inf.write('2\n1 2 3\n1 2 3')
inf.close()
def tearDown(self):
pass
def test_basic(self):
argv = ['test.in']
self.cj.main(argv)
assert self.h['fn'] == 'run'
assert self.h['inf'].name == 'test.in'
assert self.h['inf'].read() == '''2
1 2 3
1 2 3'''
assert self.h['outf'].name == 'test.out'
assert self.h['silent'] == False
assert self.h['debug'] == False
def test_debug_silent(self):
argv = ['test.in','-d','-q']
self.cj.main(argv)
assert self.h['fn'] == 'run'
assert self.h['silent'] == True
assert self.h['debug'] == True
def test_outpuf_file(self):
argv = ['test.in','-o','another.out']
self.cj.main(argv)
assert self.h['fn'] == 'run'
assert self.h['outf'].name == 'another.out'
def test_multiproc(self):
argv = ['test.in','-m','-w','3']
self.cj.main(argv)
assert self.h['fn'] == 'run_multiproc'
assert self.h['workers'] == 3
from time import sleep, time
class TestHelpers(unittest.TestCase):
    """Tests for codejam.helpers."""

    def test_memoize(self):
        """A memoized function must not re-run its slow body on a repeat call."""
        @memoize
        def fn(x):
            sleep(.2)
            return 1
        assert fn(1) == 1  # first call: actually runs (sleeps 0.2s)
        start = time()
        assert fn(1) == 1  # second call: must be served from the cache
        elapsed = time() - start
        # time() measures seconds, so the cached call must take far less than
        # the 0.2s sleep.  (The original threshold of 100 was vacuous: it was
        # intended as milliseconds but compared against a value in seconds.)
        assert elapsed < 0.1
# Allow running this test module directly: python codejam_test.py
if __name__ == "__main__":
    unittest.main()
| {
"content_hash": "07597fe0fe8094bad09fd9e75e69df92",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 105,
"avg_line_length": 27.730337078651687,
"alnum_prop": 0.5923824959481362,
"repo_name": "yanatan16/pycodejam",
"id": "f4eae69d18c754d6e0f939cc80f4a5ab32792e78",
"size": "7405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/codejam/tests/codejam_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16435"
},
{
"name": "JavaScript",
"bytes": "52869"
},
{
"name": "Python",
"bytes": "26956"
}
],
"symlink_target": ""
} |
from base64 import decodestring
import os
import re
# System libary imports.
from IPython.external.qt import QtCore, QtGui
# Local imports
from IPython.utils.traitlets import Bool
from IPython.qt.svg import save_svg, svg_to_clipboard, svg_to_image
from ipython_widget import IPythonWidget
class RichIPythonWidget(IPythonWidget):
    """ An IPythonWidget that supports rich text, including lists, images, and
        tables. Note that raw performance will be reduced compared to the plain
        text version.
    """

    # RichIPythonWidget protected class variables.
    _payload_source_plot = 'IPython.kernel.zmq.pylab.backend_payload.add_plot_payload'
    _jpg_supported = Bool(False)

    # Used to determine whether a given html export attempt has already
    # displayed a warning about being unable to convert a png to svg.
    _svg_warning_displayed = False

    #---------------------------------------------------------------------------
    # 'object' interface
    #---------------------------------------------------------------------------

    def __init__(self, *args, **kw):
        """ Create a RichIPythonWidget.
        """
        kw['kind'] = 'rich'
        super(RichIPythonWidget, self).__init__(*args, **kw)

        # Configure the ConsoleWidget HTML exporter for our formats.
        self._html_exporter.image_tag = self._get_image_tag

        # Dictionary for resolving document resource names to SVG data.
        self._name_to_svg_map = {}

        # Do we support jpg?
        # JPEG support can ship as a Qt plugin, so probe the reader's
        # available formats instead of assuming it is always present.
        _supported_format = map(str, QtGui.QImageReader.supportedImageFormats())
        self._jpg_supported = 'jpeg' in _supported_format

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' public interface overrides
    #---------------------------------------------------------------------------

    def export_html(self):
        """ Shows a dialog to export HTML/XML in various formats.

            Overridden in order to reset the _svg_warning_displayed flag prior
            to the export running.
        """
        self._svg_warning_displayed = False
        super(RichIPythonWidget, self).export_html()

    #---------------------------------------------------------------------------
    # 'ConsoleWidget' protected interface
    #---------------------------------------------------------------------------

    def _context_menu_make(self, pos):
        """ Reimplemented to return a custom context menu for images.
        """
        format = self._control.cursorForPosition(pos).charFormat()
        name = format.stringProperty(QtGui.QTextFormat.ImageName)
        if name:
            menu = QtGui.QMenu()

            menu.addAction('Copy Image', lambda: self._copy_image(name))
            menu.addAction('Save Image As...', lambda: self._save_image(name))
            menu.addSeparator()

            # Only images that arrived as SVG have an entry in the map and
            # get the extra SVG actions.
            svg = self._name_to_svg_map.get(name, None)
            if svg is not None:
                menu.addSeparator()
                menu.addAction('Copy SVG', lambda: svg_to_clipboard(svg))
                menu.addAction('Save SVG As...',
                               lambda: save_svg(svg, self._control))
        else:
            menu = super(RichIPythonWidget, self)._context_menu_make(pos)
        return menu

    #---------------------------------------------------------------------------
    # 'BaseFrontendMixin' abstract interface
    #---------------------------------------------------------------------------

    def _pre_image_append(self, msg, prompt_number):
        """ Append the Out[] prompt and make the output nicer.

            Shared code for the image-handling branches of _handle_pyout.
        """
        self.log.debug("pyout: %s", msg.get('content', ''))
        self._append_plain_text(self.output_sep, True)
        self._append_html(self._make_out_prompt(prompt_number), True)
        self._append_plain_text('\n', True)

    def _handle_pyout(self, msg):
        """ Overridden to handle rich data types, like SVG.
        """
        if not self._hidden and self._is_from_this_session(msg):
            content = msg['content']
            prompt_number = content.get('execution_count', 0)
            data = content['data']
            metadata = msg['content']['metadata']
            if 'image/svg+xml' in data:
                self._pre_image_append(msg, prompt_number)
                self._append_svg(data['image/svg+xml'], True)
                self._append_html(self.output_sep2, True)
            elif 'image/png' in data:
                self._pre_image_append(msg, prompt_number)
                # PNG payloads are base64 encoded in the JSON message.
                png = decodestring(data['image/png'].encode('ascii'))
                self._append_png(png, True, metadata=metadata.get('image/png', None))
                self._append_html(self.output_sep2, True)
            elif 'image/jpeg' in data and self._jpg_supported:
                self._pre_image_append(msg, prompt_number)
                jpg = decodestring(data['image/jpeg'].encode('ascii'))
                self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
                self._append_html(self.output_sep2, True)
            else:
                # Default back to the plain text representation.
                return super(RichIPythonWidget, self)._handle_pyout(msg)

    def _handle_display_data(self, msg):
        """ Overridden to handle rich data types, like SVG.
        """
        if not self._hidden and self._is_from_this_session(msg):
            # NOTE(review): 'source' is extracted but unused below.
            source = msg['content']['source']
            data = msg['content']['data']
            metadata = msg['content']['metadata']
            # Try to use the svg or html representations.
            # FIXME: Is this the right ordering of things to try?
            if 'image/svg+xml' in data:
                self.log.debug("display: %s", msg.get('content', ''))
                svg = data['image/svg+xml']
                self._append_svg(svg, True)
            elif 'image/png' in data:
                self.log.debug("display: %s", msg.get('content', ''))
                # PNG data is base64 encoded as it passes over the network
                # in a JSON structure so we decode it.
                png = decodestring(data['image/png'].encode('ascii'))
                self._append_png(png, True, metadata=metadata.get('image/png', None))
            elif 'image/jpeg' in data and self._jpg_supported:
                self.log.debug("display: %s", msg.get('content', ''))
                jpg = decodestring(data['image/jpeg'].encode('ascii'))
                self._append_jpg(jpg, True, metadata=metadata.get('image/jpeg', None))
            else:
                # Default back to the plain text representation.
                return super(RichIPythonWidget, self)._handle_display_data(msg)

    #---------------------------------------------------------------------------
    # 'RichIPythonWidget' protected interface
    #---------------------------------------------------------------------------

    def _append_jpg(self, jpg, before_prompt=False, metadata=None):
        """ Append raw JPG data to the widget."""
        self._append_custom(self._insert_jpg, jpg, before_prompt, metadata=metadata)

    def _append_png(self, png, before_prompt=False, metadata=None):
        """ Append raw PNG data to the widget.
        """
        self._append_custom(self._insert_png, png, before_prompt, metadata=metadata)

    def _append_svg(self, svg, before_prompt=False):
        """ Append raw SVG data to the widget.
        """
        self._append_custom(self._insert_svg, svg, before_prompt)

    def _add_image(self, image):
        """ Adds the specified QImage to the document and returns a
            QTextImageFormat that references it.
        """
        document = self._control.document()
        # The QImage cache key doubles as the document resource name.
        name = str(image.cacheKey())
        document.addResource(QtGui.QTextDocument.ImageResource,
                             QtCore.QUrl(name), image)
        format = QtGui.QTextImageFormat()
        format.setName(name)
        return format

    def _copy_image(self, name):
        """ Copies the ImageResource with 'name' to the clipboard.
        """
        image = self._get_image(name)
        QtGui.QApplication.clipboard().setImage(image)

    def _get_image(self, name):
        """ Returns the QImage stored as the ImageResource with 'name'.
        """
        document = self._control.document()
        image = document.resource(QtGui.QTextDocument.ImageResource,
                                  QtCore.QUrl(name))
        return image

    def _get_image_tag(self, match, path = None, format = "png"):
        """ Return (X)HTML mark-up for the image-tag given by match.

        Parameters
        ----------
        match : re.SRE_Match
            A match to an HTML image tag as exported by Qt, with
            match.group("Name") containing the matched image ID.

        path : string|None, optional [default None]
            If not None, specifies a path to which supporting files may be
            written (e.g., for linked images).  If None, all images are to be
            included inline.

        format : "png"|"svg"|"jpg", optional [default "png"]
            Format for returned or referenced images.
        """
        if format in ("png","jpg"):
            try:
                image = self._get_image(match.group("name"))
            except KeyError:
                return "<b>Couldn't find image %s</b>" % match.group("name")

            if path is not None:
                # Write the image to a file next to the export and link it.
                if not os.path.exists(path):
                    os.mkdir(path)
                relpath = os.path.basename(path)
                if image.save("%s/qt_img%s.%s" % (path, match.group("name"), format),
                              "PNG"):
                    return '<img src="%s/qt_img%s.%s">' % (relpath,
                                                           match.group("name"),format)
                else:
                    return "<b>Couldn't save image!</b>"
            else:
                # Inline the image as a base64 data URI, wrapped at 60 chars.
                ba = QtCore.QByteArray()
                buffer_ = QtCore.QBuffer(ba)
                buffer_.open(QtCore.QIODevice.WriteOnly)
                image.save(buffer_, format.upper())
                buffer_.close()
                return '<img src="data:image/%s;base64,\n%s\n" />' % (
                    format,re.sub(r'(.{60})',r'\1\n',str(ba.toBase64())))

        elif format == "svg":
            try:
                svg = str(self._name_to_svg_map[match.group("name")])
            except KeyError:
                # The image was never SVG: warn once per export, then emit a
                # placeholder explaining the limitation.
                if not self._svg_warning_displayed:
                    QtGui.QMessageBox.warning(self, 'Error converting PNG to SVG.',
                        'Cannot convert PNG images to SVG, export with PNG figures instead. '
                        'If you want to export matplotlib figures as SVG, add '
                        'to your ipython config:\n\n'
                        '\tc.InlineBackend.figure_format = \'svg\'\n\n'
                        'And regenerate the figures.',
                                              QtGui.QMessageBox.Ok)
                    self._svg_warning_displayed = True
                return ("<b>Cannot convert PNG images to SVG.</b> "
                        "You must export this session with PNG images. "
                        "If you want to export matplotlib figures as SVG, add to your config "
                        "<span>c.InlineBackend.figure_format = 'svg'</span> "
                        "and regenerate the figures.")

            # Not currently checking path, because it's tricky to find a
            # cross-browser way to embed external SVG images (e.g., via
            # object or embed tags).

            # Chop stand-alone header from matplotlib SVG
            offset = svg.find("<svg")
            assert(offset > -1)

            return svg[offset:]

        else:
            return '<b>Unrecognized image format</b>'

    def _insert_jpg(self, cursor, jpg, metadata=None):
        """ Insert raw JPG data into the widget."""
        self._insert_img(cursor, jpg, 'jpg', metadata=metadata)

    def _insert_png(self, cursor, png, metadata=None):
        """ Insert raw PNG data into the widget.
        """
        self._insert_img(cursor, png, 'png', metadata=metadata)

    def _insert_img(self, cursor, img, fmt, metadata=None):
        """ Insert a raw image, jpg or png, optionally scaled to the
            width/height given in *metadata*.
        """
        if metadata:
            width = metadata.get('width', None)
            height = metadata.get('height', None)
        else:
            width = height = None
        try:
            image = QtGui.QImage()
            image.loadFromData(img, fmt.upper())
            if width and height:
                image = image.scaled(width, height, transformMode=QtCore.Qt.SmoothTransformation)
            elif width and not height:
                image = image.scaledToWidth(width, transformMode=QtCore.Qt.SmoothTransformation)
            elif height and not width:
                image = image.scaledToHeight(height, transformMode=QtCore.Qt.SmoothTransformation)
        except ValueError:
            self._insert_plain_text(cursor, 'Received invalid %s data.'%fmt)
        else:
            format = self._add_image(image)
            cursor.insertBlock()
            cursor.insertImage(format)
            cursor.insertBlock()

    def _insert_svg(self, cursor, svg):
        """ Insert raw SVG data into the widget.
        """
        try:
            image = svg_to_image(svg)
        except ValueError:
            self._insert_plain_text(cursor, 'Received invalid SVG data.')
        else:
            format = self._add_image(image)
            # Remember the original SVG source so it can be re-exported.
            self._name_to_svg_map[format.name()] = svg
            cursor.insertBlock()
            cursor.insertImage(format)
            cursor.insertBlock()

    def _save_image(self, name, format='PNG'):
        """ Shows a save dialog for the ImageResource with 'name'.
        """
        dialog = QtGui.QFileDialog(self._control, 'Save Image')
        dialog.setAcceptMode(QtGui.QFileDialog.AcceptSave)
        dialog.setDefaultSuffix(format.lower())
        dialog.setNameFilter('%s file (*.%s)' % (format, format.lower()))
        if dialog.exec_():
            filename = dialog.selectedFiles()[0]
            image = self._get_image(name)
            image.save(filename, format)
| {
"content_hash": "a9522acf1d33aab4e957ea7ad100c0ad",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 98,
"avg_line_length": 43.50301204819277,
"alnum_prop": 0.5363844076715364,
"repo_name": "noslenfa/tdjangorest",
"id": "302c6c6911335531a699ebb042eed9d7fac7e66b",
"size": "14819",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "uw/lib/python2.7/site-packages/IPython/qt/console/rich_ipython_widget.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189930"
},
{
"name": "Groff",
"bytes": "7138"
},
{
"name": "HTML",
"bytes": "279754"
},
{
"name": "JavaScript",
"bytes": "1017625"
},
{
"name": "Makefile",
"bytes": "7062"
},
{
"name": "Python",
"bytes": "11886731"
},
{
"name": "Shell",
"bytes": "3741"
},
{
"name": "Smarty",
"bytes": "20972"
}
],
"symlink_target": ""
} |
from django.utils.encoding import force_text
from unittest import mock
from olympia import amo
from olympia.addons.models import Addon, AddonReviewerFlags
from olympia.amo.tests import (
TestCase, addon_factory, file_factory, version_factory)
from olympia.reviewers.forms import ReviewForm
from olympia.reviewers.models import CannedResponse
from olympia.reviewers.utils import ReviewHelper
from olympia.users.models import UserProfile
class TestReviewForm(TestCase):
    """Tests for the reviewer ReviewForm: available actions, canned
    responses, required fields, and the versions queryset."""
    fixtures = ('base/users', 'base/addon_3615')

    def setUp(self):
        super(TestReviewForm, self).setUp()
        self.addon = Addon.objects.get(pk=3615)
        self.version = self.addon.versions.all()[0]

        class FakeRequest:
            # Minimal request stand-in: only .user is consumed downstream.
            user = UserProfile.objects.get(pk=10482)

        self.request = FakeRequest()
        self.file = self.version.files.all()[0]

    def get_form(self, data=None):
        """Build a ReviewForm bound to self.addon/self.version."""
        return ReviewForm(
            data=data,
            helper=ReviewHelper(request=self.request, addon=self.addon,
                                version=self.version))

    def set_statuses_and_get_actions(self, addon_status, file_status):
        """Apply the given statuses and return the helper's action dict."""
        self.file.update(status=file_status)
        self.addon.update(status=addon_status)
        # Need to clear self.version.all_files cache since we updated the file.
        del self.version.all_files
        form = self.get_form()
        return form.helper.get_actions(self.request)

    def test_actions_reject(self):
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED,
            file_status=amo.STATUS_AWAITING_REVIEW)['reject']['details']
        assert force_text(actions).startswith('This will reject this version')

    def test_actions_addon_status_null(self):
        # If the add-on is null we only show info, comment and super review.
        assert len(self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NULL, file_status=amo.STATUS_NULL)) == 3

    def test_actions_addon_status_deleted(self):
        # If the add-on is deleted we only show info, comment and super review.
        assert len(self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_DELETED, file_status=amo.STATUS_NULL)) == 3

    def test_actions_no_pending_files(self):
        # If the add-on has no pending files we only show info, comment and
        # super review.
        assert len(self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_APPROVED,
            file_status=amo.STATUS_APPROVED)) == 3
        assert len(self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_DISABLED,
            file_status=amo.STATUS_DISABLED)) == 3

    @mock.patch('olympia.access.acl.action_allowed')
    def test_actions_admin_flagged_addon_actions(self, action_allowed_mock):
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_code_review=True)
        # Test with an admin reviewer.
        action_allowed_mock.return_value = True
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED,
            file_status=amo.STATUS_AWAITING_REVIEW)
        assert 'public' in actions.keys()
        # Test with an non-admin reviewer.
        action_allowed_mock.return_value = False
        actions = self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED,
            file_status=amo.STATUS_AWAITING_REVIEW)
        assert 'public' not in actions.keys()

    def test_canned_responses(self):
        self.cr_addon = CannedResponse.objects.create(
            name=u'addon reason', response=u'addon reason body',
            sort_group=u'public', type=amo.CANNED_RESPONSE_TYPE_ADDON)
        self.cr_theme = CannedResponse.objects.create(
            name=u'theme reason', response=u'theme reason body',
            sort_group=u'public', type=amo.CANNED_RESPONSE_TYPE_THEME)
        self.set_statuses_and_get_actions(
            addon_status=amo.STATUS_NOMINATED,
            file_status=amo.STATUS_AWAITING_REVIEW)
        form = self.get_form()
        choices = form.fields['canned_response'].choices[1][1]
        # choices is grouped by the sort_group, where choices[0] is the
        # default "Choose a response..." option.
        # Within that, it's paired by [group, [[response, name],...]].
        # So above, choices[1][1] gets the first real group's list of
        # responses.
        assert len(choices) == 1  # No theme response
        assert self.cr_addon.response in choices[0]
        # Check we get different canned responses for static themes.
        self.addon.update(type=amo.ADDON_STATICTHEME)
        form = self.get_form()
        choices = form.fields['canned_response'].choices[1][1]
        assert self.cr_theme.response in choices[0]
        assert len(choices) == 1  # No addon response

    def test_comments_and_action_required_by_default(self):
        form = self.get_form()
        assert not form.is_bound
        form = self.get_form(data={})
        assert form.is_bound
        assert not form.is_valid()
        assert form.errors == {
            'action': [u'This field is required.'],
            'comments': [u'This field is required.']
        }
        # Alter the action to make it not require comments to be sent
        # regardless of what the action actually is, what we want to test is
        # the form behaviour.
        form = self.get_form(data={'action': 'reply'})
        form.helper.actions['reply']['comments'] = False
        assert form.is_bound
        assert form.is_valid()
        assert not form.errors

    def test_versions_queryset(self):
        # Unrelated add-on/version data that must stay out of the queryset.
        addon_factory()
        file_factory(version=self.addon.current_version)
        version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == []
        # With post-review permission, the reject_multiple_versions action will
        # be available, resetting the queryset of allowed choices.
        self.grant_permission(self.request.user, 'Addons:PostReview')
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == [
            self.addon.current_version]

    def test_versions_queryset_contains_pending_version(self):
        # Unrelated add-on plus a pending listed version on self.addon.
        addon_factory()
        version_factory(addon=self.addon, channel=amo.RELEASE_CHANNEL_LISTED,
                        file_kw={'status': amo.STATUS_AWAITING_REVIEW})
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == []
        # With post-review permission, the reject_multiple_versions action will
        # be available, resetting the queryset of allowed choices.
        self.grant_permission(self.request.user, 'Addons:PostReview')
        form = self.get_form()
        assert not form.is_bound
        assert form.fields['versions'].required is False
        assert list(form.fields['versions'].queryset) == list(
            self.addon.versions.all().order_by('pk'))

    def test_versions_required(self):
        self.grant_permission(self.request.user, 'Addons:PostReview')
        form = self.get_form(data={
            'action': 'reject_multiple_versions', 'comments': 'lol'})
        form.helper.actions['reject_multiple_versions']['versions'] = True
        assert form.is_bound
        assert not form.is_valid()
        assert form.errors == {
            'versions': [u'This field is required.']
        }
| {
"content_hash": "eb66ca21c42386c49c596bc2ff7a2130",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 79,
"avg_line_length": 43.60112359550562,
"alnum_prop": 0.6446334235278959,
"repo_name": "psiinon/addons-server",
"id": "9b636353660291a0e13152b474055c518960c34e",
"size": "7761",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/olympia/reviewers/tests/test_forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "752741"
},
{
"name": "Dockerfile",
"bytes": "4089"
},
{
"name": "HTML",
"bytes": "314894"
},
{
"name": "JavaScript",
"bytes": "947557"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "5192809"
},
{
"name": "Shell",
"bytes": "6712"
},
{
"name": "Smarty",
"bytes": "1418"
},
{
"name": "TSQL",
"bytes": "6926"
}
],
"symlink_target": ""
} |
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the PreorderReservation model: a quantity of a preordered
    product variant channel listing reserved for a checkout line until a
    deadline, with a composite index and a uniqueness constraint."""

    dependencies = [
        ("checkout", "0038_merge_20210903_1048"),
        ("product", "0153_merge_20211006_0910"),
        ("warehouse", "0018_auto_20210323_2116"),
    ]

    operations = [
        migrations.CreateModel(
            name="PreorderReservation",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                ("quantity_reserved", models.PositiveIntegerField(default=0)),
                ("reserved_until", models.DateTimeField()),
                (
                    "checkout_line",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="preorder_reservations",
                        to="checkout.checkoutline",
                    ),
                ),
                (
                    "product_variant_channel_listing",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="preorder_reservations",
                        to="product.productvariantchannellisting",
                    ),
                ),
            ],
            options={
                "ordering": ("pk",),
            },
        ),
        # Composite index over (checkout_line, reserved_until).
        migrations.AddIndex(
            model_name="preorderreservation",
            index=models.Index(
                fields=["checkout_line", "reserved_until"],
                name="warehouse_p_checkou_3abf41_idx",
            ),
        ),
        # At most one reservation per (checkout line, channel listing) pair.
        migrations.AlterUniqueTogether(
            name="preorderreservation",
            unique_together={("checkout_line", "product_variant_channel_listing")},
        ),
    ]
| {
"content_hash": "eb04f489909f750b2f181f022c6ee67d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 83,
"avg_line_length": 33.95,
"alnum_prop": 0.4521354933726068,
"repo_name": "mociepka/saleor",
"id": "89fe2d54d5ddd02defc8cc210e22ac092f9f58e4",
"size": "2086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/warehouse/migrations/0019_auto_20211019_1438.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
} |
import ddt
from neutronclient.common import exceptions
from oslo_serialization import jsonutils
from kuryr import app
from kuryr.tests.unit import base
from kuryr import utils
@ddt.ddt
class TestIpamRequestPoolFailures(base.TestKuryrFailures):
    """Unit tests for testing request pool failures.

    This test covers error responses listed in the spec:
    http://developer.openstack.org/api-ref-networking-v2-ext.html#createSubnetPool
    http://developer.openstack.org/api-ref-networking-v2-ext.html#listSubnetPools
    """
    def _invoke_create_request(self, pool):
        # POST a minimal libnetwork IpamDriver.RequestPool body for `pool`.
        fake_request = {
            'AddressSpace': '',
            'Pool': pool,
            'SubPool': '',  # In the case --ip-range is not given
            'Options': {},
            'V6': False
        }
        response = self.app.post('/IpamDriver.RequestPool',
                                 content_type='application/json',
                                 data=jsonutils.dumps(fake_request))
        return response

    @ddt.data(exceptions.Unauthorized, exceptions.Forbidden,
              exceptions.NotFound)
    def test_request_pool_create_failures(self, GivenException):
        # An exception from Neutron's create_subnetpool must surface the same
        # HTTP status code and an 'Err' payload from the driver endpoint.
        pool_name = utils.get_neutron_subnetpool_name("10.0.0.0/16")
        new_subnetpool = {
            'name': pool_name,
            'default_prefixlen': 16,
            'prefixes': ['10.0.0.0/16']}

        # mox record phase: no existing pool, then create raises.
        self.mox.StubOutWithMock(app.neutron, 'list_subnetpools')
        fake_name = pool_name
        app.neutron.list_subnetpools(name=fake_name).AndReturn(
            {'subnetpools': []})
        self.mox.StubOutWithMock(app.neutron, 'create_subnetpool')
        app.neutron.create_subnetpool(
            {'subnetpool': new_subnetpool}).AndRaise(GivenException)
        self.mox.ReplayAll()

        pool = '10.0.0.0/16'
        response = self._invoke_create_request(pool)

        self.assertEqual(GivenException.status_code, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertIn('Err', decoded_json)
        self.assertEqual(
            {'Err': GivenException.message}, decoded_json)

    def test_request_pool_bad_request_failure(self):
        # A Pool value that is not a CIDR must yield HTTP 400 with the
        # offending value echoed in the error message.
        pool = 'pool-should-be-cidr'
        response = self._invoke_create_request(pool)

        self.assertEqual(400, response.status_code)
        decoded_json = jsonutils.loads(response.data)
        self.assertIn('Err', decoded_json)
        self.assertIn(pool, decoded_json['Err'])
        self.assertIn('Pool', decoded_json['Err'])

    def test_request_pool_list_subnetpool_failure(self):
        # Failures from list_subnetpools propagate their HTTP status code.
        self.mox.StubOutWithMock(app.neutron, 'list_subnetpools')
        pool_name = utils.get_neutron_subnetpool_name("10.0.0.0/16")
        fake_name = pool_name
        ex = exceptions.Unauthorized
        app.neutron.list_subnetpools(name=fake_name).AndRaise(ex)
        self.mox.ReplayAll()

        pool = '10.0.0.0/16'
        response = self._invoke_create_request(pool)

        self.assertEqual(ex.status_code, response.status_code)
| {
"content_hash": "91e29f9694b8a52284f2bb28693ee495",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 86,
"avg_line_length": 36.792682926829265,
"alnum_prop": 0.6324163075903215,
"repo_name": "midonet/kuryr",
"id": "053416852725193abdd5792cbfbaa2eb649ab0aa",
"size": "3563",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kuryr/tests/unit/test_ipam_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "184823"
},
{
"name": "Ruby",
"bytes": "1438"
},
{
"name": "Shell",
"bytes": "16265"
}
],
"symlink_target": ""
} |
from sys import argv
script, first, second, third = argv
# script is the name of the file, its value is a priori
print "The script is called:", script
# you must run this code in bash with three space seperated variables in order to succeed
print "Your first variable is:", first
print "Your second variable is:", second
print "You third variable is:", third | {
"content_hash": "de160de42be285c9c3cc3795027ce680",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 27.923076923076923,
"alnum_prop": 0.7493112947658402,
"repo_name": "FlippantSol/CIS-121",
"id": "56ee6368f8d18f6117709b51bd41b4e7ac63f283",
"size": "363",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lpthw/ex13.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19888"
}
],
"symlink_target": ""
} |
from ghidra.app.util.datatype import DataTypeSelectionDialog
from ghidra.framework.plugintool import PluginTool
from ghidra.program.model.data import DataType
from ghidra.program.model.data import DataTypeManager
from ghidra.util.data.DataTypeParser import AllowedDataTypes
tool = state.getTool()
dtm = currentProgram.getDataTypeManager()
selectionDialog = DataTypeSelectionDialog(tool, dtm, -1, AllowedDataTypes.FIXED_LENGTH)
tool.showDialog(selectionDialog)
dataType = selectionDialog.getUserChosenDataType()
if dataType is not None:
print "Chosen data type: " + str(dataType)
| {
"content_hash": "60154ad16a31f54ad944de6ec034c383",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 87,
"avg_line_length": 41.857142857142854,
"alnum_prop": 0.8293515358361775,
"repo_name": "NationalSecurityAgency/ghidra",
"id": "be36f8fdf996775e8e730d5a3113ea296e12cf61",
"size": "1399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Ghidra/Features/Python/ghidra_scripts/ChooseDataTypeScriptPy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "77536"
},
{
"name": "Batchfile",
"bytes": "21610"
},
{
"name": "C",
"bytes": "1132868"
},
{
"name": "C++",
"bytes": "7334484"
},
{
"name": "CSS",
"bytes": "75788"
},
{
"name": "GAP",
"bytes": "102771"
},
{
"name": "GDB",
"bytes": "3094"
},
{
"name": "HTML",
"bytes": "4121163"
},
{
"name": "Hack",
"bytes": "31483"
},
{
"name": "Haskell",
"bytes": "453"
},
{
"name": "Java",
"bytes": "88669329"
},
{
"name": "JavaScript",
"bytes": "1109"
},
{
"name": "Lex",
"bytes": "22193"
},
{
"name": "Makefile",
"bytes": "15883"
},
{
"name": "Objective-C",
"bytes": "23937"
},
{
"name": "Pawn",
"bytes": "82"
},
{
"name": "Python",
"bytes": "587415"
},
{
"name": "Shell",
"bytes": "234945"
},
{
"name": "TeX",
"bytes": "54049"
},
{
"name": "XSLT",
"bytes": "15056"
},
{
"name": "Xtend",
"bytes": "115955"
},
{
"name": "Yacc",
"bytes": "127754"
}
],
"symlink_target": ""
} |
import logging; logger = logging.getLogger("morse." + __name__)
import morse.core.robot
class SubmarineClass(morse.core.robot.MorseRobotClass):
    """ Class definition for the Submarine.
    Sub class of Morse_Object. """

    def __init__(self, obj, parent=None):
        """ Constructor method.
        Receives the reference to the Blender object.
        Optionally it gets the name of the object's parent,
        but that information is not currently used for a robot. """
        # Call the constructor of the parent class.
        # Name the class explicitly rather than using super(self.__class__, self):
        # with self.__class__, a subclass of SubmarineClass would resolve super()
        # back to SubmarineClass itself and recurse infinitely through __init__.
        logger.info('%s initialization' % obj.name)
        super(SubmarineClass, self).__init__(obj, parent)
        logger.info('Component initialized')

    def default_action(self):
        """ Main function of this component. """
        # Intentionally a no-op: the submarine robot has no periodic behavior.
        pass
| {
"content_hash": "ba211b288d67ac8f1cc5f11588f4b87b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 36.22727272727273,
"alnum_prop": 0.6348808030112923,
"repo_name": "Arkapravo/morse-0.6",
"id": "5baba4a2d9528e9bf4428114aa5d8e4b28badc88",
"size": "797",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/morse/robots/submarine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "46148"
},
{
"name": "C++",
"bytes": "30878"
},
{
"name": "Perl",
"bytes": "1705"
},
{
"name": "Python",
"bytes": "1117700"
},
{
"name": "Shell",
"bytes": "684"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import shutil
from collections import defaultdict
from pathspec import PathSpec
from pathspec.gitignore import GitIgnorePattern
from twitter.common.collections.orderedset import OrderedSet
from pants.backend.jvm.subsystems.scala_platform import ScalaPlatform
from pants.backend.jvm.targets.annotation_processor import AnnotationProcessor
from pants.backend.jvm.targets.scala_library import ScalaLibrary
from pants.backend.jvm.tasks.classpath_products import ClasspathProducts
from pants.backend.jvm.tasks.ivy_task_mixin import IvyTaskMixin
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.build_environment import get_buildroot
from pants.base.build_file import BuildFile
from pants.base.exceptions import TaskError
from pants.binaries import binary_util
from pants.build_graph.address import BuildFileAddress
from pants.build_graph.resources import Resources
from pants.util.dirutil import safe_mkdir, safe_walk
# Module-level logger, following the standard `logging.getLogger(__name__)` idiom.
logger = logging.getLogger(__name__)


# We use custom checks for scala and java targets here for 2 reasons:
# 1.) jvm_binary could have either a scala or java source file attached so we can't do a pure
#     target type test
# 2.) the target may be under development in which case it may not have sources yet - it's pretty
#     common to write a BUILD and ./pants idea the target inside to start development at which
#     point there are no source files yet - and the developer intends to add them using the ide.
def is_scala(target):
  """Check whether ``target`` should be treated as Scala.

  A target counts as Scala when it owns ``.scala`` sources, or when its
  ``is_scala`` flag is set (covering targets that have no sources yet).
  """
  owns_scala_sources = target.has_sources('.scala')
  return owns_scala_sources or target.is_scala
def is_java(target):
  """Check whether ``target`` should be treated as Java.

  A target counts as Java when it owns ``.java`` sources, or when its
  ``is_java`` flag is set (covering targets that have no sources yet).
  """
  owns_java_sources = target.has_sources('.java')
  return owns_java_sources or target.is_java
class IdeGen(IvyTaskMixin, NailgunTask):
  """Base task for exporting a set of JVM targets as an IDE project.

  Handles option registration, pruning the target graph down to what must be
  pre-compiled for the IDE classpath, and staging internal and external jars
  (with their source/javadoc attachments) under the project work directory.
  Concrete IDE support is supplied by subclasses via `generate_project`.
  """

  @classmethod
  def subsystem_dependencies(cls):
    return super(IdeGen, cls).subsystem_dependencies() + (ScalaPlatform, )

  @classmethod
  def register_options(cls, register):
    super(IdeGen, cls).register_options(register)
    register('--project-name', default='project',
             help='Specifies the name to use for the generated project.')
    register('--project-dir',
             help='Specifies the directory to output the generated project files to.')
    register('--project-cwd',
             help='Specifies the directory the generated project should use as the cwd for '
                  'processes it launches. Note that specifying this trumps --{0}-project-dir '
                  'and not all project related files will be stored there.'
                  .format(cls.options_scope))
    register('--intransitive', type=bool,
             help='Limits the sources included in the generated project to just '
                  'those owned by the targets specified on the command line.')
    register('--python', type=bool,
             help='Adds python support to the generated project configuration.')
    register('--java', type=bool, default=True,
             help='Includes java sources in the project; otherwise compiles them and adds them '
                  'to the project classpath.')
    register('--java-language-level', type=int, default=7,
             help='Sets the java language and jdk used to compile the project\'s java sources.')
    register('--java-jdk-name', default=None,
             help='Sets the jdk used to compile the project\'s java sources. If unset the default '
                  'jdk name for the --java-language-level is used')
    register('--scala', type=bool, default=True,
             help='Includes scala sources in the project; otherwise compiles them and adds them '
                  'to the project classpath.')
    register('--use-source-root', type=bool,
             help='Use source roots to collapse sourcepaths in project and determine '
                  'which paths are used for tests. This is usually what you want if your repo '
                  ' uses a maven style directory layout.')
    register('--debug_port', type=int, default=5005,
             help='Port to use for launching tasks under the debugger.')
    register('--source-jars', type=bool, default=True,
             help='Pull source jars from external dependencies into the project.')
    register('--javadoc-jars', type=bool, default=True,
             help='Pull javadoc jars from external dependencies into the project')
    # Options intended to be configured primarily in pants.ini
    register('--python_source_paths', type=list, advanced=True,
             help='Always add these paths to the IDE as Python sources.')
    register('--python_test_paths', type=list, advanced=True,
             help='Always add these paths to the IDE as Python test sources.')
    register('--python_lib_paths', type=list, advanced=True,
             help='Always add these paths to the IDE for Python libraries.')
    register('--extra-jvm-source-paths', type=list, advanced=True,
             help='Always add these paths to the IDE for Java sources.')
    register('--extra-jvm-test-paths', type=list, advanced=True,
             help='Always add these paths to the IDE for Java test sources.')

  @classmethod
  def prepare(cls, options, round_manager):
    super(IdeGen, cls).prepare(options, round_manager)
    # Only demand upstream products for the languages the project will include.
    if options.python:
      round_manager.require('python')
    if options.java:
      round_manager.require('java')
    if options.scala:
      round_manager.require('scala')

  class Error(TaskError):
    """IdeGen Error."""

  class TargetUtil(object):
    """Thin facade over the task context for address and target lookups."""

    def __init__(self, context):
      self.context = context

    @property
    def build_graph(self):
      return self.context.build_graph

    def get_all_addresses(self, buildfile):
      # Every address declared under the BUILD file's spec path.
      return set(self.context.address_mapper.addresses_in_spec_path(buildfile.spec_path))

    def get(self, address):
      # Materialize the target (and its closure) in the build graph on demand.
      self.context.build_graph.inject_address_closure(address)
      return self.context.build_graph.get_target(address)

  def __init__(self, *args, **kwargs):
    super(IdeGen, self).__init__(*args, **kwargs)
    self.project_name = self.get_options().project_name
    self.python = self.get_options().python
    self.skip_java = not self.get_options().java
    self.skip_scala = not self.get_options().scala
    self.use_source_root = self.get_options().use_source_root
    self.java_language_level = self.get_options().java_language_level
    # Derive a jdk name like '1.7' from the language level unless one was given.
    if self.get_options().java_jdk_name:
      self.java_jdk = self.get_options().java_jdk_name
    else:
      self.java_jdk = '1.{}'.format(self.java_language_level)
    # Always tack on the project name to the work dir so each project gets its own linked jars,
    # etc. See https://github.com/pantsbuild/pants/issues/564
    if self.get_options().project_dir:
      self.gen_project_workdir = os.path.abspath(
        os.path.join(self.get_options().project_dir, self.project_name))
    else:
      self.gen_project_workdir = os.path.abspath(
        os.path.join(self.workdir, self.__class__.__name__, self.project_name))
    self.cwd = (
      os.path.abspath(self.get_options().project_cwd) if
      self.get_options().project_cwd else self.gen_project_workdir
    )
    self.intransitive = self.get_options().intransitive
    self.debug_port = self.get_options().debug_port

  def resolve_jars(self, targets):
    """Ivy-resolve 3rdparty jars (plus requested source/javadoc confs) for `targets`.

    :returns: a ClasspathProducts holding the resolved artifacts.
    """
    executor = self.create_java_executor()
    confs = ['default']
    if self.get_options().source_jars:
      confs.append('sources')
    if self.get_options().javadoc_jars:
      confs.append('javadoc')
    compile_classpath = ClasspathProducts(self.get_options().pants_workdir)
    self.resolve(executor=executor,
                 targets=targets,
                 classpath_products=compile_classpath,
                 confs=confs)
    return compile_classpath

  def _prepare_project(self):
    # Builds self._project, then prunes the context to the targets that must
    # be pre-compiled (see configure_compile_context).
    targets, self._project = self.configure_project(
      self.context.targets(),
      self.debug_port)
    self.configure_compile_context(targets)

  def configure_project(self, targets, debug_port):
    """Build the Project model from the jvm-labelled targets (and Resources) in play."""
    jvm_targets = [t for t in targets if t.has_label('jvm') or t.has_label('java') or
                   isinstance(t, Resources)]
    if self.intransitive:
      jvm_targets = set(self.context.target_roots).intersection(jvm_targets)
    build_ignore_patterns = self.context.options.for_global_scope().ignore_patterns or []
    project = Project(self.project_name,
                      self.python,
                      self.skip_java,
                      self.skip_scala,
                      self.use_source_root,
                      get_buildroot(),
                      debug_port,
                      self.context,
                      jvm_targets,
                      not self.intransitive,
                      self.TargetUtil(self.context),
                      PathSpec.from_lines(GitIgnorePattern, build_ignore_patterns))
    if self.python:
      python_source_paths = self.get_options().python_source_paths
      python_test_paths = self.get_options().python_test_paths
      python_lib_paths = self.get_options().python_lib_paths
      project.configure_python(python_source_paths, python_test_paths, python_lib_paths)
    extra_source_paths = self.get_options().extra_jvm_source_paths
    extra_test_paths = self.get_options().extra_jvm_test_paths
    all_targets = project.configure_jvm(extra_source_paths, extra_test_paths)
    return all_targets, project

  def configure_compile_context(self, targets):
    """
    Trims the context's target set to just those targets needed as jars on the IDE classpath.
    All other targets only contribute their external jar dependencies and excludes to the
    classpath definition.
    """
    def is_cp(target):
      # True when the target must be pre-compiled to a jar for the IDE
      # rather than opened as editable project sources.
      return (
        target.is_codegen or
        # Some IDEs need annotation processors pre-compiled, others are smart enough to detect and
        # proceed in 2 compile rounds
        isinstance(target, AnnotationProcessor) or
        (self.skip_java and is_java(target)) or
        (self.skip_scala and is_scala(target)) or
        (self.intransitive and target not in self.context.target_roots)
      )

    jars = OrderedSet()
    excludes = OrderedSet()
    compiles = OrderedSet()

    def prune(target):
      if target.is_jvm:
        if target.excludes:
          excludes.update(target.excludes)
        jars.update(jar for jar in target.jar_dependencies)
        if is_cp(target):
          # The whole closure of a classpath target must be compiled too.
          target.walk(compiles.add)

    for target in targets:
      target.walk(prune)
    # TODO(John Sirois): Restructure to use alternate_target_roots Task lifecycle method
    self.context._replace_targets(compiles)
    self.jar_dependencies = jars
    self.context.log.debug('pruned to cp:\n\t{}'.format(
      '\n\t'.join(str(t) for t in self.context.targets())
    ))

  def map_internal_jars(self, targets):
    """Copy jars built from in-repo targets into the project workdir and record them."""
    internal_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libs')
    safe_mkdir(internal_jar_dir, clean=True)
    internal_source_jar_dir = os.path.join(self.gen_project_workdir, 'internal-libsources')
    safe_mkdir(internal_source_jar_dir, clean=True)
    internal_jars = self.context.products.get('jars')
    internal_source_jars = self.context.products.get('source_jars')
    for target in targets:
      mappings = internal_jars.get(target)
      if mappings:
        for base, jars in mappings.items():
          # Exactly one jar per mapping is expected for a compiled target.
          if len(jars) != 1:
            raise IdeGen.Error('Unexpected mapping, multiple jars for {}: {}'.format(target, jars))
          jar = jars[0]
          cp_jar = os.path.join(internal_jar_dir, jar)
          shutil.copy(os.path.join(base, jar), cp_jar)

          # Attach the matching source jar, if one was produced.
          cp_source_jar = None
          mappings = internal_source_jars.get(target)
          if mappings:
            for base, jars in mappings.items():
              if len(jars) != 1:
                raise IdeGen.Error(
                  'Unexpected mapping, multiple source jars for {}: {}'.format(target, jars)
                )
              jar = jars[0]
              cp_source_jar = os.path.join(internal_source_jar_dir, jar)
              shutil.copy(os.path.join(base, jar), cp_source_jar)

          self._project.internal_jars.add(ClasspathEntry(cp_jar, source_jar=cp_source_jar))

  def map_external_jars(self, targets):
    """Copy resolved 3rdparty jars (plus sources/javadoc attachments) into the workdir."""
    external_jar_dir = os.path.join(self.gen_project_workdir, 'external-libs')
    safe_mkdir(external_jar_dir, clean=True)
    external_source_jar_dir = os.path.join(self.gen_project_workdir, 'external-libsources')
    safe_mkdir(external_source_jar_dir, clean=True)
    external_javadoc_jar_dir = os.path.join(self.gen_project_workdir, 'external-libjavadoc')
    safe_mkdir(external_javadoc_jar_dir, clean=True)

    classpath_products = self.resolve_jars(targets) or ClasspathProducts(self.get_options().pants_workdir)
    # Group resolved artifacts by (org, name), then by classifier within a group.
    cp_entry_by_classifier_by_orgname = defaultdict(lambda: defaultdict(dict))
    for conf, jar_entry in classpath_products.get_artifact_classpath_entries_for_targets(targets):
      coord = (jar_entry.coordinate.org, jar_entry.coordinate.name)
      classifier = jar_entry.coordinate.classifier
      cp_entry_by_classifier_by_orgname[coord][classifier] = jar_entry

    def copy_jar(cp_entry, dest_dir):
      # Copies the artifact into dest_dir; returns the new path or None.
      if not cp_entry:
        return None
      cp_jar = os.path.join(dest_dir, os.path.basename(cp_entry.path))
      shutil.copy(cp_entry.path, cp_jar)
      return cp_jar

    # Per org.name (aka maven "project"), collect the primary artifact and any extra classified
    # artifacts, taking special note of 'sources' and 'javadoc' artifacts that IDEs handle specially
    # to provide source browsing and javadocs for 3rdparty libs.
    for cp_entry_by_classifier in cp_entry_by_classifier_by_orgname.values():
      primary_jar = copy_jar(cp_entry_by_classifier.pop(None, None), external_jar_dir)
      sources_jar = copy_jar(cp_entry_by_classifier.pop('sources', None), external_source_jar_dir)
      javadoc_jar = copy_jar(cp_entry_by_classifier.pop('javadoc', None), external_javadoc_jar_dir)
      if primary_jar:
        self._project.external_jars.add(ClasspathEntry(jar=primary_jar,
                                                       source_jar=sources_jar,
                                                       javadoc_jar=javadoc_jar))
      # Treat all other jars as opaque with no source or javadoc attachments of their own. An
      # example are jars with the 'tests' classifier.
      for jar_entry in cp_entry_by_classifier.values():
        extra_jar = copy_jar(jar_entry, external_jar_dir)
        self._project.external_jars.add(ClasspathEntry(extra_jar))

  def execute(self):
    """Stages IDE project artifacts to a project directory and generates IDE configuration files."""
    # Grab the targets in-play before the context is replaced by `self._prepare_project()` below.
    targets = self.context.targets()
    self._prepare_project()
    if self.context.options.is_known_scope('compile.checkstyle'):
      checkstyle_classpath = self.tool_classpath('checkstyle', scope='compile.checkstyle')
    else:  # Checkstyle not enabled.
      checkstyle_classpath = []
    if self.skip_scala:
      scalac_classpath = []
    else:
      scalac_classpath = ScalaPlatform.global_instance().compiler_classpath(self.context.products)
    self._project.set_tool_classpaths(checkstyle_classpath, scalac_classpath)
    self.map_internal_jars(targets)
    self.map_external_jars(targets)
    idefile = self.generate_project(self._project)
    # Subclasses may return a project file path to open in the user's IDE.
    if idefile:
      binary_util.ui_open(idefile)

  def generate_project(self, project):
    """Write IDE-specific configuration for `project`; implemented by subclasses.

    :returns: a path to open in the IDE, or a falsy value to open nothing.
    """
    raise NotImplementedError('Subclasses must generate a project for an ide')
class ClasspathEntry(object):
  """Represents a classpath entry that may have sources available.

  :param string jar: path to the binary jar placed on the IDE classpath
  :param string source_jar: optional path to a jar of the matching sources
  :param string javadoc_jar: optional path to a jar of the matching javadocs
  """

  def __init__(self, jar, source_jar=None, javadoc_jar=None):
    self.jar = jar
    self.source_jar = source_jar
    self.javadoc_jar = javadoc_jar

  def __repr__(self):
    # Added for debuggability of generated project models; no behavioral impact.
    return 'ClasspathEntry(jar={!r}, source_jar={!r}, javadoc_jar={!r})'.format(
      self.jar, self.source_jar, self.javadoc_jar)
class SourceSet(object):
  """Models a set of source files."""

  def __init__(self, root_dir, source_base, path, is_test=False, resources_only=False):
    """
    :param string root_dir: full path to the root of the project containing this source set
    :param string source_base: the relative path from root_dir to the base of this source set
    :param string path: relative path from the source_base to the base of the sources in this set
    :param bool is_test: true if the sources contained by this set implement test cases
    :param bool resources_only: true if a target has resources but no sources.
    """
    self.root_dir = root_dir
    self.source_base = source_base
    self.path = path
    self.is_test = is_test
    self.resources_only = resources_only
    # Mutated in place by callers via the `excludes` property; not part of identity.
    self._excludes = []

  @property
  def excludes(self):
    """Paths relative to self.path that are excluded from this source set."""
    return self._excludes

  @property
  def _key_tuple(self):
    """Creates a tuple from the attributes used as a key to uniquely identify a SourceSet"""
    # Note: is_test/resources_only intentionally do not participate in
    # equality/hashing, so sets collapse SourceSets that share a path.
    return (self.root_dir, self.source_base, self.path)

  def __str__(self):
    return str(self._key_tuple)

  def __eq__(self, other):
    return self._key_tuple == other._key_tuple

  def __cmp__(self, other):
    # Python 2 ordering hook (cmp); used when SourceSets are sorted.
    return cmp(self._key_tuple, other._key_tuple)

  def __hash__(self):
    return hash(self._key_tuple)

  def __repr__(self):
    return "root_dir={} source_base={} path={} is_test={} resources_only={} _excludes={}".format(
      self.root_dir,
      self.source_base,
      self.path,
      self.is_test,
      self.resources_only,
      self._excludes)
class Project(object):
"""Models a generic IDE project that is comprised of a set of BUILD targets."""
@staticmethod
def extract_resource_extensions(resources):
"""Returns the set of unique extensions (including the .) from the given resource files."""
if resources:
for resource in resources:
_, ext = os.path.splitext(resource)
yield ext
@staticmethod
def _collapse_by_source_root(source_roots, source_sets):
"""Collapse SourceSets with common source roots into one SourceSet instance.
Use the registered source roots to collapse all source paths under a root.
If any test type of target is allowed under the root, the path is determined to be
a test path. This method will give unpredictable results if source root entries overlap.
:param list source_sets: SourceSets to analyze
:returns: list of SourceSets collapsed to the source root paths. There may be duplicate
entries in this list which will be removed by dedup_sources()
"""
collapsed_source_sets = []
for source in source_sets:
query = os.path.join(source.source_base, source.path)
source_root = source_roots.find_by_path(query)
if not source_root:
collapsed_source_sets.append(source)
else:
collapsed_source_sets.append(SourceSet(source.root_dir, source_root.path, "",
is_test=source.is_test,
resources_only=source.resources_only))
return collapsed_source_sets
def __init__(self, name, has_python, skip_java, skip_scala, use_source_root, root_dir,
debug_port, context, targets, transitive, target_util, build_ignore_patterns=None):
"""Creates a new, unconfigured, Project based at root_dir and comprised of the sources visible
to the given targets."""
self.context = context
self.target_util = target_util
self.name = name
self.root_dir = root_dir
self.targets = OrderedSet(targets)
self.transitive = transitive
self.sources = []
self.py_sources = []
self.py_libs = []
self.resource_extensions = set()
self.has_python = has_python
self.skip_java = skip_java
self.skip_scala = skip_scala
self.use_source_root = use_source_root
self.has_scala = False
self.has_tests = False
self.debug_port = debug_port
self.internal_jars = OrderedSet()
self.external_jars = OrderedSet()
self.build_ignore_patterns = build_ignore_patterns
def configure_python(self, source_paths, test_paths, lib_paths):
self.py_sources.extend(SourceSet(get_buildroot(), root, None) for root in source_paths)
self.py_sources.extend(SourceSet(get_buildroot(), root, None, is_test=True) for root in test_paths)
for root in lib_paths:
for path in os.listdir(os.path.join(get_buildroot(), root)):
if os.path.isdir(os.path.join(get_buildroot(), root, path)) or path.endswith('.egg'):
self.py_libs.append(SourceSet(get_buildroot(), root, path, is_test=False))
@classmethod
def dedup_sources(cls, source_set_list):
"""Remove duplicate source sets from the source_set_list.
Sometimes two targets with the same path are added to the source set. Remove duplicates
with the following rules:
1) If two targets are resources_only with different settings for is_test, is_test = False
2) If the targets have different settings for resources_only, resources_only = False
3) If the two non-resource-only targets have different settings for is_test, is_test = True
"""
deduped_sources = set(filter(lambda s: not s.resources_only and s.is_test,
source_set_list))
deduped_sources.update(filter(lambda s: not s.resources_only,
source_set_list))
deduped_sources.update(filter(lambda s : s.resources_only and not s.is_test,
source_set_list))
deduped_sources.update(filter(lambda s : s.resources_only and s.is_test,
source_set_list))
# re-sort the list, makes the generated project easier to read.
return sorted(list(deduped_sources))
def configure_jvm(self, extra_source_paths, extra_test_paths):
"""
Configures this project's source sets returning the full set of targets the project is
comprised of. The full set can be larger than the initial set of targets when any of the
initial targets only has partial ownership of its source set's directories.
"""
# TODO(John Sirois): much waste lies here, revisit structuring for more readable and efficient
# construction of source sets and excludes ... and add a test!
analyzed_targets = OrderedSet()
targeted = set()
def relative_sources(target):
sources = target.payload.sources.relative_to_buildroot()
return [os.path.relpath(source, target.target_base) for source in sources]
def source_target(target):
result = ((self.transitive or target in self.targets) and
target.has_sources() and
(not (self.skip_java and is_java(target)) and
not (self.skip_scala and is_scala(target))))
return result
def configure_source_sets(relative_base, sources, is_test=False, resources_only=False):
absolute_base = os.path.join(self.root_dir, relative_base)
paths = set([os.path.dirname(source) for source in sources])
for path in paths:
absolute_path = os.path.join(absolute_base, path)
# Note, this can add duplicate source paths to self.sources(). We'll de-dup them later,
# because we want to prefer test paths.
targeted.add(absolute_path)
source_set = SourceSet(self.root_dir, relative_base, path,
is_test=is_test, resources_only=resources_only)
self.sources.append(source_set)
def find_source_basedirs(target):
dirs = set()
if source_target(target):
absolute_base = os.path.join(self.root_dir, target.target_base)
dirs.update([os.path.join(absolute_base, os.path.dirname(source))
for source in relative_sources(target)])
return dirs
def configure_target(target):
if target not in analyzed_targets:
analyzed_targets.add(target)
self.has_scala = not self.skip_scala and (self.has_scala or is_scala(target))
# Hack for java_sources and Eclipse/IntelliJ: add java_sources to project
if isinstance(target, ScalaLibrary):
for java_source in target.java_sources:
configure_target(java_source)
# Resources are already in the target set
if target.has_resources:
resources_by_basedir = defaultdict(set)
for resources in target.resources:
analyzed_targets.add(resources)
resources_by_basedir[resources.target_base].update(relative_sources(resources))
for basedir, resources in resources_by_basedir.items():
self.resource_extensions.update(Project.extract_resource_extensions(resources))
configure_source_sets(basedir, resources, is_test=target.is_test,
resources_only=True)
if target.has_sources():
test = target.is_test
self.has_tests = self.has_tests or test
base = target.target_base
configure_source_sets(base, relative_sources(target), is_test=test,
resources_only=isinstance(target, Resources))
# TODO(Garrett Malmquist): This is dead code, and should be redone/reintegrated.
# Other BUILD files may specify sources in the same directory as this target. Those BUILD
# files might be in parent directories (globs('a/b/*.java')) or even children directories if
# this target globs children as well. Gather all these candidate BUILD files to test for
# sources they own that live in the directories this targets sources live in.
target_dirset = find_source_basedirs(target)
if not isinstance(target.address, BuildFileAddress):
return [] # Siblings only make sense for BUILD files.
candidates = OrderedSet()
build_file = target.address.build_file
dir_relpath = os.path.dirname(build_file.relpath)
for descendant in BuildFile.scan_build_files(build_file.project_tree, dir_relpath,
build_ignore_patterns=self.build_ignore_patterns):
candidates.update(self.target_util.get_all_addresses(descendant))
if not self._is_root_relpath(dir_relpath):
ancestors = self._collect_ancestor_build_files(build_file.project_tree, os.path.dirname(dir_relpath),
self.build_ignore_patterns)
for ancestor in ancestors:
candidates.update(self.target_util.get_all_addresses(ancestor))
def is_sibling(target):
return source_target(target) and target_dirset.intersection(find_source_basedirs(target))
return filter(is_sibling, [self.target_util.get(a) for a in candidates if a != target.address])
resource_targets = []
for target in self.targets:
if isinstance(target, Resources):
# Wait to process these until all resources that are reachable from other targets are
# processed. That way we'll only add a new SourceSet if this target has never been seen
# before. This allows test resource SourceSets to be properly keep the is_test property.
resource_targets.append(target)
else:
target.walk(configure_target, predicate=source_target)
for target in resource_targets:
target.walk(configure_target)
def full_path(source_set):
return os.path.join(source_set.root_dir, source_set.source_base, source_set.path)
# Check if there are any overlapping source_sets, and output an error message if so.
# Overlapping source_sets cause serious problems with package name inference.
overlap_error = ('SourceSets {current} and {previous} evaluate to the same full path.'
' This can be caused by multiple BUILD targets claiming the same source,'
' e.g., if a BUILD target in a parent directory contains an rglobs() while'
' a BUILD target in a subdirectory of that uses a globs() which claims the'
' same sources. This may cause package names to be inferred incorrectly (e.g.,'
' you might see src.com.foo.bar.Main instead of com.foo.bar.Main).')
source_full_paths = {}
for source_set in sorted(self.sources, key=full_path):
full = full_path(source_set)
if full in source_full_paths:
previous_set = source_full_paths[full]
logger.debug(overlap_error.format(current=source_set, previous=previous_set))
source_full_paths[full] = source_set
# We need to figure out excludes, in doing so there are 2 cases we should not exclude:
# 1.) targets depend on A only should lead to an exclude of B
# A/BUILD
# A/B/BUILD
#
# 2.) targets depend on A and C should not lead to an exclude of B (would wipe out C)
# A/BUILD
# A/B
# A/B/C/BUILD
#
# 1 approach: build set of all paths and parent paths containing BUILDs our targets depend on -
# these are unexcludable
unexcludable_paths = set()
for source_set in self.sources:
parent = os.path.join(self.root_dir, source_set.source_base, source_set.path)
while True:
unexcludable_paths.add(parent)
parent, _ = os.path.split(parent)
# no need to add the repo root or above, all source paths and extra paths are children
if parent == self.root_dir:
break
for source_set in self.sources:
paths = set()
source_base = os.path.join(self.root_dir, source_set.source_base)
for root, dirs, _ in safe_walk(os.path.join(source_base, source_set.path)):
if dirs:
paths.update([os.path.join(root, directory) for directory in dirs])
unused_children = paths - targeted
if unused_children:
for child in unused_children:
if child not in unexcludable_paths:
source_set.excludes.append(os.path.relpath(child, source_base))
targets = OrderedSet()
for target in self.targets:
target.walk(lambda target: targets.add(target), source_target)
targets.update(analyzed_targets - targets)
self.sources.extend(SourceSet(get_buildroot(), p, None, is_test=False) for p in extra_source_paths)
self.sources.extend(SourceSet(get_buildroot(), p, None, is_test=True) for p in extra_test_paths)
if self.use_source_root:
self.sources = Project._collapse_by_source_root(self.context.source_roots, self.sources)
self.sources = self.dedup_sources(self.sources)
return targets
def set_tool_classpaths(self, checkstyle_classpath, scalac_classpath):
self.checkstyle_classpath = checkstyle_classpath
self.scala_compiler_classpath = scalac_classpath
@classmethod
def _collect_ancestor_build_files(cls, project_tree, dir_relpath, build_ignore_patterns):
for build_file in BuildFile.get_build_files_family(project_tree, dir_relpath, build_ignore_patterns):
yield build_file
while not cls._is_root_relpath(dir_relpath):
dir_relpath = os.path.dirname(dir_relpath)
for build_file in BuildFile.get_build_files_family(project_tree, dir_relpath, build_ignore_patterns):
yield build_file
@classmethod
def _is_root_relpath(cls, relpath):
    """Return True when relpath denotes the repo root itself ('' or '.')."""
    return relpath in ('.', '')
| {
"content_hash": "023c9d2a74917de0190d18e2903fcad7",
"timestamp": "",
"source": "github",
"line_count": 707,
"max_line_length": 111,
"avg_line_length": 44.205091937765204,
"alnum_prop": 0.6671999488049147,
"repo_name": "dbentley/pants",
"id": "14f1fa2cfaf1a6d0ee1e3d11d3a7c334133de43b",
"size": "31400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/project_info/tasks/ide_gen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1569"
},
{
"name": "HTML",
"bytes": "64699"
},
{
"name": "Java",
"bytes": "290988"
},
{
"name": "JavaScript",
"bytes": "31040"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4277407"
},
{
"name": "Scala",
"bytes": "84066"
},
{
"name": "Shell",
"bytes": "50882"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration: creates the UserSocialAuth, Nonce and Association tables."""

    def forwards(self, orm):
        """Create the three social_auth tables and their unique constraints."""
        # Adding model 'UserSocialAuth'
        db.create_table(u'social_auth_usersocialauth', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(related_name='social_auth', to=orm['accounts.User'])),
            ('provider', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('uid', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('extra_data', self.gf('social_auth.fields.JSONField')(default='{}')),
        ))
        db.send_create_signal('social_auth', ['UserSocialAuth'])

        # Adding unique constraint on 'UserSocialAuth', fields ['provider', 'uid']
        db.create_unique(u'social_auth_usersocialauth', ['provider', 'uid'])

        # Adding model 'Nonce' (OpenID replay protection)
        db.create_table(u'social_auth_nonce', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('server_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('timestamp', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
            ('salt', self.gf('django.db.models.fields.CharField')(max_length=40)),
        ))
        db.send_create_signal('social_auth', ['Nonce'])

        # Adding unique constraint on 'Nonce', fields ['server_url', 'timestamp', 'salt']
        db.create_unique(u'social_auth_nonce', ['server_url', 'timestamp', 'salt'])

        # Adding model 'Association' (OpenID provider associations)
        db.create_table(u'social_auth_association', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('server_url', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('handle', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('secret', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('issued', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
            ('lifetime', self.gf('django.db.models.fields.IntegerField')()),
            ('assoc_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ))
        db.send_create_signal('social_auth', ['Association'])

        # Adding unique constraint on 'Association', fields ['server_url', 'handle']
        db.create_unique(u'social_auth_association', ['server_url', 'handle'])

    def backwards(self, orm):
        """Reverse of forwards(): drop constraints first, then the tables."""
        # Removing unique constraint on 'Association', fields ['server_url', 'handle']
        db.delete_unique(u'social_auth_association', ['server_url', 'handle'])

        # Removing unique constraint on 'Nonce', fields ['server_url', 'timestamp', 'salt']
        db.delete_unique(u'social_auth_nonce', ['server_url', 'timestamp', 'salt'])

        # Removing unique constraint on 'UserSocialAuth', fields ['provider', 'uid']
        db.delete_unique(u'social_auth_usersocialauth', ['provider', 'uid'])

        # Deleting model 'UserSocialAuth'
        db.delete_table(u'social_auth_usersocialauth')

        # Deleting model 'Nonce'
        db.delete_table(u'social_auth_nonce')

        # Deleting model 'Association'
        db.delete_table(u'social_auth_association')

    # Frozen ORM snapshot used by South to reconstruct model state at this migration.
    models = {
        u'accounts.user': {
            'Meta': {'object_name': 'User'},
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '250'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_mailout': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'preferred_mailout_time': ('django.db.models.fields.IntegerField', [], {}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'})
        },
        'social_auth.association': {
            'Meta': {'unique_together': "(('server_url', 'handle'),)", 'object_name': 'Association'},
            'assoc_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'handle': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'issued': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'lifetime': ('django.db.models.fields.IntegerField', [], {}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'social_auth.nonce': {
            'Meta': {'unique_together': "(('server_url', 'timestamp', 'salt'),)", 'object_name': 'Nonce'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'salt': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'server_url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'timestamp': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'})
        },
        'social_auth.usersocialauth': {
            'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth'},
            'extra_data': ('social_auth.fields.JSONField', [], {'default': "'{}'"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': u"orm['accounts.User']"})
        }
    }
complete_apps = ['social_auth'] | {
"content_hash": "44f92a05f724f61f72fd22ae8a8d9498",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 136,
"avg_line_length": 56.03669724770642,
"alnum_prop": 0.5857891290111329,
"repo_name": "oinopion/pipeye",
"id": "55cb77355ae73b27a60abcf7eb919fb19bb6ca3c",
"size": "6132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipeye/utils/migrations/social_auth/0001_initial.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "58516"
},
{
"name": "Python",
"bytes": "60140"
}
],
"symlink_target": ""
} |
from datetime import datetime
from rx.core import Observable, AnonymousObservable
from rx.disposables import CompositeDisposable, SingleAssignmentDisposable, \
SerialDisposable
from rx.concurrency import timeout_scheduler
from rx.internal import extensionmethod
@extensionmethod(Observable)
def timeout(self, duetime, other=None, scheduler=None):
    """Returns the source observable sequence or the other observable
    sequence if duetime elapses.

    1 - res = source.timeout(new Date()); # As a date
    2 - res = source.timeout(5000); # 5 seconds
    # As a date and timeout observable
    3 - res = source.timeout(datetime(), rx.Observable.return_value(42))
    # 5 seconds and timeout observable
    4 - res = source.timeout(5000, rx.Observable.return_value(42))
    # As a date and timeout observable
    5 - res = source.timeout(datetime(), rx.Observable.return_value(42),
                             rx.Scheduler.timeout)
    # 5 seconds and timeout observable
    6 - res = source.timeout(5000, rx.Observable.return_value(42),
                             rx.Scheduler.timeout)

    Keyword arguments:
    :param datetime|int duetime: Absolute (specified as a datetime object) or
        relative time (specified as an integer denoting milliseconds) when a
        timeout occurs.
    :param Observable other: Sequence to return in case of a timeout. If not
        specified, a timeout error throwing sequence will be used.
    :param Scheduler scheduler: Scheduler to run the timeout timers on. If not
        specified, the timeout scheduler is used.

    :returns: The source sequence switching to the other sequence in case of
        a timeout.
    :rtype: Observable
    """

    scheduler_method = None
    source = self
    # Default fallback sequence: raise a timeout error downstream.
    other = other or Observable.throw_exception(Exception("Timeout"))
    # Accept a Future in place of an Observable for `other`.
    other = Observable.from_future(other)
    scheduler = scheduler or timeout_scheduler

    # A datetime duetime is absolute; anything else is treated as relative.
    if isinstance(duetime, datetime):
        scheduler_method = scheduler.schedule_absolute
    else:
        scheduler_method = scheduler.schedule_relative

    def subscribe(observer):
        # switched[0]: True once control has been handed over to `other`.
        # _id[0]: generation counter, bumped on every source notification so a
        # timer scheduled for an older generation becomes a no-op.
        switched = [False]
        _id = [0]

        original = SingleAssignmentDisposable()
        subscription = SerialDisposable()
        timer = SerialDisposable()
        subscription.disposable = original

        def create_timer():
            # Capture the generation this timer belongs to.
            my_id = _id[0]

            def action(scheduler, state=None):
                # The timer "wins" only if no notification arrived since it
                # was created (generation unchanged); then switch to `other`.
                switched[0] = (_id[0] == my_id)
                timer_wins = switched[0]
                if timer_wins:
                    subscription.disposable = other.subscribe(observer)

            timer.disposable = scheduler_method(duetime, action)

        create_timer()

        def on_next(x):
            # Forward the element only if we have not already switched;
            # restart the timeout window after each element.
            on_next_wins = not switched[0]
            if on_next_wins:
                _id[0] += 1
                observer.on_next(x)
                create_timer()

        def on_error(e):
            on_error_wins = not switched[0]
            if on_error_wins:
                _id[0] += 1
                observer.on_error(e)

        def on_completed():
            on_completed_wins = not switched[0]
            if on_completed_wins:
                _id[0] += 1
                observer.on_completed()

        original.disposable = source.subscribe(on_next, on_error, on_completed)
        return CompositeDisposable(subscription, timer)
    return AnonymousObservable(subscribe)
| {
"content_hash": "90d2c8c2ce7710a4baec4ea872e8b877",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 79,
"avg_line_length": 34.83673469387755,
"alnum_prop": 0.6303456356180434,
"repo_name": "Sprytile/Sprytile",
"id": "77dda3c432b7b9e5bf003e781cdd035e706d94ff",
"size": "3414",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rx/linq/observable/timeout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "720766"
}
],
"symlink_target": ""
} |
import os
import re
import demistomock as demisto # noqa: F401
import requests
from CommonServerPython import * # noqa: F401
from requests.auth import HTTPBasicAuth
# Integration-wide constants and configuration read from the instance params.
VERSION = "v1.0.1"
USER_AGENT = "ReversingLabs XSOAR TitaniumCloud {version}".format(version=VERSION)
HEADERS = {
    "User-Agent": USER_AGENT
}

# TitaniumCloud API base URL, normalized to drop a single trailing slash.
BASE_URL = demisto.params()['base']
if BASE_URL[-1] == '/':
    BASE_URL = BASE_URL[0:-1]

# Data-browser (rldata) base URL, normalized the same way.
BASE_RL = demisto.params()['baserl']
if BASE_RL[-1] == '/':
    BASE_RL = BASE_RL[0:-1]

AUTH = HTTPBasicAuth(demisto.params()['credentials']['identifier'], demisto.params()['credentials']['password'])
# Whether extended (rldata) information is fetched by default for `file` commands.
EXTENDED = demisto.params()['extended']

if not demisto.params()['proxy']:
    # Clear any proxy settings inherited from the environment. Use pop() with a
    # default so an unset variable does not raise KeyError (the previous bare
    # `del os.environ[...]` crashed whenever one of these was absent).
    for _proxy_var in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
        os.environ.pop(_proxy_var, None)
def get_score(classification):
    """Map a ReversingLabs classification string to a DBot score.

    Returns 0-3 for the known labels and None for anything else.
    """
    return {
        "UNKNOWN": 0,
        "KNOWN": 1,
        "SUSPICIOUS": 2,
        "MALICIOUS": 3,
    }.get(classification)
# pylint: disable=function-redefined
def return_error(data):  # type: ignore
    """
    Return error as result and exit - filter 404 as non-errors
    """
    if '404' in data:
        # A 404 ("no reference found") is surfaced as a plain note and does
        # not abort execution.
        demisto.results({
            'Type': entryTypes['note'],
            'ContentsFormat': formats['text'],
            'Contents': data
        })
        return
    # Any other error is reported as an error entry and terminates the run.
    demisto.results({
        'Type': entryTypes['error'],
        'ContentsFormat': formats['text'],
        'Contents': data
    })
    sys.exit(0)
def validate_hash(hash_value):
    """Return the hash type ('md5', 'sha1' or 'sha256') of hash_value.

    Calls return_error() (which exits) when the length or characters are invalid.
    """
    # Length uniquely determines the candidate hash type.
    length_to_type = {32: 'md5', 40: 'sha1', 64: 'sha256'}
    hash_type = length_to_type.get(len(hash_value))
    if hash_type is None:
        return_error('Provided input string length does not match any hash type')
    # All characters must be hexadecimal digits.
    if not re.match(r'([a-fA-F\d]{%d})' % len(hash_value), hash_value):
        return_error('Provided input string is not as hash due to containing invalid characters')
    return hash_type
def validate_http(r):
    """Validate an HTTP response.

    Returns (True, parsed_json) on HTTP 200, otherwise (False, error_message).
    """
    code = r.status_code
    if code == 200:
        try:
            return True, r.json()
        except Exception as e:
            # 200 with a non-JSON body is still a failure.
            return False, 'HTTP response is not JSON [{error}] - {body}'.format(error=e, body=r.text)
    if code in (401, 403):
        return False, ('Credential error - The provided TitaniumCloud credentials are either incorrect or lack '
                       'API roles [{code}] - {body}'.format(code=code, body=r.text))
    if code == 404:
        return False, ('No reference found - There were no results found for the provided sample '
                       '[{code}] - {body}'.format(code=code, body=r.text))
    return False, 'An error has occurred [{code}] - {body}'.format(
        code=code,
        body=r.text
    )
def rldata(hash_type, hash_value):
    """
    Get the extended RL data

    Queries the TitaniumCloud data-browser rldata endpoint and, on success,
    returns (True, (markdown, entry_context, raw_json)); on failure returns
    (False, error_message).
    """
    endpoint = '/api/databrowser/rldata/query/{hash_type}/{hash_value}?format=json'.format(
        hash_value=hash_value,
        hash_type=hash_type
    )
    ok, r = validate_http(requests.get(
        BASE_RL + endpoint,
        auth=AUTH,
        headers=HEADERS
    ))
    if not ok:
        # r carries the error message from validate_http.
        return ok, r
    contents = demisto.get(r, 'rl.sample')
    if not contents:
        return False, 'Unexpected JSON reply:\n' + str(r)

    # Pull out each digest/size field if the service returned it.
    md5 = contents.get('md5')
    sha1 = contents.get('sha1')
    sha256 = contents.get('sha256')
    sha512 = contents.get('sha512')
    ssdeep = contents.get('ssdeep')
    size = contents.get('sample_size')

    # Build the entry context and human-readable markdown in lockstep,
    # adding only the fields that were present.
    ec = {}
    md = '## ReversingLabs extended data\n'
    if md5:
        ec['MD5'] = md5
        md += 'MD5: **' + md5 + '**\n'
    if sha1:
        ec['SHA1'] = sha1
        md += 'SHA1: **' + sha1 + '**\n'
    if sha256:
        ec['SHA256'] = sha256
        md += 'SHA256: **' + sha256 + '**\n'
    if sha512:
        ec['SHA512'] = sha512
        md += 'SHA512: **' + sha512 + '**\n'
    if ssdeep:
        ec['SSDeep'] = ssdeep
        md += 'SSDEEP: **' + ssdeep + '**\n'
    if size:
        ec['Size'] = size
        md += 'Size: **' + str(size) + '**\n'

    scan_entries = demisto.get(contents, 'xref.entries')
    if len(scan_entries) > 0:
        # Sort by latest date
        scan_entries_sorted = sorted(scan_entries, key=lambda entry: entry['record_time'], reverse=True)
        scanners = scan_entries_sorted[0].get('scanners')
        if scanners:
            # Keep only scanners that reported a (truthy) detection result.
            recent_detections = [item for item in scanners if item['result']]
            if recent_detections:
                md += '***\n'
                md += '#### Recent Detections ({record_time}):\n'.format(
                    record_time=scan_entries_sorted[0].get('record_time'))
                md += "\n".join(['{} -- {}'.format(item['name'], item['result']) for item in recent_detections])
    return True, (md, ec, r)
def mwp(hash_type, hash_value):
    """
    Get the malware presence for the given hash

    Queries the TitaniumCloud malware_presence endpoint and, on success,
    returns (True, (markdown, entry_context, raw_json)); on failure returns
    (False, error_message).
    """
    endpoint = '/api/databrowser/malware_presence/query/{hash_type}/{hash_value}?extended=true&format=json'.format(
        hash_value=hash_value,
        hash_type=hash_type
    )
    ok, r = validate_http(requests.get(
        BASE_URL + endpoint,
        auth=AUTH,
        headers=HEADERS
    ))
    if not ok:
        # r carries the error message from validate_http.
        return ok, r
    contents = demisto.get(r, 'rl.malware_presence')
    if not contents:
        return False, 'Unexpected JSON reply:\n' + str(r)

    classification = contents["status"]

    # Human-readable markdown summary of the verdict.
    md = '## ReversingLabs Malware Presence for {hash_value}\n'.format(hash_value=hash_value)
    md += 'Malware status: **{mwp_status}**\n'.format(mwp_status=contents['status'])
    md += 'First seen: **' + demisto.gets(contents, 'first_seen') + '**\n'
    md += 'Last seen: **' + demisto.gets(contents, 'last_seen') + '**\n'
    md += 'Positives / Total: **' + demisto.gets(contents, 'scanner_match') + ' / ' + \
          demisto.gets(contents, 'scanner_count') + '**\n'
    md += 'Trust factor: **' + demisto.gets(contents, 'trust_factor') + '**\n'
    if contents['status'] == 'MALICIOUS':
        # Threat details are only populated for malicious verdicts.
        md += 'Threat name: **' + demisto.gets(contents, 'threat_name') + '**\n'
        md += 'Threat level: **' + demisto.gets(contents, 'threat_level') + '**\n'

    score = get_score(classification)
    prop = contents['status'].title()
    # Entry context: file indicator with the verdict plus DBotScore entries
    # for both 'hash' and 'file' indicator types.
    ec = {
        outputPaths['file']: {
            hash_type.upper(): hash_value,
            prop: {
                'Vendor': 'ReversingLabs',
                'Detections': demisto.gets(contents, 'scanner_match'),
                'TotalEngines': demisto.gets(contents, 'scanner_count')
            },
            'properties_to_append': prop
        },
        'DBotScore': [
            {
                'Indicator': hash_value,
                'Type': 'hash',
                'Vendor': 'ReversingLabs',
                'Score': score
            },
            {
                'Indicator': hash_value,
                'Type': 'file',
                'Vendor': 'ReversingLabs',
                'Score': score
            }
        ]
    }
    return True, (md, ec, r)
# Command dispatch: executed when loaded by the Demisto/XSOAR runtime.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    if demisto.command() == 'test-module':
        # Connectivity test against a fixed, known sample hash.
        ok, r = validate_http(requests.get(
            BASE_URL + '/api/databrowser/malware_presence/query/md5/6a95d3d00267c9fd80bd42122738e726?extended=true&'
                       'format=json', auth=AUTH))
        if ok:
            demisto.results('ok')
        else:
            return_error(r)
    elif demisto.command() == 'file':
        hash_value = demisto.args()['file']
        hash_type = validate_hash(hash_value)
        ok, res = mwp(hash_type, hash_value)
        if not ok:
            return_error(res)
        md, ec, r = res
        # A per-command 'extended' argument overrides the instance-level setting.
        if demisto.get(demisto.args(), 'extended'):
            EXTENDED = True if demisto.args()['extended'].lower() == 'true' else False
        if EXTENDED:
            ok, extended_res = rldata(hash_type, hash_value)
            if ok:
                # Merge the extended markdown, raw data and context into the
                # malware-presence results.
                md += '\n' + extended_res[0]
                r['rl']['sample'] = extended_res[2]['rl']['sample']
                score = ec['DBotScore'][0]['Score']
                for k in extended_res[1]:
                    ec[outputPaths['file']][k] = extended_res[1][k]
                    # Add DBotScore entries for the other digests of the same
                    # sample (skip the one that was queried).
                    if k in ('MD5', 'SHA1', 'SHA256') and k.lower() != hash_type:
                        ec['DBotScore'].append({'Indicator': extended_res[1][k], 'Type': 'hash',
                                                'Vendor': 'ReversingLabs', 'Score': score})
                        ec['DBotScore'].append({'Indicator': extended_res[1][k], 'Type': 'file',
                                                'Vendor': 'ReversingLabs', 'Score': score})
        demisto.results({'Type': entryTypes['note'], 'ContentsFormat': formats['json'],
                         'Contents': r, 'EntryContext': ec, 'HumanReadable': md})
| {
"content_hash": "593cbb0b4928bf904dacc12f013fe43d",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 116,
"avg_line_length": 35.452471482889734,
"alnum_prop": 0.5306735306735306,
"repo_name": "VirusTotal/content",
"id": "738cbd7269ddb5dadc7dbc566b1669393dfec22f",
"size": "9324",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/ReversingLabs_Titanium_Cloud/Integrations/ReversingLabsTitaniumCloud/ReversingLabsTitaniumCloud.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from signprot import views
# URL routes for the signalling-protein (signprot) app.
# NOTE: order matters - the generic '(?P<slug>...)/' detail route must stay
# after the more specific prefixed routes that precede it.
urlpatterns = [
    url(r'^$', views.BrowseSelection.as_view(), name='index'),
    url(r'^statistics', views.GProtein, name='gprotein'),
    url(r'^ginterface/(?P<protein>[^/]*?)/$', views.Ginterface, name='render'),
    url(r'^ginterface[/]?$', views.TargetSelection.as_view(), name='targetselection'),
    # AJAX endpoints consumed by the front-end visualisations.
    url(r'^ajax/barcode/(?P<slug>[-\w]+)/(?P<cutoff>\d+\.\d{0,2})/$', views.ajaxBarcode, name='ajaxBarcode'),
    url(r'^ajax/interface/(?P<slug>[-\w]+)/$', views.ajaxInterface, name='ajaxInterface'),
    url(r'^(?P<slug>[-\w]+)/$', views.signprotdetail, name='signprotdetail'),
    url(r'^structure/(?P<pdbname>[-\w]+)/$', views.StructureInfo, name='StructureInfo'),
    url(r'^family/(?P<slug>[-\w]+)/$', views.familyDetail, name='familyDetail'),
    url(r'^matrix$', views.InteractionMatrix, name='InteractionMatrix'),
]
| {
"content_hash": "2199594707c33cc4629b20386a1e355a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 109,
"avg_line_length": 55.3125,
"alnum_prop": 0.6463276836158192,
"repo_name": "fosfataza/protwis",
"id": "d271c40cc7130279dfbbb90e38a1113fa9b351d2",
"size": "885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "signprot/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "104739"
},
{
"name": "HTML",
"bytes": "1426027"
},
{
"name": "JavaScript",
"bytes": "1127392"
},
{
"name": "Python",
"bytes": "2593740"
},
{
"name": "Shell",
"bytes": "386"
}
],
"symlink_target": ""
} |
import unittest
from azure.cli.testsdk import ScenarioTest, ResourceGroupPreparer, record_only
class TestMonitorPrivateLinkScope(ScenarioTest):
    """Scenario tests for the `az monitor private-link-scope` command group."""

    def __init__(self, method_name, config_file=None, recording_dir=None, recording_name=None, recording_processors=None,
                 replay_processors=None, recording_patches=None, replay_patches=None):
        # The scenario uses `az monitor app-insights ...`, which lives in the
        # application-insights CLI extension, so install it up front.
        super(TestMonitorPrivateLinkScope, self).__init__(method_name)
        self.cmd('extension add -n application-insights')

    # @record_only() # record_only as the private-link-scope scoped-resource cannot find the components of application insights
    @unittest.skip('If it cannot run, how to record_only, how yaml file is created')
    @ResourceGroupPreparer(location='westus2')
    def test_monitor_private_link_scope_scenario(self, resource_group, resource_group_location):
        """End-to-end flow: scope CRUD, scoped resources, private endpoint connection lifecycle."""
        self.kwargs.update({
            'rg': resource_group,
            'scope': 'clitestscopename',
            'assigned_app': 'assigned_app',
            'assigned_ws': 'assigned_ws',
            'workspace': self.create_random_name('clitest', 20),
            'app': self.create_random_name('clitest', 20),
            'vnet': self.create_random_name('cli-vnet-', 24),
            'subnet': self.create_random_name('cli-subnet-', 24),
            'pe': self.create_random_name('cli-pe-', 24),
            'pe_connection': self.create_random_name('cli-pec-', 24),
            'loc': resource_group_location
        })

        # Basic CRUD on the private-link scope itself.
        self.cmd('monitor private-link-scope create -n {scope} -g {rg}', checks=[
            self.check('name', '{scope}')
        ])
        self.cmd('monitor private-link-scope update -n {scope} -g {rg} --tags tag1=d1', checks=[
            self.check('tags.tag1', 'd1')
        ])
        self.cmd('monitor private-link-scope show -n {scope} -g {rg}', checks=[
            self.check('tags.tag1', 'd1')
        ])
        self.cmd('monitor private-link-scope list -g {rg}', checks=[
            self.check('length(@)', 1)
        ])
        self.cmd('monitor private-link-scope list')

        # Create the two resources that will be linked into the scope.
        app_id = self.cmd('monitor app-insights component create -a {app} -g {rg} -l eastus').get_output_in_json()['id']
        workspace_id = self.cmd('monitor log-analytics workspace create -n {workspace} -g {rg} -l {loc}').get_output_in_json()['id']
        self.kwargs.update({
            'app_id': app_id,
            'workspace_id': workspace_id
        })

        # this command failed as service cannot find component of application insights
        self.cmd('monitor private-link-scope scoped-resource create -g {rg} -n {assigned_app} --linked-resource {app_id} --scope-name {scope}', checks=[
            self.check('name', '{assigned_app}')
        ])
        self.cmd('monitor private-link-scope scoped-resource show -g {rg} -n {assigned_app} --scope-name {scope}', checks=[
            self.check('name', '{assigned_app}')
        ])
        self.cmd('monitor private-link-scope scoped-resource list -g {rg} --scope-name {scope}', checks=[
            self.check('length(@)', 1)
        ])
        self.cmd('monitor private-link-scope scoped-resource create -g {rg} -n {assigned_ws} --linked-resource {workspace_id} --scope-name {scope}', checks=[
            self.check('name', '{assigned_ws}')
        ])
        self.cmd('monitor private-link-scope scoped-resource list -g {rg} --scope-name {scope}', checks=[
            self.check('length(@)', 2)
        ])

        self.cmd('monitor private-link-scope private-link-resource list --scope-name {scope} -g {rg}', checks=[
            self.check('length(@)', 1)
        ])

        # Prepare network
        self.cmd('network vnet create -n {vnet} -g {rg} -l {loc} --subnet-name {subnet}',
                 checks=self.check('length(newVNet.subnets)', 1))
        self.cmd('network vnet subnet update -n {subnet} --vnet-name {vnet} -g {rg} '
                 '--disable-private-endpoint-network-policies true',
                 checks=self.check('privateEndpointNetworkPolicies', 'Disabled'))

        # Create a private endpoint connection
        pr = self.cmd('monitor private-link-scope private-link-resource list --scope-name {scope} -g {rg}').get_output_in_json()
        self.kwargs['group_id'] = pr[0]['groupId']

        private_link_scope = self.cmd('monitor private-link-scope show -n {scope} -g {rg}').get_output_in_json()
        self.kwargs['scope_id'] = private_link_scope['id']

        private_endpoint = self.cmd(
            'network private-endpoint create -g {rg} -n {pe} --vnet-name {vnet} --subnet {subnet} -l {loc} '
            '--connection-name {pe_connection} --private-connection-resource-id {scope_id} '
            '--group-ids {group_id}').get_output_in_json()
        self.assertEqual(private_endpoint['name'], self.kwargs['pe'])
        self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['name'], self.kwargs['pe_connection'])
        self.assertEqual(
            private_endpoint['privateLinkServiceConnections'][0]['privateLinkServiceConnectionState']['status'],
            'Approved')
        self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['provisioningState'], 'Succeeded')
        self.assertEqual(private_endpoint['privateLinkServiceConnections'][0]['groupIds'][0], self.kwargs['group_id'])
        self.kwargs['pe_id'] = private_endpoint['privateLinkServiceConnections'][0]['id']

        # Show the connection at monitor private-link-scope
        private_endpoint_connections = self.cmd('monitor private-link-scope show --name {scope} -g {rg}').get_output_in_json()['privateEndpointConnections']
        self.assertEqual(len(private_endpoint_connections), 1)
        self.assertEqual(private_endpoint_connections[0]['privateLinkServiceConnectionState']['status'], 'Approved')

        self.kwargs['scope_pec_id'] = private_endpoint_connections[0]['id']
        self.kwargs['scope_pec_name'] = private_endpoint_connections[0]['name']

        # Exercise the approve/reject/delete lifecycle of the connection.
        self.cmd('monitor private-link-scope private-endpoint-connection show --scope-name {scope} -g {rg} --name {scope_pec_name}',
                 checks=self.check('id', '{scope_pec_id}'))
        self.cmd('monitor private-link-scope private-endpoint-connection approve --scope-name {scope} -g {rg} --name {scope_pec_name}')
        self.cmd('monitor private-link-scope private-endpoint-connection reject --scope-name {scope} -g {rg} --name {scope_pec_name}',
                 checks=[self.check('privateLinkServiceConnectionState.status', 'Rejected')])
        self.cmd('monitor private-link-scope private-endpoint-connection delete --id {scope_pec_id} -y')
        self.cmd('monitor private-link-scope show --name {scope} -g {rg}', checks=[
            self.check('privateEndpointConnections', None)
        ])
        self.cmd('monitor private-link-scope scoped-resource delete -g {rg} -n {assigned_app} --scope-name {scope} -y')
        self.cmd('monitor private-link-scope scoped-resource list -g {rg} --scope-name {scope}', checks=[
            self.check('length(@)', 1)
        ])

        # Deleting the scope should make subsequent `show` exit with code 3.
        self.cmd('monitor private-link-scope delete -n {scope} -g {rg} -y')
        with self.assertRaisesRegex(SystemExit, '3'):
            self.cmd('monitor private-link-scope show -n {scope} -g {rg}')
| {
"content_hash": "2dcb835cbc06d21036fcd1e235c44937",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 157,
"avg_line_length": 56.23255813953488,
"alnum_prop": 0.6268265784394816,
"repo_name": "yugangw-msft/azure-cli",
"id": "d29fe0e52d0166039c5b06d2d0bd86202216492c",
"size": "7599",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "src/azure-cli/azure/cli/command_modules/monitor/tests/latest/test_monitor_private_link_scope.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "5355"
},
{
"name": "Batchfile",
"bytes": "14110"
},
{
"name": "Bicep",
"bytes": "1679"
},
{
"name": "C#",
"bytes": "1971"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "Dockerfile",
"bytes": "8427"
},
{
"name": "HTML",
"bytes": "794"
},
{
"name": "JavaScript",
"bytes": "1404"
},
{
"name": "Jupyter Notebook",
"bytes": "389"
},
{
"name": "PowerShell",
"bytes": "1781"
},
{
"name": "Python",
"bytes": "24270340"
},
{
"name": "Rich Text Format",
"bytes": "12032"
},
{
"name": "Roff",
"bytes": "1036959"
},
{
"name": "Shell",
"bytes": "56023"
},
{
"name": "TSQL",
"bytes": "1145"
}
],
"symlink_target": ""
} |
"""This example updates an ad group.
To get ad groups, run get_ad_groups.py.
"""
import argparse
import sys
from google.ads.googleads.client import GoogleAdsClient
from google.ads.googleads.errors import GoogleAdsException
from google.api_core import protobuf_helpers
# [START update_ad_group]
def main(client, customer_id, ad_group_id, cpc_bid_micro_amount):
    """Pause the given ad group and set its CPC bid to the given micro amount.

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: the Google Ads customer ID.
        ad_group_id: the ID of the ad group to update.
        cpc_bid_micro_amount: the new CPC bid, in micros.
    """
    service = client.get_service("AdGroupService")

    # Build the update operation with the desired field values.
    operation = client.get_type("AdGroupOperation")
    updated_group = operation.update
    updated_group.resource_name = service.ad_group_path(
        customer_id, ad_group_id
    )
    updated_group.status = client.enums.AdGroupStatusEnum.PAUSED
    updated_group.cpc_bid_micros = cpc_bid_micro_amount
    # The field mask tells the API which fields of the ad group to overwrite.
    client.copy_from(
        operation.update_mask,
        protobuf_helpers.field_mask(None, updated_group._pb),
    )

    # Send the mutate request and report the updated resource name.
    response = service.mutate_ad_groups(
        customer_id=customer_id, operations=[operation]
    )
    print(f"Updated ad group {response.results[0].resource_name}.")
# [END update_ad_group]
# Script entry point: parse CLI arguments and invoke main().
if __name__ == "__main__":
    # GoogleAdsClient will read the google-ads.yaml configuration file in the
    # home directory if none is specified.
    googleads_client = GoogleAdsClient.load_from_storage(version="v12")

    parser = argparse.ArgumentParser(
        description=(
            "Updates an ad group for specified customer and campaign "
            "id with the given bid micro amount."
        )
    )
    # The following argument(s) should be provided to run the example.
    parser.add_argument(
        "-c",
        "--customer_id",
        type=str,
        required=True,
        help="The Google Ads customer ID.",
    )
    parser.add_argument(
        "-a", "--ad_group_id", type=str, required=True, help="The ad group ID."
    )
    parser.add_argument(
        "-b",
        "--cpc_bid_micro_amount",
        type=int,
        required=True,
        help="The cpc bid micro amount.",
    )
    args = parser.parse_args()

    try:
        main(
            googleads_client,
            args.customer_id,
            args.ad_group_id,
            args.cpc_bid_micro_amount,
        )
    except GoogleAdsException as ex:
        # Print the request ID and every error detail, then exit non-zero.
        print(
            f'Request with ID "{ex.request_id}" failed with status '
            f'"{ex.error.code().name}" and includes the following errors:'
        )
        for error in ex.failure.errors:
            print(f'\tError with message "{error.message}".')
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print(f"\t\tOn field: {field_path_element.field_name}")
        sys.exit(1)
| {
"content_hash": "086c79d7e761dd062caf70d7a5874695",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 31.325842696629213,
"alnum_prop": 0.6244619799139168,
"repo_name": "googleads/google-ads-python",
"id": "8c92ab7a64a1192505698bff516f13e3702c2db0",
"size": "3385",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/basic_operations/update_ad_group.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
import StringIO
import logging
import mock
from twisted.internet import defer
from twisted.trial import unittest
from lbrynet.core import log_support
class TestLogger(unittest.TestCase):
    """Tests for lbrynet.core.log_support.Logger.fail() failure logging."""

    def raiseError(self):
        raise Exception('terrible things happened')

    def triggerErrback(self, callback=None):
        # Build a deferred whose callback chain raises, so that
        # self.log.fail(callback) runs as the errback with 'My message'.
        d = defer.Deferred()
        d.addCallback(lambda _: self.raiseError())
        d.addErrback(self.log.fail(callback), 'My message')
        d.callback(None)
        return d

    def setUp(self):
        # Route the test logger's output into an in-memory stream so the
        # assertions can inspect exactly what was logged.
        self.log = log_support.Logger('test')
        self.stream = StringIO.StringIO()
        handler = logging.StreamHandler(self.stream)
        handler.setFormatter(logging.Formatter("%(filename)s:%(lineno)d - %(message)s"))
        self.log.addHandler(handler)

    def test_can_log_failure(self):
        def output_lines():
            return self.stream.getvalue().split('\n')

        # the line number could change if this file gets refactored
        expected_first_line = 'test_log_support.py:18 - My message: terrible things happened'

        # testing the entirety of the message is futile as the
        # traceback will depend on the system the test is being run on
        # but hopefully these two tests are good enough
        d = self.triggerErrback()
        d.addCallback(lambda _: self.assertEquals(expected_first_line, output_lines()[0]))
        d.addCallback(lambda _: self.assertEqual(10, len(output_lines())))
        return d

    def test_can_log_failure_with_callback(self):
        # The callback passed to log.fail() must be invoked with the Failure.
        callback = mock.Mock()
        d = self.triggerErrback(callback)
        d.addCallback(lambda _: callback.assert_called_once_with(mock.ANY))
        return d
| {
"content_hash": "46b1691d20599d69209725f6e92a83eb",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 93,
"avg_line_length": 35.270833333333336,
"alnum_prop": 0.6639102185469581,
"repo_name": "zestyr/lbry",
"id": "cf7bdfc27ffbcfc65b04fcbc3a9d70c0a38318f9",
"size": "1693",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/core/test_log_support.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PowerShell",
"bytes": "1080"
},
{
"name": "Python",
"bytes": "1034464"
},
{
"name": "Ruby",
"bytes": "309"
},
{
"name": "Shell",
"bytes": "2881"
}
],
"symlink_target": ""
} |
"""Common test lib."""
import textwrap
import sys
from contextlib import contextmanager
from pymdown import compat
@contextmanager
def capture(command, *args, **kwargs):
    """
    Capture the stdout temporarily in test environment.

    Takes a command to execute, and during its execution,
    we will redirect stdout so we can capture it.

    Yields the captured text produced by ``command``.
    """
    out, sys.stdout = sys.stdout, compat.StringIO()
    try:
        command(*args, **kwargs)
        sys.stdout.seek(0)
        yield sys.stdout.read()
    finally:
        # Bug fix: always restore the real stdout, even when `command` or
        # the caller's `with` body raises; previously an exception left
        # stdout hijacked for the rest of the test run.
        sys.stdout = out
def dedent(text):
    """Remove common leading whitespace and surrounding blank space."""
    cleaned = textwrap.dedent(text)
    return cleaned.strip()
| {
"content_hash": "5863721fd206a0ba78135f513400fc62",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 57,
"avg_line_length": 22.14814814814815,
"alnum_prop": 0.68561872909699,
"repo_name": "facelessuser/PyMdown",
"id": "e90114e215cf1d91fbb6b2b2a0f38fd27a8bcd34",
"size": "598",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28732"
},
{
"name": "HTML",
"bytes": "1274"
},
{
"name": "JavaScript",
"bytes": "5902"
},
{
"name": "Python",
"bytes": "95376"
}
],
"symlink_target": ""
} |
"""
Support for IP Cameras.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/camera.generic/
"""
import asyncio
import logging
import aiohttp
import async_timeout
import requests
from requests.auth import HTTPDigestAuth
import voluptuous as vol
from homeassistant.const import (
CONF_NAME, CONF_USERNAME, CONF_PASSWORD, CONF_AUTHENTICATION,
HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION, CONF_VERIFY_SSL)
from homeassistant.exceptions import TemplateError
from homeassistant.components.camera import (
PLATFORM_SCHEMA, DEFAULT_CONTENT_TYPE, Camera)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers import config_validation as cv
from homeassistant.util.async_ import run_coroutine_threadsafe
_LOGGER = logging.getLogger(__name__)
# Configuration keys and defaults for this camera platform.
CONF_CONTENT_TYPE = 'content_type'
CONF_LIMIT_REFETCH_TO_URL_CHANGE = 'limit_refetch_to_url_change'
CONF_STILL_IMAGE_URL = 'still_image_url'
CONF_FRAMERATE = 'framerate'
DEFAULT_NAME = 'Generic Camera'
# Extend the base camera schema with generic-camera options.
# `still_image_url` is a template, so the URL may change per render.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_STILL_IMAGE_URL): cv.template,
    vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION):
        vol.In([HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]),
    vol.Optional(CONF_LIMIT_REFETCH_TO_URL_CHANGE, default=False): cv.boolean,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_PASSWORD): cv.string,
    vol.Optional(CONF_USERNAME): cv.string,
    vol.Optional(CONF_CONTENT_TYPE, default=DEFAULT_CONTENT_TYPE): cv.string,
    vol.Optional(CONF_FRAMERATE, default=2): cv.positive_int,  # frames/second
    vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
})
async def async_setup_platform(hass, config, async_add_entities,
                               discovery_info=None):
    """Register a single generic IP camera entity built from `config`."""
    camera = GenericCamera(hass, config)
    async_add_entities([camera])
class GenericCamera(Camera):
    """A generic implementation of an IP camera.

    Fetches still images from a (possibly templated) URL and serves them
    as an MJPEG-style stream at a configured frame rate.
    """
    def __init__(self, hass, device_info):
        """Initialize a generic camera from a validated config dict."""
        super().__init__()
        self.hass = hass
        self._authentication = device_info.get(CONF_AUTHENTICATION)
        self._name = device_info.get(CONF_NAME)
        self._still_image_url = device_info[CONF_STILL_IMAGE_URL]
        # The template needs a hass reference before it can be rendered.
        self._still_image_url.hass = hass
        self._limit_refetch = device_info[CONF_LIMIT_REFETCH_TO_URL_CHANGE]
        # CONF_FRAMERATE is frames per second; store seconds between frames.
        self._frame_interval = 1 / device_info[CONF_FRAMERATE]
        self.content_type = device_info[CONF_CONTENT_TYPE]
        self.verify_ssl = device_info[CONF_VERIFY_SSL]
        username = device_info.get(CONF_USERNAME)
        password = device_info.get(CONF_PASSWORD)
        # Digest auth goes through the synchronous `requests` library
        # (aiohttp lacks digest support — see async_camera_image); basic
        # auth uses aiohttp directly.
        if username and password:
            if self._authentication == HTTP_DIGEST_AUTHENTICATION:
                self._auth = HTTPDigestAuth(username, password)
            else:
                self._auth = aiohttp.BasicAuth(username, password=password)
        else:
            self._auth = None
        # Cache of the last rendered URL and last fetched image; used both
        # as an error fallback and to skip refetching when the URL is
        # unchanged and `limit_refetch` is enabled.
        self._last_url = None
        self._last_image = None
    @property
    def frame_interval(self):
        """Return the interval between frames of the mjpeg stream."""
        return self._frame_interval
    def camera_image(self):
        """Return bytes of camera image (sync wrapper around the async API)."""
        return run_coroutine_threadsafe(
            self.async_camera_image(), self.hass.loop).result()
    async def async_camera_image(self):
        """Return a still image response from the camera.

        On any fetch/render error the previously cached image is returned
        (may be None on the first call).
        """
        try:
            url = self._still_image_url.async_render()
        except TemplateError as err:
            _LOGGER.error(
                "Error parsing template %s: %s", self._still_image_url, err)
            return self._last_image
        if url == self._last_url and self._limit_refetch:
            return self._last_image
        # aiohttp don't support DigestAuth yet
        if self._authentication == HTTP_DIGEST_AUTHENTICATION:
            def fetch():
                """Read image from a URL."""
                try:
                    response = requests.get(url, timeout=10, auth=self._auth,
                                            verify=self.verify_ssl)
                    return response.content
                except requests.exceptions.RequestException as error:
                    _LOGGER.error("Error getting camera image: %s", error)
                    return self._last_image
            # Run the blocking requests call in the executor pool.
            self._last_image = await self.hass.async_add_job(
                fetch)
        # async
        else:
            try:
                websession = async_get_clientsession(
                    self.hass, verify_ssl=self.verify_ssl)
                with async_timeout.timeout(10, loop=self.hass.loop):
                    response = await websession.get(
                        url, auth=self._auth)
                self._last_image = await response.read()
            except asyncio.TimeoutError:
                _LOGGER.error("Timeout getting image from: %s", self._name)
                return self._last_image
            except aiohttp.ClientError as err:
                _LOGGER.error("Error getting new camera image: %s", err)
                return self._last_image
        self._last_url = url
        return self._last_image
    @property
    def name(self):
        """Return the name of this device."""
        return self._name
| {
"content_hash": "2f62aa836ec76a0a61442ea7da9d7fdc",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 78,
"avg_line_length": 38.02097902097902,
"alnum_prop": 0.632701857642082,
"repo_name": "nugget/home-assistant",
"id": "ae7e849c234d1e5890d0ef4b0543eff8a30f8c27",
"size": "5437",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/camera/generic.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14492390"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17526"
}
],
"symlink_target": ""
} |
from django.template import Library, Template, TemplateSyntaxError, Node, Variable
from courant.core.news.models import *
from courant.core.utils.text import split_contents
from datetime import timedelta, datetime
register = Library()
class WeeksIssuesNode(Node):
    """Template node that collects the weekday issues of an issue's week.

    Resolves the given issue variable, finds the Monday of the week it
    was published in, and stores a five-element list (Monday-Friday) in
    the context under ``varname``. Each element is a dict with a ``day``
    key and, when an issue exists for that day, an ``issue`` key.
    """
    def __init__(self, issue, varname):
        self.issue = Variable(issue)
        self.varname = varname
    def render(self, context):
        issue = self.issue.resolve(context)
        # Monday of the week the issue was published in.
        monday = issue.published_at - timedelta(issue.published_at.weekday())
        days = []
        for offset in range(5):  # weekdays only: Monday through Friday
            dt = monday + timedelta(offset)
            try:
                # Fix: the loop index was previously rebound to the fetched
                # Issue ("i = Issue.objects.get(...)"), shadowing the counter.
                day_issue = Issue.objects.get(published_at__year=int(dt.year),
                                              published_at__month=int(dt.month),
                                              published_at__day=int(dt.day))
                days.append({'day': dt, 'issue': day_issue})
            except Issue.DoesNotExist:
                days.append({'day': dt})
        context[self.varname] = days
        return ''
def do_weeks_issues(parser, token):
    """
    Finds all issues published in the same week as the specified issue.

    Syntax::

        {% weeks_issues [issue] as [varname] %}

    Raises TemplateSyntaxError when the tag does not have exactly the
    two arguments shown above.
    """
    bits = token.contents.split()
    if len(bits) != 4:
        # Bug fix: previously raised via the undefined name
        # ``template.TemplateSyntaxError``, which produced a NameError at
        # runtime; only TemplateSyntaxError itself is imported.
        raise TemplateSyntaxError("'weeks_issues' tag takes exactly 2 arguments")
    return WeeksIssuesNode(bits[1], bits[3])
register.tag('weeks_issues', do_weeks_issues) | {
"content_hash": "03ca0b4fda73632b528b413dd13ae45f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 90,
"avg_line_length": 34.906976744186046,
"alnum_prop": 0.5829447035309794,
"repo_name": "maxcutler/Courant-News",
"id": "046190add5886ce9078b50c6bf81cf109d22a8ba",
"size": "1501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "courant/core/news/templatetags/issues.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "47452"
},
{
"name": "Python",
"bytes": "487441"
}
],
"symlink_target": ""
} |
import hashlib
import logging
import os
import sys
from collections import namedtuple
from .posix import get_drive_list as get_drive_list_posix
from .windows import get_drive_list as get_drive_list_windows
logger = logging.getLogger(__name__)
# The name of the folder we export data and content to, and what we look for in drives when we want to import
EXPORT_FOLDER_NAME = "KOLIBRI_DATA"
# Immutable record describing one mounted drive/partition, as produced by
# enumerate_mounted_disk_partitions().
DriveData = namedtuple(
    "DriveData",
    [
        "id",          # stable hash-derived identifier for the drive
        "name",
        "path",        # mount point / root path of the drive
        "writable",    # whether the current user can write to `path`
        "datafolder",  # `path` joined with EXPORT_FOLDER_NAME
        "freespace",
        "totalspace",
        "filesystem",
        "drivetype",
        "metadata",    # free-form dict, empty at enumeration time
    ]
)
def enumerate_mounted_disk_partitions():
    """
    Searches the local device for attached partitions/drives, and computes metadata about each one.
    Returns a dict that maps drive IDs to DriveData objects containing metadata about each drive.
    Note that drives for which the current user does not have read permissions are not included.
    """
    if sys.platform == "win32":
        partitions = get_drive_list_windows()
    else:
        partitions = get_drive_list_posix()
    result = {}
    for partition in partitions:
        mount_path = partition["path"]
        # Prefer the filesystem GUID as the identity source; fall back to
        # the mount path when no GUID is available.
        key_source = (partition["guid"] or mount_path).encode('utf-8')
        drive_id = hashlib.sha1(key_source).hexdigest()[:32]
        result[drive_id] = DriveData(
            id=drive_id,
            path=mount_path,
            name=partition["name"],
            writable=os.access(mount_path, os.W_OK),
            datafolder=get_kolibri_data_dir_path(mount_path),
            freespace=partition["freespace"],
            totalspace=partition["totalspace"],
            filesystem=partition["filesystem"],
            drivetype=partition["drivetype"],
            metadata={},
        )
    return result
def get_kolibri_data_dir_path(folder):
    """Return ``folder`` joined with the Kolibri export folder name."""
    return os.path.join(folder, EXPORT_FOLDER_NAME)
| {
"content_hash": "3313984de1d732161d934ee7414759c7",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 109,
"avg_line_length": 26.613333333333333,
"alnum_prop": 0.6177354709418837,
"repo_name": "jonboiser/kolibri",
"id": "fcc1f07147f2179029c992d5100cab2d10f4bab1",
"size": "1996",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "kolibri/core/discovery/utils/filesystem/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "864"
},
{
"name": "CSS",
"bytes": "29663"
},
{
"name": "HTML",
"bytes": "12733"
},
{
"name": "JavaScript",
"bytes": "786460"
},
{
"name": "Makefile",
"bytes": "7625"
},
{
"name": "Python",
"bytes": "1204842"
},
{
"name": "Shell",
"bytes": "10412"
},
{
"name": "Vue",
"bytes": "809549"
}
],
"symlink_target": ""
} |
"""
Extended math utilities.
"""
# Authors: Gael Varoquaux
# Alexandre Gramfort
# Alexandre T. Passos
# Olivier Grisel
# Lars Buitinck
# Stefan van der Walt
# Kyle Kastner
# Giorgio Patrini
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import linalg, sparse
from . import check_random_state
from ._logistic_sigmoid import _log_logistic_sigmoid
from .sparsefuncs_fast import csr_row_norms
from .validation import check_array
def squared_norm(x):
    """Squared Euclidean or Frobenius norm of x.

    Faster than norm(x) ** 2.

    Parameters
    ----------
    x : array-like

    Returns
    -------
    float
        The squared Euclidean norm when x is a vector, the squared
        Frobenius norm when x is a matrix (2-d array).
    """
    flat = np.ravel(x, order="K")
    if np.issubdtype(flat.dtype, np.integer):
        # Integer dot products can silently overflow; tell the caller.
        msg = (
            "Array type is integer, np.dot may overflow. "
            "Data should be float type to avoid this issue"
        )
        warnings.warn(msg, UserWarning)
    return np.dot(flat, flat)
def row_norms(X, squared=False):
    """Row-wise (squared) Euclidean norm of X.

    Equivalent to np.sqrt((X * X).sum(axis=1)), but also supports sparse
    matrices and does not create an X.shape-sized temporary.

    Performs no input validation.

    Parameters
    ----------
    X : array-like
        The input array.
    squared : bool, default=False
        If True, return squared norms.

    Returns
    -------
    array-like
        The row-wise (squared) Euclidean norm of X.
    """
    if sparse.issparse(X):
        # The Cython helper only understands CSR; convert other formats.
        if not isinstance(X, sparse.csr_matrix):
            X = sparse.csr_matrix(X)
        result = csr_row_norms(X)
    else:
        # Per-row dot product without materializing X * X.
        result = np.einsum("ij,ij->i", X, X)
    if not squared:
        result = np.sqrt(result)
    return result
def fast_logdet(A):
    """Compute log(det(A)) for A symmetric.

    Equivalent to np.log(np.linalg.det(A)) but more robust: returns -Inf
    when det(A) is non-positive or is not defined.

    Parameters
    ----------
    A : array-like
        The matrix.
    """
    sign, logdet = np.linalg.slogdet(A)
    # slogdet reports the determinant's sign separately; anything that is
    # not strictly positive (including NaN) maps to -Inf.
    return logdet if sign > 0 else -np.inf
def density(w, **kwargs):
    """Compute density of a sparse vector.

    Parameters
    ----------
    w : array-like
        The sparse vector.

    Returns
    -------
    float
        The density of w, between 0 and 1.
    """
    if hasattr(w, "toarray"):
        # Sparse input: ratio of stored entries to total matrix size.
        return float(w.nnz) / (w.shape[0] * w.shape[1])
    if w is None:
        return 0
    # Dense input: fraction of nonzero entries.
    return float((w != 0).sum()) / w.size
def safe_sparse_dot(a, b, *, dense_output=False):
    """Dot product that handles the sparse matrix case correctly.

    Parameters
    ----------
    a : {ndarray, sparse matrix}
    b : {ndarray, sparse matrix}
    dense_output : bool, default=False
        When False, ``a`` and ``b`` both being sparse will yield sparse
        output. When True, output will always be a dense array.

    Returns
    -------
    dot_product : {ndarray, sparse matrix}
        Sparse if ``a`` and ``b`` are sparse and ``dense_output=False``.
    """
    if a.ndim <= 2 and b.ndim <= 2:
        ret = a @ b
    elif sparse.issparse(a):
        # Sparse operands are always 2D, so b must be 3D+:
        # [i, j] @ [k, ..., l, m, n] -> [i, k, ..., l, n]
        rolled = np.rollaxis(b, -2)
        flat = rolled.reshape((b.shape[-2], -1))
        ret = (a @ flat).reshape(a.shape[0], *rolled.shape[1:])
    elif sparse.issparse(b):
        # Sparse operands are always 2D, so a must be 3D+:
        # [k, ..., l, m] @ [i, j] -> [k, ..., l, j]
        flat = a.reshape(-1, a.shape[-1])
        ret = (flat @ b).reshape(*a.shape[:-1], b.shape[1])
    else:
        ret = np.dot(a, b)
    if (
        dense_output
        and sparse.issparse(a)
        and sparse.issparse(b)
        and hasattr(ret, "toarray")
    ):
        ret = ret.toarray()
    return ret
def randomized_range_finder(
    A, *, size, n_iter, power_iteration_normalizer="auto", random_state=None
):
    """Compute an orthonormal matrix whose range approximates the range of A.

    Parameters
    ----------
    A : 2D array
        The input data matrix.
    size : int
        Size of the return array.
    n_iter : int
        Number of power iterations used to stabilize the result.
    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        How each power iteration is normalized: 'QR' (slowest, most
        accurate), 'none' (fastest, unstable for large `n_iter`), or 'LU'
        (stable with a small accuracy cost). 'auto' uses no normalization
        when `n_iter` <= 2 and LU otherwise.

        .. versionadded:: 0.18

    random_state : int, RandomState instance or None, default=None
        Seed/generator used to draw the random test matrix. Pass an int
        for reproducible results across calls. See :term:`Glossary
        <random_state>`.

    Returns
    -------
    Q : ndarray
        An (A.shape[0] x size) matrix with orthonormal columns whose range
        approximates the range of A.

    Notes
    -----
    Follows Algorithm 4.3 of Halko et al. (2009), :arxiv:`0909.4061`, with
    the power-iteration variant of A. Szlam et al. (2014).
    """
    random_state = check_random_state(random_state)

    # Gaussian test matrix of shape (n_features, size).
    Q = random_state.normal(size=(A.shape[1], size))
    if hasattr(A, "dtype") and A.dtype.kind == "f":
        # Keep float32 inputs in float32 throughout.
        Q = Q.astype(A.dtype, copy=False)

    normalizer = power_iteration_normalizer
    if normalizer == "auto":
        normalizer = "none" if n_iter <= 2 else "LU"

    # Power iterations 'imprint' the dominant singular vectors of A in Q.
    for _ in range(n_iter):
        if normalizer == "none":
            Q = safe_sparse_dot(A.T, safe_sparse_dot(A, Q))
        elif normalizer == "LU":
            Q, _lu = linalg.lu(safe_sparse_dot(A, Q), permute_l=True)
            Q, _lu = linalg.lu(safe_sparse_dot(A.T, Q), permute_l=True)
        elif normalizer == "QR":
            Q, _r = linalg.qr(safe_sparse_dot(A, Q), mode="economic")
            Q, _r = linalg.qr(safe_sparse_dot(A.T, Q), mode="economic")

    # Sample the range of A through Q and extract an orthonormal basis.
    Q, _r = linalg.qr(safe_sparse_dot(A, Q), mode="economic")
    return Q
def randomized_svd(
    M,
    n_components,
    *,
    n_oversamples=10,
    n_iter="auto",
    power_iteration_normalizer="auto",
    transpose="auto",
    flip_sign=True,
    random_state="warn",
    svd_lapack_driver="gesdd",
):
    """Compute a truncated randomized SVD.

    Solves the fixed-rank approximation problem described in Halko et al.
    (2009) [1]_ (problem (1.5), p5).

    Parameters
    ----------
    M : {ndarray, sparse matrix}
        Matrix to decompose.
    n_components : int
        Number of singular values and vectors to extract.
    n_oversamples : int, default=10
        Extra random vectors used to sample the range of M (total is
        n_components + n_oversamples). Larger values improve the
        approximation at some cost in speed; see [1]_ (pages 5, 23, 26).
    n_iter : int or 'auto', default='auto'
        Number of power iterations, useful for noisy problems. 'auto'
        picks 7 when `n_components` < .1 * min(M.shape), else 4. Prefer
        raising `n_oversamples` before `n_iter`.

        .. versionchanged:: 0.18

    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Normalization of the power iterations: 'QR' (slowest, most
        accurate), 'none' (fastest, unstable for large `n_iter`), or 'LU'.
        'auto' uses no normalization when `n_iter` <= 2 and LU otherwise.

        .. versionadded:: 0.18

    transpose : bool or 'auto', default='auto'
        Whether to run on M.T instead of M. 'auto' transposes when
        M.shape[1] > M.shape[0], which tends to be a little faster.

        .. versionchanged:: 0.18

    flip_sign : bool, default=True
        Resolve the SVD sign ambiguity by making the largest loadings of
        each component in the left singular vectors positive.
    random_state : int, RandomState instance or None, default='warn'
        Seed/generator for the random vectors. Pass an int for
        reproducible results. See :term:`Glossary <random_state>`.

        .. versionchanged:: 1.2
            The implicit default of 0 is deprecated; it will become
            `None` in 1.2. Set `random_state` explicitly to silence the
            deprecation warning.

    svd_lapack_driver : {"gesdd", "gesvd"}, default="gesdd"
        LAPACK driver used for the SVD of the small projected matrix B.

        .. versionadded:: 1.2

    Notes
    -----
    Finds a (usually very good) approximate truncated SVD using
    randomization. Particularly fast on large matrices when only a small
    number of components is needed.

    References
    ----------
    .. [1] :arxiv:`"Finding structure with randomness:
      Stochastic algorithms for constructing approximate matrix decompositions"
      <0909.4061>`
      Halko, et al. (2009)

    .. [2] A randomized algorithm for the decomposition of matrices
      Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert

    .. [3] An implementation of a randomized algorithm for principal component
      analysis A. Szlam et al. 2014
    """
    if isinstance(M, (sparse.lil_matrix, sparse.dok_matrix)):
        warnings.warn(
            "Calculating SVD of a {} is expensive. "
            "csr_matrix is more efficient.".format(type(M).__name__),
            sparse.SparseEfficiencyWarning,
        )

    if random_state == "warn":
        warnings.warn(
            "If 'random_state' is not supplied, the current default "
            "is to use 0 as a fixed seed. This will change to "
            "None in version 1.2 leading to non-deterministic results "
            "that better reflect nature of the randomized_svd solver. "
            "If you want to silence this warning, set 'random_state' "
            "to an integer seed or to None explicitly depending "
            "if you want your code to be deterministic or not.",
            FutureWarning,
        )
        random_state = 0

    random_state = check_random_state(random_state)
    n_samples, n_features = M.shape
    n_random = n_components + n_oversamples

    if n_iter == "auto":
        # 7 iterations improves precision for small n_components (see #5299).
        n_iter = 7 if n_components < 0.1 * min(M.shape) else 4

    if transpose == "auto":
        transpose = n_samples < n_features
    if transpose:
        # This implementation is a bit faster with smaller shape[1].
        M = M.T

    Q = randomized_range_finder(
        M,
        size=n_random,
        n_iter=n_iter,
        power_iteration_normalizer=power_iteration_normalizer,
        random_state=random_state,
    )

    # Project M onto the (k + p)-dimensional subspace spanned by Q's columns.
    B = safe_sparse_dot(Q.T, M)

    # Exact SVD of the small projected matrix: (k + p) wide.
    Uhat, s, Vt = linalg.svd(
        B, full_matrices=False, lapack_driver=svd_lapack_driver)
    del B
    U = np.dot(Q, Uhat)

    if flip_sign:
        # When transposed, flip based on v rather than u.
        U, Vt = svd_flip(U, Vt, u_based_decision=not transpose)

    if transpose:
        # Transpose back the results according to the input convention.
        return Vt[:n_components, :].T, s[:n_components], U[:, :n_components].T
    return U[:, :n_components], s[:n_components], Vt[:n_components, :]
def _randomized_eigsh(
    M,
    n_components,
    *,
    n_oversamples=10,
    n_iter="auto",
    power_iteration_normalizer="auto",
    selection="module",
    random_state=None,
):
    """Compute a truncated eigendecomposition using randomized methods.

    Solves the fixed-rank approximation problem of the Halko et al paper
    for a real symmetric or complex hermitian matrix. The `selection`
    parameter tunes which components are kept.

    .. versionadded:: 0.24

    Parameters
    ----------
    M : ndarray or sparse matrix
        Matrix to decompose; should be real symmetric square or complex
        hermitian.
    n_components : int
        Number of eigenvalues and vectors to extract.
    n_oversamples : int, default=10
        Extra random vectors used to sample the range of M (total is
        n_components + n_oversamples). Larger values improve the
        approximation at some cost in speed; see Halko et al (pages 5,
        23 and 26).
    n_iter : int or 'auto', default='auto'
        Number of power iterations, useful for noisy problems. 'auto'
        picks 7 when `n_components` < .1 * min(X.shape), else 4. Prefer
        raising `n_oversamples` before `n_iter`.
    power_iteration_normalizer : {'auto', 'QR', 'LU', 'none'}, default='auto'
        Normalization of the power iterations: 'QR' (slowest, most
        accurate), 'none' (fastest, unstable for large `n_iter`), or 'LU'.
        'auto' uses no normalization when `n_iter` <= 2 and LU otherwise.
    selection : {'value', 'module'}, default='module'
        'module' keeps the n components whose eigenvalues have the
        largest modules; 'value' (largest eigenvalues) is not implemented
        yet.
    random_state : int, RandomState instance, default=None
        Seed/generator for the random vectors. Pass an int for
        reproducible results. See :term:`Glossary <random_state>`.

    Returns
    -------
    eigvals : 1D array of shape (n_components,) containing the selected
        eigenvalues (see ``selection``).
    eigvecs : 2D array of shape (M.shape[0], n_components) containing the
        corresponding eigenvectors, in matching order (this follows the
        `scipy.linalg.eigh` convention).

    Notes
    -----
    Strategy 'module' relies on a randomized SVD: for diagonalizable
    matrices, each eigenvalue t of A yields a singular value |t| of A, and
    the signs of the matching left/right singular vectors differ exactly
    when t is negative.

    See Also
    --------
    :func:`randomized_svd`

    References
    ----------
    * :arxiv:`"Finding structure with randomness:
      Stochastic algorithms for constructing approximate matrix decompositions"
      (Algorithm 4.3 for strategy 'module') <0909.4061>`
      Halko, et al. (2009)
    """
    if selection == "value":  # pragma: no cover
        # to do : an algorithm can be found in the Halko et al reference
        raise NotImplementedError()
    if selection != "module":  # pragma: no cover
        raise ValueError("Invalid `selection`: %r" % selection)

    # flip_sign=False: only the product of matching U columns and Vt rows
    # matters below, and that product is invariant to a joint sign flip.
    U, S, Vt = randomized_svd(
        M,
        n_components=n_components,
        n_oversamples=n_oversamples,
        n_iter=n_iter,
        power_iteration_normalizer=power_iteration_normalizer,
        flip_sign=False,
        random_state=random_state,
    )

    eigvecs = U[:, :n_components]
    # Recover eigenvalue signs from the singular vectors: sign(diag(Vt@U))
    # is negative exactly for negative eigenvalues. Fast diagonal of a
    # product: <https://stackoverflow.com/a/61974002/7262247>
    diag_VtU = np.einsum("ji,ij->j", Vt[:n_components, :], U[:, :n_components])
    eigvals = S[:n_components] * np.sign(diag_VtU)

    return eigvals, eigvecs
def weighted_mode(a, w, *, axis=0):
    """Return an array of the weighted modal (most common) value in a.

    If there is more than one such value, only the first is returned.
    The bin-count for the modal bins is also returned. This extends the
    algorithm in scipy.stats.mode to weighted values.

    Parameters
    ----------
    a : array-like
        n-dimensional array of which to find mode(s).
    w : array-like
        n-dimensional array of weights for each value.
    axis : int, default=0
        Axis along which to operate. Default is 0, i.e. the first axis.

    Returns
    -------
    vals : ndarray
        Array of modal values.
    score : ndarray
        Array of weighted counts for each mode.

    Examples
    --------
    >>> from sklearn.utils.extmath import weighted_mode
    >>> x = [4, 1, 4, 2, 4, 2]
    >>> weights = [1, 1, 1, 1, 1, 1]
    >>> weighted_mode(x, weights)
    (array([4.]), array([3.]))

    The value 4 appears three times: with uniform weights, the result is
    simply the mode of the distribution.

    >>> weights = [1, 3, 0.5, 1.5, 1, 2]  # deweight the 4's
    >>> weighted_mode(x, weights)
    (array([2.]), array([3.5]))

    The value 2 has the highest score: it appears twice with weights of
    1.5 and 2: the sum of these is 3.5.

    See Also
    --------
    scipy.stats.mode
    """
    if axis is None:
        a = np.ravel(a)
        w = np.ravel(w)
        axis = 0
    else:
        a = np.asarray(a)
        w = np.asarray(w)
        if a.shape != w.shape:
            # Broadcast scalar/mismatched weights to the data shape.
            w = np.full(a.shape, w, dtype=w.dtype)

    candidate_values = np.unique(np.ravel(a))  # ALL unique values
    out_shape = list(a.shape)
    out_shape[axis] = 1
    best_vals = np.zeros(out_shape)
    best_counts = np.zeros(out_shape)
    for value in candidate_values:
        # Sum the weights of the entries equal to `value` along `axis`.
        masked_weights = np.zeros(a.shape)
        hits = a == value
        masked_weights[hits] = w[hits]
        counts = np.expand_dims(np.sum(masked_weights, axis), axis)
        # Keep this value wherever it beats the running best.
        best_vals = np.where(counts > best_counts, value, best_vals)
        best_counts = np.maximum(counts, best_counts)
    return best_vals, best_counts
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray of shape (M, len(arrays)), default=None
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray of shape (M, len(arrays))
        Array containing the cartesian products formed of input arrays.
        M is the product of the input array lengths.

    Notes
    -----
    This function may not be used on more than 32 arrays
    because the underlying numpy functions do not support it.

    Examples
    --------
    >>> from sklearn.utils.extmath import cartesian
    >>> cartesian(([1, 2], [4, 5]))
    array([[1, 4],
           [1, 5],
           [2, 4],
           [2, 5]])
    """
    arrays = [np.asarray(x) for x in arrays]
    # Materialize the shape as a tuple instead of passing a lazy generator on
    # to np.indices; it is clearer and can be reused/inspected.
    shape = tuple(len(x) for x in arrays)
    dtype = arrays[0].dtype

    # np.indices enumerates every coordinate of the product grid; reshaping
    # gives one row of per-array indices for each output row.
    ix = np.indices(shape)
    ix = ix.reshape(len(arrays), -1).T

    if out is None:
        out = np.empty_like(ix, dtype=dtype)

    # Fix: use the loop-bound array directly instead of re-indexing
    # ``arrays[n]`` while leaving ``arr`` unused.
    for n, arr in enumerate(arrays):
        out[:, n] = arr[ix[:, n]]

    return out
def svd_flip(u, v, u_based_decision=True):
    """Sign correction to ensure deterministic output from SVD.

    Flips the signs of singular-vector pairs (in place) so that, for each
    component, the entry of largest absolute value in the chosen basis is
    positive. Both `u` and `v` are multiplied by the same per-component sign
    so the product ``np.dot(u * s, v)`` is unchanged.

    Parameters
    ----------
    u : ndarray
        Output of `linalg.svd` or
        :func:`~sklearn.utils.extmath.randomized_svd`, with matching inner
        dimensions so one can compute `np.dot(u * s, v)`.
    v : ndarray
        Same as `u`; should really be called ``vt`` to be consistent with
        scipy's output.
    u_based_decision : bool, default=True
        If True, the columns of `u` drive the sign choice; otherwise the
        rows of `v` do. Algorithm dependent.

    Returns
    -------
    u_adjusted, v_adjusted : arrays with the same dimensions as the input.
    """
    if u_based_decision:
        # Sign of the largest-magnitude entry of each column of u.
        peak_rows = np.argmax(np.abs(u), axis=0)
        signs = np.sign(u[peak_rows, range(u.shape[1])])
    else:
        # Sign of the largest-magnitude entry of each row of v.
        peak_cols = np.argmax(np.abs(v), axis=1)
        signs = np.sign(v[range(v.shape[0]), peak_cols])
    u *= signs
    v *= signs[:, np.newaxis]
    return u, v
def log_logistic(X, out=None):
    """Compute the log of the logistic function, ``log(1 / (1 + e ** -x))``.

    The element-wise computation (delegated to ``_log_logistic_sigmoid``) is
    numerically stable because it splits positive and negative values::

        -log(1 + exp(-x_i))     if x_i > 0
        x_i - log(1 + exp(x_i)) if x_i <= 0

    For the ordinary logistic function, use ``scipy.special.expit``.

    Parameters
    ----------
    X : array-like of shape (M, N) or (M,)
        Argument to the logistic function.
    out : array-like of shape (M, N) or (M,), default=None
        Preallocated output array.

    Returns
    -------
    out : ndarray of shape (M, N) or (M,)
        Log of the logistic function evaluated at every point in x.

    Notes
    -----
    See the blog post describing this implementation:
    http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
    """
    was_1d = X.ndim == 1
    # Work on a validated 2-D float64 view; restore the 1-D shape at the end.
    X = check_array(np.atleast_2d(X), dtype=np.float64)
    n_rows, n_cols = X.shape
    if out is None:
        out = np.empty_like(X)
    _log_logistic_sigmoid(n_rows, n_cols, X, out)
    return np.squeeze(out) if was_1d else out
def softmax(X, copy=True):
    """Calculate the softmax function.

    The softmax function is calculated by::

        np.exp(X) / np.sum(np.exp(X), axis=1)

    Exponentiating large values overflows, so the largest value of each row
    is subtracted from that row first; this leaves the result unchanged.

    Parameters
    ----------
    X : array-like of float of shape (M, N)
        Argument to the logistic function.
    copy : bool, default=True
        Copy X or not. When False, X is modified (and returned) in place.

    Returns
    -------
    out : ndarray of shape (M, N)
        Softmax function evaluated at every point in x.
    """
    if copy:
        X = np.copy(X)
    # Shift each row by its maximum for numerical stability.
    X -= np.max(X, axis=1)[:, np.newaxis]
    np.exp(X, out=X)
    X /= np.sum(X, axis=1)[:, np.newaxis]
    return X
def make_nonnegative(X, min_value=0):
    """Ensure `X.min()` >= `min_value`.

    When the minimum of `X` is below `min_value`, a constant shift is added
    to every entry; otherwise `X` is returned untouched.

    Parameters
    ----------
    X : array-like
        The matrix to make non-negative.
    min_value : float, default=0
        The threshold value.

    Returns
    -------
    array-like
        The thresholded array.

    Raises
    ------
    ValueError
        When X is sparse and a shift would be required (the shift would
        destroy sparsity).
    """
    lowest = X.min()
    if lowest >= min_value:
        return X
    if sparse.issparse(X):
        raise ValueError(
            "Cannot make the data matrix"
            " nonnegative because it is sparse."
            " Adding a value to every entry would"
            " make it no longer sparse."
        )
    return X + (min_value - lowest)
# Use at least float64 for the accumulating functions to avoid precision issue
# see https://github.com/numpy/numpy/issues/9393. The float64 is also retained
# as it is in case the float overflows
def _safe_accumulator_op(op, x, *args, **kwargs):
"""
This function provides numpy accumulator functions with a float64 dtype
when used on a floating point input. This prevents accumulator overflow on
smaller floating point dtypes.
Parameters
----------
op : function
A numpy accumulator function such as np.mean or np.sum.
x : ndarray
A numpy array to apply the accumulator function.
*args : positional arguments
Positional arguments passed to the accumulator function after the
input x.
**kwargs : keyword arguments
Keyword arguments passed to the accumulator function.
Returns
-------
result
The output of the accumulator function passed to this function.
"""
if np.issubdtype(x.dtype, np.floating) and x.dtype.itemsize < 8:
result = op(x, *args, **kwargs, dtype=np.float64)
else:
result = op(x, *args, **kwargs)
return result
def _incremental_mean_and_var(
    X, last_mean, last_variance, last_sample_count, sample_weight=None
):
    """Calculate mean update and a Youngs and Cramer variance update.

    If sample_weight is given, the weighted mean and variance is computed.

    Update a given mean and (possibly) variance according to new data given
    in X. last_mean is always required to compute the new mean.
    If last_variance is None, no variance is computed and None return for
    updated_variance.

    From the paper "Algorithms for computing the sample variance: analysis and
    recommendations", by Chan, Golub, and LeVeque.

    Parameters
    ----------
    X : array-like of shape (n_samples, n_features)
        Data to use for variance update.

    last_mean : array-like of shape (n_features,)

    last_variance : array-like of shape (n_features,)

    last_sample_count : array-like of shape (n_features,)
        The number of samples encountered until now if sample_weight is None.
        If sample_weight is not None, this is the sum of sample_weight
        encountered.

    sample_weight : array-like of shape (n_samples,) or None
        Sample weights. If None, compute the unweighted mean/variance.

    Returns
    -------
    updated_mean : ndarray of shape (n_features,)

    updated_variance : ndarray of shape (n_features,)
        None if last_variance was None.

    updated_sample_count : ndarray of shape (n_features,)

    Notes
    -----
    NaNs are ignored during the algorithm.

    References
    ----------
    T. Chan, G. Golub, R. LeVeque. Algorithms for computing the sample
    variance: recommendations, The American Statistician, Vol. 37, No. 3,
    pp. 242-247

    Also, see the sparse implementation of this in
    `utils.sparsefuncs.incr_mean_variance_axis` and
    `utils.sparsefuncs_fast.incr_mean_variance_axis0`
    """
    # old = stats until now
    # new = the current increment
    # updated = the aggregated stats
    last_sum = last_mean * last_sample_count
    X_nan_mask = np.isnan(X)
    # Only pay for nan-aware reductions when NaNs are actually present.
    if np.any(X_nan_mask):
        sum_op = np.nansum
    else:
        sum_op = np.sum
    if sample_weight is not None:
        # equivalent to np.nansum(X * sample_weight, axis=0)
        # safer because np.float64(X*W) != np.float64(X)*np.float64(W)
        new_sum = _safe_accumulator_op(
            np.matmul, sample_weight, np.where(X_nan_mask, 0, X)
        )
        # Per-feature effective count: sum of weights over non-NaN rows.
        new_sample_count = _safe_accumulator_op(
            np.sum, sample_weight[:, None] * (~X_nan_mask), axis=0
        )
    else:
        new_sum = _safe_accumulator_op(sum_op, X, axis=0)
        n_samples = X.shape[0]
        # Per-feature count of non-NaN entries in this batch.
        new_sample_count = n_samples - np.sum(X_nan_mask, axis=0)

    updated_sample_count = last_sample_count + new_sample_count

    updated_mean = (last_sum + new_sum) / updated_sample_count

    if last_variance is None:
        updated_variance = None
    else:
        # T is the batch mean; temp holds the centered batch (squared in
        # place below, so statement order matters here).
        T = new_sum / new_sample_count
        temp = X - T
        if sample_weight is not None:
            # equivalent to np.nansum((X-T)**2 * sample_weight, axis=0)
            # safer because np.float64(X*W) != np.float64(X)*np.float64(W)
            correction = _safe_accumulator_op(
                np.matmul, sample_weight, np.where(X_nan_mask, 0, temp)
            )
            temp **= 2
            new_unnormalized_variance = _safe_accumulator_op(
                np.matmul, sample_weight, np.where(X_nan_mask, 0, temp)
            )
        else:
            correction = _safe_accumulator_op(sum_op, temp, axis=0)
            temp **= 2
            new_unnormalized_variance = _safe_accumulator_op(sum_op, temp, axis=0)

        # correction term of the corrected 2 pass algorithm.
        # See "Algorithms for computing the sample variance: analysis
        # and recommendations", by Chan, Golub, and LeVeque.
        new_unnormalized_variance -= correction**2 / new_sample_count

        last_unnormalized_variance = last_variance * last_sample_count

        # last_sample_count may contain zeros (features never seen before);
        # the resulting inf/nan entries are overwritten below, so silence
        # the divide warnings for this combination step.
        with np.errstate(divide="ignore", invalid="ignore"):
            last_over_new_count = last_sample_count / new_sample_count
            updated_unnormalized_variance = (
                last_unnormalized_variance
                + new_unnormalized_variance
                + last_over_new_count
                / updated_sample_count
                * (last_sum / last_over_new_count - new_sum) ** 2
            )

        # Features with no prior samples take the batch variance directly.
        zeros = last_sample_count == 0
        updated_unnormalized_variance[zeros] = new_unnormalized_variance[zeros]
        updated_variance = updated_unnormalized_variance / updated_sample_count

    return updated_mean, updated_variance, updated_sample_count
def _deterministic_vector_sign_flip(u):
"""Modify the sign of vectors for reproducibility.
Flips the sign of elements of all the vectors (rows of u) such that
the absolute maximum element of each vector is positive.
Parameters
----------
u : ndarray
Array with vectors as its rows.
Returns
-------
u_flipped : ndarray with same shape as u
Array with the sign flipped vectors as its rows.
"""
max_abs_rows = np.argmax(np.abs(u), axis=1)
signs = np.sign(u[range(u.shape[0]), max_abs_rows])
u *= signs[:, np.newaxis]
return u
def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):
    """Use high precision for cumsum and check that final value matches sum.

    Warns if the final cumulative sum does not match the sum (up to the chosen
    tolerance).

    Parameters
    ----------
    arr : array-like
        To be cumulatively summed as flat.
    axis : int, default=None
        Axis along which the cumulative sum is computed.
        The default (None) is to compute the cumsum over the flattened array.
    rtol : float, default=1e-05
        Relative tolerance, see ``np.allclose``.
    atol : float, default=1e-08
        Absolute tolerance, see ``np.allclose``.

    Returns
    -------
    out : ndarray
        Array with the cumulative sums along the chosen axis.
    """
    out = np.cumsum(arr, axis=axis, dtype=np.float64)
    expected = np.sum(arr, axis=axis, dtype=np.float64)
    # The last partial sum along the axis should agree with a direct sum;
    # a mismatch signals accumulated floating-point error.
    final = out.take(-1, axis=axis)
    is_stable = np.all(
        np.isclose(final, expected, rtol=rtol, atol=atol, equal_nan=True)
    )
    if not is_stable:
        warnings.warn(
            "cumsum was found to be unstable: "
            "its last element does not correspond to sum",
            RuntimeWarning,
        )
    return out
| {
"content_hash": "3d669afc3e00bf69d34c265f0da7412c",
"timestamp": "",
"source": "github",
"line_count": 1092,
"max_line_length": 85,
"avg_line_length": 34.07142857142857,
"alnum_prop": 0.6199537708971671,
"repo_name": "ivannz/scikit-learn",
"id": "bc4e18e7d55e3ed6b70dfd223be0925ffcaa3d0f",
"size": "37206",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sklearn/utils/extmath.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "670108"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10468527"
},
{
"name": "Shell",
"bytes": "42860"
}
],
"symlink_target": ""
} |
import io
import sys
import unittest
from mwel import ErrorLogger, toxml, fromxml
from . import TempFilesMixin
class TestErrorLogger(unittest.TestCase):
    """Unit tests for mwel's ErrorLogger error-collection helper."""

    def test_basic(self):
        """Errors with various lineno/colno combinations are formatted
        and ordered as expected by print_errors."""
        el = ErrorLogger()
        # NOTE(review): the expected output order differs from the logging
        # order below, so ErrorLogger appears to sort entries before
        # printing — confirm the exact ordering rule in mwel itself.
        expected_output = '''\
negative lineno, no colno
zero lineno, no colno
positive lineno, negative colno
positive lineno, zero colno
positive lineno, positive colno [line 1, column 1]
positive lineno, no colno
positive lineno, positive colno [line 15, column 9]
positive lineno, positive colno [line 20, column 8]
positive lineno, positive colno [line 20, column 10]
no lineno, no colno
'''
        el('no lineno, no colno', token='?')
        el('negative lineno, no colno', lineno=-1)
        el('zero lineno, no colno', lineno=0)
        el('positive lineno, no colno', lineno=1)
        el('positive lineno, negative colno', lineno=1, colno=-1)
        el('positive lineno, zero colno', lineno=1, colno=0)
        el('positive lineno, positive colno', lineno=1, colno=1)
        el('positive lineno, positive colno', lineno=20, colno=10)
        el('positive lineno, positive colno', lineno=15, colno=9)
        el('positive lineno, positive colno', lineno=20, colno=8)
        file = io.StringIO()
        el.print_errors(file)
        self.assertEqual(expected_output, file.getvalue())

    def test_filenames(self):
        """The filename() context manager tags errors with the active file,
        and current_filename tracks the nesting."""
        el = ErrorLogger()
        expected_output = '''\
no file 1 [line 1, column 1]
no file 2 [line 2, column 2]
no file 3 [line 8, column 8]
file1 1 [file1.mwel: line 5, column 5]
file1 2 [file1.mwel: line 6, column 6]
file1 3 [file1.mwel]
another file [another_file: line 100, column 100]
file2 1 [file2: line 3, column 3]
file2 2 [file2: line 4, column 4]
file2 3 [file2: line 7, column 7]
'''
        el('no file 1', lineno=1, colno=1)
        el('no file 2', lineno=2, colno=2)
        with el.filename('dir1/file2'):
            el('file2 1', lineno=3, colno=3)
            el('file2 2', lineno=4, colno=4)
            with el.filename('/dir2/dir3/file1.mwel'):
                el('file1 1', lineno=5, colno=5)
                el('file1 2', lineno=6, colno=6)
                # An explicit filename= overrides the context's filename.
                el('another file', lineno=100, colno=100,
                   filename='another_file')
                el('file1 3')
                self.assertEqual('/dir2/dir3/file1.mwel', el.current_filename)
            el('file2 3', lineno=7, colno=7)
            self.assertEqual('dir1/file2', el.current_filename)
        el('no file 3', lineno=8, colno=8)
        self.assertEqual('', el.current_filename)
        file = io.StringIO()
        el.print_errors(file)
        self.assertEqual(expected_output, file.getvalue())

    def test_backtraces(self):
        """Tuple-valued lineno/colno/filename render as 'via' backtraces."""
        el = ErrorLogger()
        expected_output = '''\
a shorter backtrace [b.mwel: line 2, column 5; via line 3, column 5]
a long backtrace [a.mwel: line 1, column 4; via b.mwel: line 2, column 5; via line 3, column 6]
no backtrace [line 4, column 6]
another backtrace to a file [a.mwel: line 1, column 3; via b.mwel: line 2, column 4]
backtrace to a file [a.mwel: line 1, column 3; via b.mwel: line 3, column 4]
'''
        el('backtrace to a file',
           lineno = (1, 3),
           colno = (3, 4),
           filename = ('a.mwel', 'b.mwel'))
        el('no backtrace', lineno=4, colno=6)
        el('a long backtrace',
           lineno = (1, 2, 3),
           colno = (4, 5, 6),
           filename = ('a.mwel', 'b.mwel', ''))
        el('a shorter backtrace',
           lineno = (2, 3),
           colno = (5, 5),
           filename = ('b.mwel', ''))
        el('another backtrace to a file',
           lineno = (1, 2),
           colno = (3, 4),
           filename = ('a.mwel', 'b.mwel'))
        file = io.StringIO()
        el.print_errors(file)
        self.assertEqual(expected_output, file.getvalue())
class TestToXML(TempFilesMixin, unittest.TestCase):
    """End-to-end tests for the mwel ``toxml`` command-line entry point."""

    def toxml(self, *argv):
        """Invoke toxml with argv, capturing stdout (bytes) and stderr
        (text) for later inspection; return its exit status."""
        self.stdout = io.BytesIO()
        self.stderr = io.StringIO()
        return toxml(argv, self.stdout, self.stderr)

    def assertOutput(self, stdout='', stderr=''):
        """Assert the captured stdout/stderr from the last toxml() call
        exactly match the expected text."""
        self.assertEqual(stdout, self.stdout.getvalue().decode('utf-8'))
        self.assertEqual(stderr, self.stderr.getvalue())

    def test_usage_error(self):
        """Wrong argument counts exit with status 2 and print usage."""
        # No args
        self.assertEqual(2, self.toxml('my_script'))
        self.assertOutput(stderr='Usage: my_script [--omit-metadata] file\n')
        # Extra args
        self.assertEqual(2, self.toxml('/path/to/my_script', 'foo', 'bar'))
        self.assertOutput(stderr='Usage: my_script [--omit-metadata] file\n')

    def test_file_read_error(self):
        """A nonexistent input file is reported on stderr with status 1."""
        self.assertEqual(1, self.toxml('my_script', '/path/to/not_a_file'))
        self.assertOutput(stderr=("Failed to open file '/path/to/not_a_file': "
                                  "No such file or directory\n"))

    def test_parser_error(self):
        """Syntax errors are reported with their source location."""
        src = '''\
var x = 1 +
var y = 7
'''
        src_path = self.write_file('experiment.mwel', src)
        self.assertEqual(1, self.toxml('my_script', src_path))
        self.assertOutput(stderr='''\
Line ended unexpectedly [line 1, column 12]
''')

    def test_analyzer_and_validator_errors(self):
        """Semantic errors from both analyzer and validator are reported."""
        src = '''\
var x = 1
action/floop (3) // Flagged by analyzer
block {} // Flagged by validator
var y = 7
'''
        src_path = self.write_file('experiment.mwel', src)
        self.assertEqual(1, self.toxml('my_script', src_path))
        self.assertOutput(stderr='''\
Cannot infer parameter name for component 'action/floop' [line 2, column 1]
Component 'block' is not allowed at the top level [line 3, column 1]
''')

    def test_success(self):
        """A valid experiment compiles to XML with _location metadata and
        the original source embedded in #loadedExperiment."""
        src = '''\
var x = 2
folder 'Other Vars' {
var foo = 12
var bar = 'This is a string'
var blah = [1.5, 2.5, 3.5]
}
protocol 'Test Protocol' {
if (x > 1) {
report ('x is greater than 1!')
}
x = 'foo'
report ('x = $x')
}
experiment 'My Experiment' {}
'''
        src_path = self.write_file('experiment.mwel', src)
        self.assertEqual(0, self.toxml('my_script', src_path))
        self.assertOutput(stdout='''\
<?xml version='1.0' encoding='UTF-8'?>
<monkeyml version="1.0">
<variable _location="line 1, column 1" default_value="2" tag="x" />
<folder _location="line 3, column 1" tag="Other Vars">
<variable _location="line 4, column 5" default_value="12" tag="foo" />
<variable _location="line 5, column 5" default_value="'This is a string'" tag="bar" />
<variable _location="line 6, column 5" default_value="[1.5, 2.5, 3.5]" tag="blah" />
</folder>
<experiment _location="line 17, column 1" tag="My Experiment">
<protocol _location="line 9, column 1" tag="Test Protocol">
<action _location="line 10, column 5" condition="x > 1" type="if">
<action _location="line 11, column 9" message="x is greater than 1!" type="report" />
</action>
<action _location="line 13, column 7" type="assignment" value="'foo'" variable="x" />
<action _location="line 14, column 5" message="x = $x" type="report" />
</protocol>
</experiment>
<variable_assignment variable="#loadedExperiment">
<dictionary>
<dictionary_element>
<key>%s</key>
<value type="string">var x = 2
folder 'Other Vars' {
var foo = 12
var bar = 'This is a string'
var blah = [1.5, 2.5, 3.5]
}
protocol 'Test Protocol' {
if (x > 1) {
report ('x is greater than 1!')
}
x = 'foo'
report ('x = $x')
}
experiment 'My Experiment' {}
</value>
</dictionary_element>
</dictionary>
</variable_assignment>
</monkeyml>
''' % src_path)

    def test_includes(self):
        """%include'd files are compiled in, locations carry 'via'
        backtraces, and every source file is embedded in the metadata."""
        experiment_src = '''\
var x = 2
folder 'Other Vars' {
var foo = 12
var bar = 'This is a string'
var blah = [1.5, 2.5, 3.5]
}
%include protocol
experiment 'My Experiment' {}
protocol 'Another Protocol' {
set_x('bar')
}
'''
        experiment_path = self.write_file('experiment.mwel', experiment_src)
        protocol_src = '''\
%define set_x(value)
x = value
%end
protocol 'Test Protocol' {
if (x > 1) {
report ('x is greater than 1!')
}
set_x('foo')
report ('x = $x')
}
'''
        protocol_path = self.write_file('protocol.mwel', protocol_src)
        self.assertEqual(0, self.toxml('my_script', experiment_path))
        self.assertOutput(stdout='''\
<?xml version='1.0' encoding='UTF-8'?>
<monkeyml version="1.0">
<variable _location="line 1, column 1" default_value="2" tag="x" />
<folder _location="line 3, column 1" tag="Other Vars">
<variable _location="line 4, column 5" default_value="12" tag="foo" />
<variable _location="line 5, column 5" default_value="'This is a string'" tag="bar" />
<variable _location="line 6, column 5" default_value="[1.5, 2.5, 3.5]" tag="blah" />
</folder>
<experiment _location="line 11, column 1" tag="My Experiment">
<protocol _location="protocol.mwel: line 4, column 1" tag="Test Protocol">
<action _location="protocol.mwel: line 5, column 5" condition="x > 1" type="if">
<action _location="protocol.mwel: line 6, column 9" message="x is greater than 1!" type="report" />
</action>
<action _location="protocol.mwel: line 2, column 7; via protocol.mwel: line 8, column 5" type="assignment" value="'foo'" variable="x" />
<action _location="protocol.mwel: line 9, column 5" message="x = $x" type="report" />
</protocol>
<protocol _location="line 13, column 1" tag="Another Protocol">
<action _location="protocol.mwel: line 2, column 7; via line 14, column 5" type="assignment" value="'bar'" variable="x" />
</protocol>
</experiment>
<variable_assignment variable="#loadedExperiment">
<dictionary>
<dictionary_element>
<key>%s</key>
<value type="string">var x = 2
folder 'Other Vars' {
var foo = 12
var bar = 'This is a string'
var blah = [1.5, 2.5, 3.5]
}
%%include protocol
experiment 'My Experiment' {}
protocol 'Another Protocol' {
set_x('bar')
}
</value>
</dictionary_element>
<dictionary_element>
<key>%s</key>
<value type="string">%%define set_x(value)
x = value
%%end
protocol 'Test Protocol' {
if (x > 1) {
report ('x is greater than 1!')
}
set_x('foo')
report ('x = $x')
}
</value>
</dictionary_element>
</dictionary>
</variable_assignment>
</monkeyml>
''' % (experiment_path, protocol_path))

    def test_unicode(self):
        """UTF-8 sources are accepted; non-UTF-8 files are reported."""
        experiment_src = '''\
// ¡Buenos días, amigos!
%include inc
'''
        include_src = '''\
// ¡Y tu también!
var 😊 = 1
'''
        #
        # Both files UTF-8
        #
        experiment_path = self.write_file('exp.mwel', experiment_src)
        self.write_file('inc.mwel', include_src)
        self.assertEqual(1, self.toxml('my_script', experiment_path))
        expected_stderr = '''\
Illegal character: '😊' [inc.mwel: line 2, column 5]
'''
        self.assertOutput(stderr=expected_stderr)
        #
        # Include not UTF-8
        #
        include_path = self.write_file('inc.mwel', include_src, 'utf-16')
        self.assertEqual(1, self.toxml('my_script', experiment_path))
        self.assertOutput(stderr='''\
File '%s' does not contain valid UTF-8 encoded text [line 2, column 2]
''' % include_path)
        #
        # Experiment not UTF-8
        #
        experiment_path = self.write_file('exp.mwel', experiment_src, 'utf-16')
        self.assertEqual(1, self.toxml('my_script', experiment_path))
        self.assertOutput(stderr='''\
File '%s' does not contain valid UTF-8 encoded text
''' % experiment_path)

    def test_omit_metadata(self):
        """--omit-metadata drops _location attributes and the embedded
        #loadedExperiment source dictionary."""
        src = '''\
var x = 2
folder 'Other Vars' {
var foo = 12
var bar = 'This is a string'
var blah = [1.5, 2.5, 3.5]
}
protocol 'Test Protocol' {
if (x > 1) {
report ('x is greater than 1!')
}
x = 'foo'
report ('x = $x')
}
experiment 'My Experiment' {}
'''
        src_path = self.write_file('experiment.mwel', src)
        self.assertEqual(0, self.toxml('my_script',
                                       '--omit-metadata',
                                       src_path))
        self.assertOutput(stdout='''\
<?xml version='1.0' encoding='UTF-8'?>
<monkeyml version="1.0">
<variable default_value="2" tag="x" />
<folder tag="Other Vars">
<variable default_value="12" tag="foo" />
<variable default_value="'This is a string'" tag="bar" />
<variable default_value="[1.5, 2.5, 3.5]" tag="blah" />
</folder>
<experiment tag="My Experiment">
<protocol tag="Test Protocol">
<action condition="x > 1" type="if">
<action message="x is greater than 1!" type="report" />
</action>
<action type="assignment" value="'foo'" variable="x" />
<action message="x = $x" type="report" />
</protocol>
</experiment>
</monkeyml>
''')
class TestFromXML(TempFilesMixin, unittest.TestCase):
    """End-to-end tests for the mwel ``fromxml`` command-line entry point."""

    def fromxml(self, *argv):
        """Invoke fromxml with argv, capturing stdout and stderr (both
        text) for later inspection; return its exit status."""
        self.stdout = io.StringIO()
        self.stderr = io.StringIO()
        return fromxml(argv, self.stdout, self.stderr)

    def assertOutput(self, stdout='', stderr=''):
        """Assert the captured stdout/stderr from the last fromxml() call
        exactly match the expected text."""
        self.assertEqual(stdout, self.stdout.getvalue())
        self.assertEqual(stderr, self.stderr.getvalue())

    def test_usage_error(self):
        """Wrong argument counts exit with status 2 and print usage."""
        # No args
        self.assertEqual(2, self.fromxml('my_script'))
        self.assertOutput(stderr='Usage: my_script file\n')
        # Extra args
        self.assertEqual(2, self.fromxml('/path/to/my_script', 'foo', 'bar'))
        self.assertOutput(stderr='Usage: my_script file\n')

    def test_file_read_error(self):
        """A nonexistent input file is reported on stderr with status 1."""
        self.assertEqual(1, self.fromxml('my_script', '/path/to/not_a_file'))
        self.assertOutput(stderr=("Failed to open file '/path/to/not_a_file': "
                                  "No such file or directory\n"))

    def test_xml_parsing_failure(self):
        """Malformed XML (mismatched tag) fails with a located error."""
        src = b'''\
<?xml version="1.0"?>
<monkeyml version="1.0">
<protocol>
<action type="assignment" variable="x" value="1">
</protocol>
</monkeyml>'''
        src_path = self.write_file('experiment.xml', src, encoding=None)
        self.assertEqual(1, self.fromxml('my_script', src_path))
        self.assertOutput(stderr='''\
Failed to parse XML: mismatched tag [line 5, column 6]
''')

    def test_errors(self):
        """Recoverable problems still produce MWEL output, with the
        issues reported on stderr."""
        src = b'''\
<?xml version="1.0"?>
<monkeyml version="1.0">
<!-- Flagged by XML parser-->
foo
<protocol>
<action type="assignment" variable="x" value="1"/>
<!-- Flagged by simplifier-->
<transition_marker>
<action type="report" message="Not cool!"></action>
</transition_marker>
</protocol>
</monkeyml>'''
        src_path = self.write_file('experiment.xml', src, encoding=None)
        self.assertEqual(1, self.fromxml('my_script', src_path))
        self.assertOutput(stdout='''\
// Flagged by XML parser
protocol {
x = 1
// Flagged by simplifier
}
''',
                          stderr='''\
XML contains unexpected data
Ignored element 'transition_marker' has unexpected children that will also be ignored [line 8, column 8]
''')

    def test_no_errors(self):
        """Clean XML converts to MWEL with no diagnostics."""
        src = b'''\
<?xml version="1.0"?>
<monkeyml version="1.0">
<!-- Empty parameter removed by simplifier-->
<protocol nsamples="10" sampling_method="">
<!-- Assignment transformed by converter-->
<action type="assignment" variable="x" value="1"/>
<!-- Type name shortened by beautifier-->
<action type="update_stimulus_display"/>
</protocol>
</monkeyml>'''
        src_path = self.write_file('experiment.xml', src, encoding=None)
        self.assertEqual(0, self.fromxml('my_script', src_path))
        self.assertOutput(stdout='''\
// Empty parameter removed by simplifier
protocol (nsamples = 10) {
// Assignment transformed by converter
x = 1
// Type name shortened by beautifier
update_stimulus_display ()
}
''')
| {
"content_hash": "ce96b6c7682960f526f5bec29c6cda9a",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 142,
"avg_line_length": 30.96303501945525,
"alnum_prop": 0.5932139491046183,
"repo_name": "mworks/mworks",
"id": "de11b47c561aec5a88f375e6f259afadcd4b2619",
"size": "15940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mwel/test_mwel/test_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "241110"
},
{
"name": "C++",
"bytes": "2564939"
},
{
"name": "HTML",
"bytes": "128054"
},
{
"name": "Java",
"bytes": "38100"
},
{
"name": "M",
"bytes": "424"
},
{
"name": "MATLAB",
"bytes": "23144"
},
{
"name": "Makefile",
"bytes": "3634"
},
{
"name": "Metal",
"bytes": "23463"
},
{
"name": "Objective-C",
"bytes": "454609"
},
{
"name": "Perl",
"bytes": "35594"
},
{
"name": "Python",
"bytes": "558518"
},
{
"name": "Rich Text Format",
"bytes": "1306"
},
{
"name": "Shell",
"bytes": "25366"
},
{
"name": "Swift",
"bytes": "10460"
},
{
"name": "XSLT",
"bytes": "52495"
}
],
"symlink_target": ""
} |
def set_app_resource_ownership(app_uid, app_name, app_api_version, resource):
    """Set the Application (given uid/name/apiVersion) as owner of the
    resource, by adding or updating an ownerReference on its metadata."""
    set_resource_ownership(app_uid, app_name, app_api_version, "Application",
                           resource)
def set_service_account_resource_ownership(account_uid, account_name, resource):
    """Set the service account (v1/ServiceAccount) as owner of the
    resource, by adding or updating an ownerReference on its metadata."""
    set_resource_ownership(account_uid, account_name, "v1", "ServiceAccount",
                           resource)
def set_namespace_resource_ownership(namespace_uid, namespace_name, resource):
    """Set the namespace (v1/Namespace) as owner of the resource, by
    adding or updating an ownerReference on its metadata."""
    set_resource_ownership(namespace_uid, namespace_name, "v1", "Namespace",
                           resource)
def set_resource_ownership(owner_uid, owner_name, owner_api_version, owner_kind,
                           resource):
    """Set the owner of the given resource.

    Adds an entry to ``resource['metadata']['ownerReferences']`` (creating
    both levels if absent) for the owner identified by ``owner_uid``. If a
    reference with that uid already exists, it is updated in place rather
    than duplicated. ``blockOwnerDeletion`` is always set to True.

    Args:
      owner_uid: uid of the owning object.
      owner_name: name of the owning object.
      owner_api_version: apiVersion of the owning object (e.g. "v1").
      owner_kind: kind of the owning object (e.g. "Namespace").
      resource: dict-form Kubernetes resource manifest, mutated in place.
    """
    # setdefault replaces the original's manual membership checks.
    metadata = resource.setdefault('metadata', {})
    owner_references = metadata.setdefault('ownerReferences', [])

    # Use .get so a malformed reference without 'uid' is skipped instead of
    # raising KeyError (the original indexed ref['uid'] directly).
    owner_reference = next(
        (ref for ref in owner_references if ref.get('uid') == owner_uid),
        None)
    if owner_reference is None:
        owner_reference = {}
        owner_references.append(owner_reference)

    owner_reference.update({
        'apiVersion': owner_api_version,
        'kind': owner_kind,
        'blockOwnerDeletion': True,
        'name': owner_name,
        'uid': owner_uid,
    })
def find_application_resource(resources):
    """Finds the Application resource from a list of resource manifests.

    Scans for manifests whose kind is "Application" under the
    ``app.k8s.io/`` API group. Exactly one must be present.

    Raises:
      Exception: if no Application, or more than one, is found.
    """
    matches = []
    for manifest in resources:
        is_application = (manifest["kind"] == "Application" and
                          manifest["apiVersion"].startswith('app.k8s.io/'))
        if is_application:
            matches.append(manifest)
    if not matches:
        raise Exception("Set of resources does not include an Application")
    if len(matches) > 1:
        raise Exception("Set of resources includes multiple Applications")
    return matches[0]
| {
"content_hash": "fa112d0e8634511f60dbad8949e3f401",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 37.163636363636364,
"alnum_prop": 0.6746575342465754,
"repo_name": "GoogleCloudPlatform/marketplace-k8s-app-tools",
"id": "3371dfafff73fdcdf9b9f86b93513c19585a3aef",
"size": "2621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "marketplace/deployer_util/resources.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7489"
},
{
"name": "Makefile",
"bytes": "22317"
},
{
"name": "Python",
"bytes": "263511"
},
{
"name": "Shell",
"bytes": "51665"
}
],
"symlink_target": ""
} |
"""Tests for python.tpu.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
from absl.testing import parameterized
from tensorflow.python.client import session
from tensorflow.python.feature_column import feature_column_lib as fc_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables as variables_lib
from tensorflow.python.platform import test
from tensorflow.python.tpu import feature_column_v2 as tpu_fc
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_function
def _initialized_session():
    """Create a TF session and run the global-variable and lookup-table
    initializers before returning it."""
    initialized = session.Session()
    initialized.run(variables_lib.global_variables_initializer())
    initialized.run(lookup_ops.tables_initializer())
    return initialized
class _TestStateManager(fc_lib.StateManager):
    """Minimal StateManager for tests: caches one variable per
    (feature_column, name) pair, creating it on first request."""

    def __init__(self, trainable=True):
        # Maps feature_column -> {variable name -> variable}.
        self._all_variables = {}
        # Master switch: ANDed with each create_variable's trainable flag.
        self._trainable = trainable

    def create_variable(self,
                        feature_column,
                        name,
                        shape,
                        dtype=None,
                        trainable=True,
                        use_resource=True,
                        initializer=None):
        """Return the cached variable for (feature_column, name), creating
        it via variable_scope.get_variable on first use."""
        if feature_column not in self._all_variables:
            self._all_variables[feature_column] = {}
        var_dict = self._all_variables[feature_column]
        if name in var_dict:
            return var_dict[name]
        else:
            var = variable_scope.get_variable(
                name=name,
                shape=shape,
                dtype=dtype,
                trainable=self._trainable and trainable,
                use_resource=use_resource,
                initializer=initializer)
            var_dict[name] = var
            return var

    def get_variable(self, feature_column, name):
        """Look up a previously created variable; raises KeyError if it
        was never created through this manager."""
        return self._all_variables[feature_column][name]
class EmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
  """Tests for `tpu_fc.embedding_column_v2`.

  These are graph-mode tests: the assertions on variable names and op
  counts depend on the exact order in which layers and scopes are built.
  """

  def test_defaults(self):
    """Constructor defaults: 'mean' combiner, derived name, 1-D variable shape."""
    categorical_column = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = tpu_fc.embedding_column_v2(
        categorical_column, dimension=embedding_dimension)
    # Can't test default initializer as it's a random function.
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('mean', embedding_column.combiner)
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual((embedding_dimension,), embedding_column.variable_shape)

  def test_all_constructor_args(self):
    """Explicitly passed constructor arguments are stored unchanged."""
    categorical_column = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    embedding_column = tpu_fc.embedding_column_v2(
        categorical_column,
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer')
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    self.assertEqual('my_combiner', embedding_column.combiner)
    self.assertEqual('my_initializer', embedding_column.initializer())
    self.assertEqual('aaa_embedding', embedding_column.name)
    self.assertEqual((embedding_dimension,), embedding_column.variable_shape)
    self.assertEqual({
        'aaa': parsing_ops.VarLenFeature(dtypes.int64)
    }, embedding_column._parse_example_spec)

  @parameterized.named_parameters(
      {
          'testcase_name': 'use_safe_embedding_lookup',
          'use_safe_embedding_lookup': True,
      }, {
          'testcase_name': 'dont_use_safe_embedding_lookup',
          'use_safe_embedding_lookup': False,
      })
  @test_util.deprecated_graph_mode_only
  def test_feature_layer_cpu(self, use_safe_embedding_lookup):
    """CPU lookups through DenseFeatures/SequenceFeatures match hand-computed values."""
    # Inputs.
    vocabulary_size = 3
    sparse_input = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        # example 3, ids [1]
        indices=((0, 0), (1, 0), (1, 1), (3, 0)),
        values=(2, 0, 1, 1),
        dense_shape=(4, 2))
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      # Check the initializer is called with the expected table shape before
      # returning the deterministic embedding table above.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups = (
        # example 0, ids [2], embedding = [7, 11]
        (7., 11.),
        # example 1, ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
        (2., 3.5),
        # example 2, ids [], embedding = [0, 0]
        (0., 0.),
        # example 3, ids [1], embedding = [3, 5]
        (3., 5.),
    )
    # Sequence result is padded/truncated to max_sequence_length=2.
    expected_lookups_sequence = (
        # example 0, ids [2], embedding = [[7, 11], [0, 0]]
        ((7., 11.), (0., 0.),),
        # example 1, ids [0, 1], embedding = [[1, 2], [3, 5]]
        ((1., 2.), (3., 5.),),
        # example 2, ids [], embedding = [0, 0]
        ((0., 0.), (0., 0.),),
        # example 3, ids [1], embedding = [3, 5]
        ((3., 5.), (0., 0.),),
    )
    # Build columns.
    categorical_column = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    sequence_categorical_column = (
        fc_lib.sequence_categorical_column_with_identity(
            key='bbb', num_buckets=vocabulary_size))
    embedding_column = tpu_fc.embedding_column_v2(
        categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer,
        use_safe_embedding_lookup=use_safe_embedding_lookup)
    sequence_embedding_column = tpu_fc.embedding_column_v2(
        sequence_categorical_column,
        dimension=embedding_dimension,
        initializer=_initializer,
        max_sequence_length=2,
        use_safe_embedding_lookup=use_safe_embedding_lookup)
    # Provide sparse input and get dense result.
    features = {'aaa': sparse_input, 'bbb': sparse_input}
    dense_features = fc_lib.DenseFeatures([embedding_column])
    sequence_features = fc_lib.SequenceFeatures([sequence_embedding_column])
    embedding_lookup = dense_features(features)
    sequence_embedding_lookup = sequence_features(features)
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('dense_features/aaa_embedding/embedding_weights:0',
         'sequence_features/bbb_embedding/embedding_weights:0',),
        tuple([v.name for v in global_vars]))
    with _initialized_session():
      self.assertAllEqual(embedding_values, global_vars[0])
      self.assertAllEqual(expected_lookups, embedding_lookup)
      self.assertAllEqual(expected_lookups_sequence,
                          sequence_embedding_lookup[0].eval())
      # The graph will still have SparseFillEmptyRows due to sequence being
      # a Rank3 embedding lookup.
      if use_safe_embedding_lookup:
        self.assertEqual(2, [
            x.type for x in ops.get_default_graph().get_operations()
        ].count('SparseFillEmptyRows'))
      else:
        self.assertEqual(1, [
            x.type for x in ops.get_default_graph().get_operations()
        ].count('SparseFillEmptyRows'))

  def test_deepcopy(self):
    """deepcopy preserves dimension and max_sequence_length."""
    categorical_column = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_column = tpu_fc.embedding_column_v2(
        categorical_column, dimension=2)
    embedding_column_copy = copy.deepcopy(embedding_column)
    self.assertEqual(embedding_column.dimension,
                     embedding_column_copy.dimension)
    self.assertEqual(embedding_column._max_sequence_length,
                     embedding_column_copy._max_sequence_length)

  def test_with_scope_validation(self):
    """With default scope validation, reusing the column in a new scope raises."""
    categorical_column = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
    embedding_column = tpu_fc._TPUEmbeddingColumnV2(
        categorical_column=categorical_column,
        dimension=embedding_dimension,
        combiner='mean',
        initializer=initializer,
        max_sequence_length=0,
        learning_rate_fn=None,
        use_safe_embedding_lookup=True,
        bypass_scope_validation=False)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    state_manager = _TestStateManager()
    with tpu_function.tpu_shard_context(1):
      with variable_scope.variable_scope('tower1/scope1'):
        embedding_column.create_state(state_manager)
      with variable_scope.variable_scope('tower2/scope2'):
        # With default scope validation, the same column cannot be used in a new
        # variable scope.
        with self.assertRaisesRegex(ValueError,
                                    'the variable scope name is different'):
          embedding_column.create_state(state_manager)

  def test_bypass_scope_validation(self):
    """bypass_scope_validation=True allows create_state in a second scope."""
    categorical_column = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=3)
    embedding_dimension = 2
    initializer = init_ops.truncated_normal_initializer(mean=0.0, stddev=.5)
    embedding_column = tpu_fc._TPUEmbeddingColumnV2(
        categorical_column=categorical_column,
        dimension=embedding_dimension,
        combiner='mean',
        initializer=initializer,
        max_sequence_length=0,
        learning_rate_fn=None,
        use_safe_embedding_lookup=True,
        bypass_scope_validation=True)
    self.assertIs(categorical_column, embedding_column.categorical_column)
    self.assertEqual(embedding_dimension, embedding_column.dimension)
    state_manager = _TestStateManager()
    with tpu_function.tpu_shard_context(1):
      with variable_scope.variable_scope('tower1/scope1'):
        embedding_column.create_state(state_manager)
      with variable_scope.variable_scope('tower2/scope2'):
        embedding_column.create_state(state_manager)
class SharedEmbeddingColumnTestV2(test.TestCase, parameterized.TestCase):
  """Tests for `tpu_fc.shared_embedding_columns_v2`.

  Graph-mode tests; variable names and op counts depend on construction
  order.
  """

  @test_util.deprecated_graph_mode_only
  def test_defaults(self):
    """Defaults: shared table name derived from keys, 'mean' combiner."""
    vocabulary_size = 3
    categorical_column_a = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc_lib.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_dimension = 2
    embedding_column_b, embedding_column_a = tpu_fc.shared_embedding_columns_v2(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
    self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
    self.assertEqual((vocabulary_size, embedding_dimension),
                     embedding_column_a.get_embedding_table_size())
    # Fixed copy-paste bug: this previously asserted on embedding_column_a a
    # second time instead of checking embedding_column_b.
    self.assertEqual((vocabulary_size, embedding_dimension),
                     embedding_column_b.get_embedding_table_size())
    self.assertEqual('mean', embedding_column_a.combiner)
    self.assertEqual('mean', embedding_column_b.combiner)
    self.assertIsNotNone(embedding_column_a.get_initializer())
    self.assertIsNotNone(embedding_column_b.get_initializer())
    self.assertEqual('aaa_bbb_shared_embedding',
                     embedding_column_a.get_embedding_var_name())
    self.assertEqual('aaa_bbb_shared_embedding',
                     embedding_column_b.get_embedding_var_name())
    self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
    self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
    self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
    self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)

  @test_util.deprecated_graph_mode_only
  def test_all_constructor_args(self):
    """Explicitly passed constructor arguments are stored unchanged."""
    vocabulary_size = 3
    categorical_column_a = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc_lib.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_dimension = 2
    embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        combiner='my_combiner',
        initializer=lambda: 'my_initializer',
        shared_embedding_collection_name='var_scope_name')
    self.assertIs(categorical_column_a, embedding_column_a.categorical_column)
    self.assertIs(categorical_column_b, embedding_column_b.categorical_column)
    self.assertEqual((vocabulary_size, embedding_dimension),
                     embedding_column_a.get_embedding_table_size())
    # Fixed copy-paste bug: this previously asserted on embedding_column_a a
    # second time instead of checking embedding_column_b.
    self.assertEqual((vocabulary_size, embedding_dimension),
                     embedding_column_b.get_embedding_table_size())
    self.assertEqual('my_combiner', embedding_column_a.combiner)
    self.assertEqual('my_combiner', embedding_column_b.combiner)
    self.assertEqual('my_initializer', embedding_column_a.get_initializer()())
    self.assertEqual('my_initializer', embedding_column_b.get_initializer()())
    self.assertEqual('var_scope_name',
                     embedding_column_a.get_embedding_var_name())
    self.assertEqual('var_scope_name',
                     embedding_column_b.get_embedding_var_name())
    self.assertEqual('aaa_shared_embedding', embedding_column_a.name)
    self.assertEqual('bbb_shared_embedding', embedding_column_b.name)
    self.assertEqual((embedding_dimension,), embedding_column_a.variable_shape)
    self.assertEqual((embedding_dimension,), embedding_column_b.variable_shape)

  @parameterized.named_parameters(
      {
          'testcase_name': 'use_safe_embedding_lookup',
          'use_safe_embedding_lookup': True
      }, {
          'testcase_name': 'dont_use_safe_embedding_lookup',
          'use_safe_embedding_lookup': False
      })
  @test_util.deprecated_graph_mode_only
  def test_feature_layer_cpu(self, use_safe_embedding_lookup):
    """Shared CPU lookups (dense + sequence) match hand-computed values."""
    # Inputs.
    vocabulary_size = 3
    input_a = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(2, 2))
    input_b = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1]
        # example 2, ids []
        indices=((0, 0), (1, 0), (1, 1)),
        values=(2, 0, 1),
        dense_shape=(3, 2))
    input_features = {'aaa': input_a, 'bbb': input_b}
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.)  # id 2
    )

    def _initializer(shape, dtype, partition_info=None):
      # Check the initializer is called with the expected table shape before
      # returning the deterministic embedding table above.
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Expected lookup result, using combiner='mean'.
    expected_lookups_a = (
        # example 0:
        (7., 11.),  # ids [2], embedding = [7, 11]
        # example 1:
        (2., 3.5),  # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
    )
    expected_lookups_b = (
        # example 0:
        ((7., 11.), (0., 0.),),  # ids [2], embedding = [[7, 11], [0, 0]]
        # example 1:
        ((1., 2.), (3., 5.),),  # ids [0, 1], embedding = [[1, 2], [3, 5]]
        # example 2:
        ((0., 0.), (0., 0.),),  # ids [], embedding = [[0, 0], [0, 0]]
    )
    # Build columns.
    categorical_column_a = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc_lib.sequence_categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_column_a, embedding_column_b = tpu_fc.shared_embedding_columns_v2(
        [categorical_column_a, categorical_column_b],
        dimension=embedding_dimension,
        initializer=_initializer,
        max_sequence_lengths=[0, 2],
        use_safe_embedding_lookup=use_safe_embedding_lookup)
    # Provide sparse input and get dense result.
    dense_features = fc_lib.DenseFeatures([embedding_column_a])
    sequence_features = fc_lib.SequenceFeatures([embedding_column_b])
    embedding_lookup_a = dense_features(input_features)
    embedding_lookup_b = sequence_features(input_features)
    # Assert expected embedding variable and lookups.
    global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
    self.assertItemsEqual(
        ('aaa_bbb_shared_embedding:0',),
        tuple([v.name for v in global_vars]))
    embedding_var = global_vars[0]
    with _initialized_session():
      self.assertAllEqual(embedding_values, embedding_var)
      self.assertAllEqual(expected_lookups_a, embedding_lookup_a)
      self.assertAllEqual(expected_lookups_b,
                          embedding_lookup_b[0].eval())
      # The graph will still have SparseFillEmptyRows due to sequence being
      # a Rank3 embedding lookup.
      if use_safe_embedding_lookup:
        self.assertEqual(2, [
            x.type for x in ops.get_default_graph().get_operations()
        ].count('SparseFillEmptyRows'))
      else:
        self.assertEqual(1, [
            x.type for x in ops.get_default_graph().get_operations()
        ].count('SparseFillEmptyRows'))

  def test_deepcopy(self):
    """deepcopy preserves the shared embedding collection names."""
    vocabulary_size = 3
    categorical_column_a = fc_lib.categorical_column_with_identity(
        key='aaa', num_buckets=vocabulary_size)
    categorical_column_b = fc_lib.categorical_column_with_identity(
        key='bbb', num_buckets=vocabulary_size)
    embedding_dimension = 2
    columns = tpu_fc.shared_embedding_columns_v2(
        [categorical_column_b, categorical_column_a],
        dimension=embedding_dimension)
    columns_copy = copy.deepcopy(columns)
    self.assertEqual(
        [column._shared_embedding_collection_name for column in columns],
        [column._shared_embedding_collection_name for column in columns_copy])
class DeviceSpecificEmbeddingColumnTestV2(test.TestCase,
                                          parameterized.TestCase):
  """Tests for `embedding_lookup_device` / `tensor_core_shape` behavior.

  Graph-mode tests that exercise CPU vs. TPU-embedding-core vs.
  tensor-core lookup paths, including the densification path used for
  inference.
  """

  @parameterized.named_parameters(
      {
          'testcase_name': 'invalid_shared',
          'shared': True,
      }, {
          'testcase_name': 'invalid_not_shared',
          'shared': False,
      })
  @test_util.deprecated_graph_mode_only
  def test_invalid_cases(self, shared):
    """Unsupported device/mode combinations raise ValueError."""
    # Inputs.
    input_sparse_tensor = sparse_tensor.SparseTensorValue(
        indices=((0, 0), (1, 0), (1, 1), (1, 4)),
        values=(2, 0, 1, 3),
        dense_shape=(2, 5))
    input_features = {'inp': input_sparse_tensor}
    # Build columns.
    categorical_column_input = fc_lib.categorical_column_with_identity(
        key='inp', num_buckets=3)
    # Training on TPU with cpu embedding lookups is not supported.
    if shared:
      embedding_column = tpu_fc.shared_embedding_columns_v2(
          [categorical_column_input],
          dimension=2,
          embedding_lookup_device='cpu',
          tensor_core_shape=[None, 3])
    else:
      embedding_column = tpu_fc.embedding_column_v2(
          categorical_column_input,
          dimension=2,
          embedding_lookup_device='cpu',
          tensor_core_shape=[None, 3])
    dense_features = fc_lib.DenseFeatures(embedding_column)
    with self.assertRaisesRegex(
        ValueError,
        r'.*embedding_lookup_device=\"cpu\" during training is not'):
      dense_features(input_features)
    # Inference on with TPU Embedding Hardware is not supported.
    if shared:
      embedding_column = tpu_fc.shared_embedding_columns_v2(
          [categorical_column_input],
          dimension=2,
          embedding_lookup_device='tpu_embedding_core',
          tensor_core_shape=[None, 3])
    else:
      embedding_column = tpu_fc.embedding_column_v2(
          categorical_column_input,
          dimension=2,
          embedding_lookup_device='tpu_embedding_core',
          tensor_core_shape=[None, 3])
    # Enter an inference context so the device check fires.
    context = tpu._TPUInferenceContext('tpu_inference')
    context.Enter()
    dense_features = fc_lib.DenseFeatures(embedding_column)
    with self.assertRaisesRegex(
        ValueError,
        r'Using embedding_lookup_device=tpu_embedding_core during inference is '
    ):
      dense_features(input_features)
    context.Exit()

  @parameterized.named_parameters(
      {
          'testcase_name': 'combiner_mean_shared',
          'shared': True,
          'combiner': 'mean'
      }, {
          'testcase_name': 'combiner_sum_shared',
          'shared': True,
          'combiner': 'sum'
      }, {
          'testcase_name': 'combiner_sqrtn_shared',
          'shared': True,
          'combiner': 'sqrtn'
      }, {
          'testcase_name': 'combiner_mean_not_shared',
          'shared': False,
          'combiner': 'mean'
      }, {
          'testcase_name': 'combiner_sum_not_shared',
          'shared': False,
          'combiner': 'sum'
      }, {
          'testcase_name': 'combiner_sqrtn_not_shared',
          'shared': False,
          'combiner': 'sqrtn'
      })
  @test_util.deprecated_graph_mode_only
  def test_dense_embedding_lookup(self, shared, combiner):
    """Tensor-core (densified) lookup matches hand-computed values per combiner."""
    # Inputs.
    vocabulary_size = 3
    input_sparse_tensor = sparse_tensor.SparseTensorValue(
        # example 0, ids [2]
        # example 1, ids [0, 1, 3]
        indices=((0, 0), (1, 0), (1, 1), (1, 4)),
        values=(2, 0, 1, 3),
        dense_shape=(2, 5))
    input_features = {'inp': input_sparse_tensor}
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.),  # id 2
        (13., 17.)  # id 3
    )

    def _initializer(shape, dtype, partition_info=None):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Build columns.
    categorical_column_input = fc_lib.categorical_column_with_identity(
        key='inp', num_buckets=vocabulary_size)
    # Set tensor_core_shape to be [None, 3] to ensure a dynamic batch size.
    # NOTE(review): this comment previously claimed [None, 20]; with width 3
    # the 5-wide input is truncated rather than padded — confirm intent.
    if shared:
      embedding_column = tpu_fc.shared_embedding_columns_v2(
          [categorical_column_input],
          dimension=embedding_dimension,
          initializer=_initializer,
          combiner=combiner,
          embedding_lookup_device='tpu_tensor_core',
          tensor_core_shape=[None, 3])
    else:
      embedding_column = tpu_fc.embedding_column_v2(
          categorical_column_input,
          dimension=embedding_dimension,
          initializer=_initializer,
          combiner=combiner,
          embedding_lookup_device='tpu_tensor_core',
          tensor_core_shape=[None, 3])
    # Run in TPUContexts so that we hit the intended densification case.
    context = tpu._TPUInferenceContext('tpu_inference')
    context.Enter()
    with tpu_function.tpu_shard_context(1):
      dense_features = fc_lib.DenseFeatures(embedding_column)
      # Sqrtn combiner not supported for now.
      if combiner == 'sqrtn':
        with self.assertRaisesRegex(
            ValueError, 'Dense TPU Embedding does not support combiner'):
          embedding_lookup = dense_features(input_features)
        return
      if combiner == 'mean':
        expected_lookups = (
            # example 0:
            (7., 11.),  # ids [2], embedding = [7, 11]
            # example 1:
            (2., 3.5),  # ids [0, 1], embedding = mean([1, 2] + [3, 5]) =
            # [2, 3.5]
        )
      elif combiner == 'sum':
        expected_lookups = (
            # example 0:
            (7., 11.),  # ids [2], embedding = [7, 11]
            # example 1:
            (4., 7),  # ids [0, 1], embedding = sum([1, 2] + [3, 5]) = [4, 7]
        )
      embedding_lookup = dense_features(input_features)
      # Assert expected embedding variable and lookups.
      global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      if shared:
        self.assertCountEqual(('inp_shared_embedding:0',),
                              tuple([v.name for v in global_vars]))
      else:
        self.assertCountEqual(
            ('dense_features/inp_embedding/embedding_weights:0',),
            tuple([v.name for v in global_vars]))
      embedding_var = global_vars[0]
      with _initialized_session():
        self.assertAllEqual(embedding_values, embedding_var)
        eval_res = embedding_lookup.eval()
        self.assertAllEqual(expected_lookups, eval_res)
    context.Exit()

  @test_util.deprecated_graph_mode_only
  def test_empty_row(self):
    """An example with no ids yields an all-zero embedding on the dense path."""
    # Inputs.
    vocabulary_size = 3
    input_sparse_tensor = sparse_tensor.SparseTensorValue(
        # example 0, ids []
        # example 1, ids [0, 1, 3]
        indices=((1, 0), (1, 1), (1, 4)),
        values=(0, 1, 3),
        dense_shape=(2, 5))
    input_features = {'inp': input_sparse_tensor}
    # Embedding variable.
    embedding_dimension = 2
    embedding_values = (
        (1., 2.),  # id 0
        (3., 5.),  # id 1
        (7., 11.),  # id 2
        (13., 17.)  # id 3
    )

    def _initializer(shape, dtype, partition_info=None):
      self.assertAllEqual((vocabulary_size, embedding_dimension), shape)
      self.assertEqual(dtypes.float32, dtype)
      self.assertIsNone(partition_info)
      return embedding_values

    # Build columns.
    categorical_column_input = fc_lib.categorical_column_with_identity(
        key='inp', num_buckets=vocabulary_size)
    # Set tensor_core_shape to be [None, 3] to ensure a dynamic batch size.
    # NOTE(review): this comment previously claimed [None, 20]; with width 3
    # the 5-wide input is truncated rather than padded — confirm intent.
    embedding_column = tpu_fc.embedding_column_v2(
        categorical_column_input,
        dimension=embedding_dimension,
        initializer=_initializer,
        combiner='mean',
        embedding_lookup_device='tpu_tensor_core',
        tensor_core_shape=[None, 3])
    # Run in TPUContexts so that we hit the intended densification case.
    context = tpu._TPUInferenceContext('tpu_inference')
    context.Enter()
    with tpu_function.tpu_shard_context(1):
      dense_features = fc_lib.DenseFeatures(embedding_column)
      expected_lookups = (
          # example 0:
          (0., 0.),  # ids [], embedding = [0, 0]
          # example 1:
          (2., 3.5),  # ids [0, 1], embedding = mean([1, 2] + [3, 5]) = [2, 3.5]
      )
      embedding_lookup = dense_features(input_features)
      # Assert expected embedding variable and lookups.
      global_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
      self.assertCountEqual(
          ('dense_features/inp_embedding/embedding_weights:0',),
          tuple([v.name for v in global_vars]))
      embedding_var = global_vars[0]
      with _initialized_session():
        self.assertAllEqual(embedding_values, embedding_var)
        eval_res = embedding_lookup.eval()
        self.assertAllEqual(expected_lookups, eval_res)
    context.Exit()

  @test_util.deprecated_graph_mode_only
  def test_error_dense_shape_invalid(self):
    """A tensor_core_shape that is not rank 2 raises ValueError."""
    categorical_column_input = fc_lib.categorical_column_with_identity(
        key='inp', num_buckets=5)
    with self.assertRaisesRegex(ValueError, 'tensor_core_shape must be size 2'):
      tpu_fc.shared_embedding_columns_v2([categorical_column_input],
                                         dimension=20,
                                         tensor_core_shape=[None, 20, 15])
if __name__ == '__main__':
  # Delegate to the TensorFlow test runner (flag parsing + test discovery).
  test.main()
| {
"content_hash": "1083e1ba309967ea0ad723066d6e47cf",
"timestamp": "",
"source": "github",
"line_count": 715,
"max_line_length": 80,
"avg_line_length": 39.38461538461539,
"alnum_prop": 0.6354048295454545,
"repo_name": "aldian/tensorflow",
"id": "93f65d6e1c4bec623c3a8e9d4b5b81311b2b36d5",
"size": "28838",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "tensorflow/python/tpu/feature_column_v2_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django.template import Context, Template
from django.template.loader import get_template
from taggit_templatetags.tests.models import AlphaModel, BetaModel
from taggit.tests.tests import BaseTaggingTest
from taggit_templatetags.templatetags.taggit_extras import get_weight_fun
class SetUpTestCase():
    """Mixin that seeds both tagged test models with fixture objects."""

    a_model = AlphaModel
    b_model = BetaModel

    def setUp(self):
        """Create two objects per model and attach their tags."""
        apple = self.a_model.objects.create(name="apple")
        pear = self.a_model.objects.create(name="pear")
        dog = self.b_model.objects.create(name="dog")
        kitty = self.b_model.objects.create(name="kitty")
        for tag in ("green", "sweet", "fresh"):
            apple.tags.add(tag)
        for tag in ("yellow", "sour"):
            pear.tags.add(tag)
        for tag in ("sweet", "yellow"):
            dog.tags.add(tag)
        for tag in ("sweet", "green"):
            kitty.tags.add(tag)
class TestWeightFun(TestCase):
    """Exercise the linear scaling function built by get_weight_fun."""

    def test_one(self):
        """Map frequencies 10..20 onto weights 1..6."""
        weight_fun = get_weight_fun(1, 6, 10, 20)
        self.assertEqual(weight_fun(20), 6)
        self.assertEqual(weight_fun(10), 1)
        self.assertEqual(weight_fun(15), 3.5)

    def test_two(self):
        """Map frequencies 5..7 onto weights 10..100."""
        weight_fun = get_weight_fun(10, 100, 5, 7)
        self.assertEqual(weight_fun(5), 10)
        self.assertEqual(weight_fun(7), 100)
        self.assertEqual(weight_fun(6), 55)
class TemplateTagListTestCase(SetUpTestCase, BaseTaggingTest, TestCase):
    """Render {% get_taglist %} at project, app and model scope."""

    def get_template(self, argument):
        return """ {%% load taggit_extras %%}
        {%% get_taglist %s %%}
        """ % argument

    def test_project(self):
        """No scope argument: all tags from every model."""
        context = Context({})
        Template(self.get_template("as taglist")).render(context)
        self.assert_tags_equal(
            context.get("taglist"),
            ["sweet", "green", "yellow", "fresh", "sour"], False)

    def test_app(self):
        """App scope still sees all tags of the 'tests' app."""
        context = Context({})
        Template(self.get_template("as taglist for 'tests'")).render(context)
        self.assert_tags_equal(
            context.get("taglist"),
            ["sweet", "green", "yellow", "fresh", "sour"], False)

    def test_model(self):
        """Model scope restricts the list to BetaModel's tags."""
        context = Context({})
        Template(
            self.get_template("as taglist for 'tests.BetaModel'")).render(context)
        self.assert_tags_equal(
            context.get("taglist"), ["sweet", "green", "yellow"], False)
class TemplateTagCloudTestCase(SetUpTestCase, BaseTaggingTest, TestCase):
    """Render {% get_tagcloud %} and check the computed tag weights."""

    def get_template(self, argument):
        return """ {%% load taggit_extras %%}
        {%% get_tagcloud %s %%}
        """ % argument

    def test_project(self):
        """Project-wide cloud: weights reflect overall tag frequency."""
        context = Context({})
        Template(self.get_template("as taglist")).render(context)
        tags = context.get("taglist")
        self.assert_tags_equal(
            tags, ["fresh", "green", "sour", "sweet", "yellow"], False)
        self.assertEqual(tags[3].name, "sweet")
        self.assertEqual(tags[3].weight, 6.0)
        self.assertEqual(tags[1].name, "green")
        self.assertEqual(tags[1].weight, 3.5)
        self.assertEqual(tags[2].name, "sour")
        self.assertEqual(tags[2].weight, 1.0)

    def test_app(self):
        """App-scoped cloud matches the project-wide cloud for 'tests'."""
        context = Context({})
        Template(self.get_template("as taglist for 'tests'")).render(context)
        tags = context.get("taglist")
        self.assert_tags_equal(
            tags, ["fresh", "green", "sour", "sweet", "yellow"], False)
        self.assertEqual(tags[3].name, "sweet")
        self.assertEqual(tags[3].weight, 6.0)
        self.assertEqual(tags[1].name, "green")
        self.assertEqual(tags[1].weight, 3.5)
        self.assertEqual(tags[2].name, "sour")
        self.assertEqual(tags[2].weight, 1.0)

    def test_model(self):
        """Model-scoped cloud only weighs BetaModel's tags."""
        context = Context({})
        Template(
            self.get_template("as taglist for 'tests.BetaModel'")).render(context)
        tags = context.get("taglist")
        self.assert_tags_equal(tags, ["green", "sweet", "yellow"], False)
        self.assertEqual(tags[0].name, "green")
        self.assertEqual(tags[0].weight, 1.0)
        self.assertEqual(tags[1].name, "sweet")
        self.assertEqual(tags[1].weight, 6.0)
        self.assertEqual(tags[2].name, "yellow")
        self.assertEqual(tags[2].weight, 1.0)
class TemplateInclusionTagTest(SetUpTestCase, TestCase, BaseTaggingTest):
    """Render the bundled inclusion templates and inspect their context."""

    def _rendered_tags(self, template_name, forvar):
        """Render `template_name` with `forvar` and return the context tags."""
        context = Context({'forvar': forvar})
        get_template(template_name).render(context)
        return context.get("tags")

    def test_taglist_project(self):
        tags = self._rendered_tags(
            'taggit_templatetags/taglist_include.html', None)
        self.assert_tags_equal(
            tags, ["sweet", "green", "yellow", "fresh", "sour"], False)

    def test_taglist_app(self):
        tags = self._rendered_tags(
            'taggit_templatetags/taglist_include.html', 'tests')
        self.assert_tags_equal(
            tags, ["sweet", "green", "yellow", "fresh", "sour"], False)

    def test_taglist_model(self):
        tags = self._rendered_tags(
            'taggit_templatetags/taglist_include.html', 'tests.BetaModel')
        self.assert_tags_equal(tags, ["sweet", "green", "yellow"], False)

    def test_tagcloud_project(self):
        tags = self._rendered_tags(
            'taggit_templatetags/tagcloud_include.html', None)
        self.assert_tags_equal(
            tags, ["fresh", "green", "sour", "sweet", "yellow"], False)

    def test_tagcloud_app(self):
        tags = self._rendered_tags(
            'taggit_templatetags/tagcloud_include.html', 'tests')
        self.assert_tags_equal(
            tags, ["fresh", "green", "sour", "sweet", "yellow"], False)

    def test_tagcloud_model(self):
        tags = self._rendered_tags(
            'taggit_templatetags/tagcloud_include.html', 'tests.BetaModel')
        self.assert_tags_equal(tags, ["green", "sweet", "yellow"], False)
class AlphaPathologicalCaseTestCase(TestCase, BaseTaggingTest):
    """
    This is a testcase for one tag once.
    """
    a_model = AlphaModel

    def setUp(self):
        """A single object carrying a single tag."""
        self.a_model.objects.create(name="apple").tags.add("green")

    def test_tagcloud(self):
        """A lone tag gets the maximum weight."""
        context = Context({'forvar': None})
        get_template('taggit_templatetags/tagcloud_include.html').render(context)
        tags = context.get("tags")
        self.assert_tags_equal(tags, ["green"], False)
        self.assertEqual(tags[0].name, "green")
        self.assertEqual(tags[0].weight, 6.0)
class BetaPathologicalCaseTestCase(TestCase, BaseTaggingTest):
    """
    This is a testcase for one tag thrice.
    """
    a_model = AlphaModel
    b_model = BetaModel

    def setUp(self):
        """Three objects (two models) all sharing the same tag."""
        for model, name in ((self.a_model, "apple"),
                            (self.a_model, "pear"),
                            (self.b_model, "dog")):
            model.objects.create(name=name).tags.add("green")

    def test_tagcloud(self):
        """A tag used on every object still gets the maximum weight."""
        context = Context({'forvar': None})
        get_template('taggit_templatetags/tagcloud_include.html').render(context)
        tags = context.get("tags")
        self.assert_tags_equal(tags, ["green"], False)
        self.assertEqual(tags[0].name, "green")
        self.assertEqual(tags[0].weight, 6.0)
class GammaPathologicalCaseTestCase(TestCase, BaseTaggingTest):
    """
    This is a pathological testcase for no tag at all.
    """
    a_model = AlphaModel
    b_model = BetaModel

    def setUp(self):
        """Create one object per model, deliberately leaving both untagged."""
        # The objects were previously bound to unused locals (a1/b1); only
        # their existence in the database matters for this test.
        self.a_model.objects.create(name="apple")
        self.b_model.objects.create(name="dog")

    def test_tagcloud(self):
        """The tag cloud of an untagged database must be empty."""
        t = get_template('taggit_templatetags/tagcloud_include.html')
        c = Context({'forvar': None})
        t.render(c)
        self.assert_tags_equal(c.get("tags"), [], False)
| {
"content_hash": "4ec6604db56768ef350255ff997da106",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 102,
"avg_line_length": 38.291666666666664,
"alnum_prop": 0.5763511062749366,
"repo_name": "feuervogel/django-taggit-templatetags",
"id": "011828d3c8f41aae6ab0fb31314a68719b87e537",
"size": "8271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taggit_templatetags/tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "14497"
}
],
"symlink_target": ""
} |
import sys

# Prefer setuptools; fall back to distutils so the package can still be
# installed on minimal Python installations.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# typing library was introduced as a core module in version 3.5.0
requires = ["dirlistproc", "jsonasobj", "pyjxslt", "PyLD", "rdflib", "yadict-compare"]
if sys.version_info < (3, 5):
    # Older interpreters need the backported ``typing`` package from PyPI.
    requires.append("typing")

setup(
    name='dbgap',
    version='0.2.1',
    packages=['dbgap'],
    url='http://github.com/crDDI/dbgap',
    license='BSD 3-Clause license',
    author='Harold Solbrig',
    author_email='solbrig.harold@mayo.edu',
    description='dbGaP to bioCaddie conversion utility',
    long_description='A set of utilities for transforming dbGaP to bioCaddie RDF',
    install_requires=requires,
    scripts=['scripts/download_study'],
    classifiers=[
        'Topic :: Software Development :: Libraries',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3 :: Only']
)
| {
"content_hash": "a8240c00960b3ecf4ca7515e9e368ca8",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 33.142857142857146,
"alnum_prop": 0.6724137931034483,
"repo_name": "crDDI/dbgap",
"id": "41848bcccd45a6bbcc48dddc463949c9ceb2dde9",
"size": "929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "35359"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, print_function
import os
import datetime
import numpy as np
from sunpy.util.cond_dispatch import ConditionalDispatch
from sunpy.spectra.spectrogram import LinearTimeSpectrogram, REFERENCE, get_day
__all__ = ['SWavesSpectrogram']
class SWavesSpectrogram(LinearTimeSpectrogram):
    """Linear-time spectrogram for STEREO/WAVES (S/WAVES) radio data."""

    # Reuse the parent's conditional-dispatch machinery so ``create`` accepts
    # the same input forms as LinearTimeSpectrogram.
    _create = ConditionalDispatch.from_existing(LinearTimeSpectrogram._create)
    create = classmethod(_create.wrapper())
    # Properties carried over on copy; ``bg`` (background) is shared by
    # reference rather than copied.
    COPY_PROPERTIES = LinearTimeSpectrogram.COPY_PROPERTIES + [
        ('bg', REFERENCE)
    ]

    @staticmethod
    def swavesfile_to_date(filename):
        """Return the observation date parsed from a SWAVES filename.

        The date is taken from the third underscore-separated field of the
        basename (e.g. ``swaves_average_20120705_a_hfr.dat`` -> 2012-07-05).
        """
        _, name = os.path.split(filename)
        date = name.split('_')[2]
        return datetime.datetime(
            int(date[0:4]), int(date[4:6]), int(date[6:])
        )

    @classmethod
    def read(cls, filename, **kwargs):
        """Read a SWAVES ASCII data file and return a new SWavesSpectrogram.

        Note: despite an earlier docstring saying "FITS file", the file is
        plain text parsed with ``np.genfromtxt``.
        """
        data = np.genfromtxt(filename, skip_header=2)
        # First column is the time axis; the factor of 60 appears to convert
        # minutes to seconds — TODO confirm against the file format spec.
        time_axis = data[:, 0] * 60.
        data = data[:, 1:].transpose()
        # The two skipped header rows hold the frequency axis and background.
        header = np.genfromtxt(filename, skip_footer=time_axis.size)
        freq_axis = header[0, :]
        bg = header[1, :]
        start = cls.swavesfile_to_date(filename)
        end = start + datetime.timedelta(seconds=time_axis[-1])
        t_delt = 60.
        # Seconds since midnight of the observation day.
        t_init = (start - get_day(start)).seconds
        content = ''
        t_label = 'Time [UT]'
        f_label = 'Frequency [KHz]'
        # Reverse the frequency axis together with the matching data rows.
        freq_axis = freq_axis[::-1]
        data = data[::-1, :]
        return cls(data, time_axis, freq_axis, start, end, t_init, t_delt,
                   t_label, f_label, content, bg)

    def __init__(self, data, time_axis, freq_axis, start, end,
                 t_init, t_delt, t_label, f_label, content, bg):
        # Because of how object creation works, there is no avoiding
        # unused arguments in this case.
        # pylint: disable=W0613
        super(SWavesSpectrogram, self).__init__(
            data, time_axis, freq_axis, start, end,
            t_init, t_delt, t_label, f_label,
            content, set(["SWAVES"])
        )
        # Per-frequency background values read from the file header.
        self.bg = bg
# Attach the generated usage documentation to `create`.  On Python 2 the
# classmethod's function is reached via `im_func`; Python 3 removed
# unbound methods, so fall back to `__func__` there.
try:
    SWavesSpectrogram.create.im_func.__doc__ = (
        """ Create SWavesSpectrogram from given input dispatching to the
    appropriate from_* function.
    Possible signatures:
    """ + SWavesSpectrogram._create.generate_docs())
except AttributeError:
    SWavesSpectrogram.create.__func__.__doc__ = (
        """ Create SWavesSpectrogram from given input dispatching to the
    appropriate from_* function.
    Possible signatures:
    """ + SWavesSpectrogram._create.generate_docs())
if __name__ == "__main__":
    # Ad-hoc manual test; note the path is hard-coded to a developer machine.
    opn = SWavesSpectrogram.read("/home/florian/swaves_average_20120705_a_hfr.dat")
    opn.plot(min_=0, linear=False).show()
    print("Press return to exit")
| {
"content_hash": "4c89b0d143551543d42fd52b328c7f30",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 83,
"avg_line_length": 33.023529411764706,
"alnum_prop": 0.6152475952974706,
"repo_name": "Alex-Ian-Hamilton/sunpy",
"id": "8957138268fb89b8be30888947a79f7806536dfd",
"size": "2884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sunpy/spectra/sources/swaves.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "72909"
},
{
"name": "Python",
"bytes": "1505795"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
} |
"""
This module holds controller components of
a typical glim app.
"""
class Controller(object):
    """Base class for request handlers in a glim app.

    A controller is handed the thread-safe bottle request/response pair
    and exposes them as attributes for handler methods to use when
    producing a response.

    Attributes
    ----------
    request (bottle.request): Thread safe bottle request object
    response (bottle.response): Thread safe bottle response object
    """

    def __init__(self, request, response):
        """Store the request/response pair on the instance."""
        self.request, self.response = request, response
| {
"content_hash": "01ec8a9a224d5339af8c2cc4895ec1d0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 68,
"avg_line_length": 28.27777777777778,
"alnum_prop": 0.6797642436149313,
"repo_name": "aacanakin/glim",
"id": "d6ce911b42f9dc2bd34b2d6d39261b26acfa0272",
"size": "509",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glim/controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48234"
}
],
"symlink_target": ""
} |
"""
sentry.coreapi
~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# TODO: We should make the API a class, and UDP/HTTP just inherit from it
# This will make it so we can more easily control logging with various
# metadata (rather than generic log messages which aren't useful).
from datetime import datetime, timedelta
import base64
import logging
import uuid
import zlib
from django.conf import settings
from django.utils.encoding import smart_str
from sentry.app import env
from sentry.constants import (
DEFAULT_LOG_LEVEL, LOG_LEVELS, MAX_MESSAGE_LENGTH, MAX_CULPRIT_LENGTH,
MAX_TAG_VALUE_LENGTH, MAX_TAG_KEY_LENGTH)
from sentry.exceptions import InvalidTimestamp
from sentry.models import Project, ProjectKey
from sentry.tasks.store import preprocess_event
from sentry.utils import is_float, json
from sentry.utils.auth import parse_auth_header
from sentry.utils.imports import import_string
from sentry.utils.strings import decompress, truncatechars
logger = logging.getLogger('sentry.coreapi.errors')
LOG_LEVEL_REVERSE_MAP = dict((v, k) for k, v in LOG_LEVELS.iteritems())
INTERFACE_ALIASES = {
'exception': 'sentry.interfaces.Exception',
'request': 'sentry.interfaces.Http',
'user': 'sentry.interfaces.User',
'stacktrace': 'sentry.interfaces.Stacktrace',
'template': 'sentry.interfaces.Template',
}
RESERVED_FIELDS = (
'project',
'event_id',
'message',
'checksum',
'culprit',
'level',
'time_spent',
'logger',
'server_name',
'site',
'timestamp',
'extra',
'modules',
'tags',
'platform',
)
class APIError(Exception):
    """Base error for the store API: carries an HTTP status and message."""

    http_status = 400
    msg = 'Invalid request'

    def __init__(self, msg=None):
        # Only shadow the class-level default when a message is supplied.
        if msg:
            self.msg = msg

    def __str__(self):
        message = self.msg
        return message if message else ''
class APIUnauthorized(APIError):
    """Missing or invalid credentials (HTTP 401)."""
    http_status = 401
    msg = 'Unauthorized'
class APIForbidden(APIError):
    """Credentials present but not permitted for the request (HTTP 403)."""
    http_status = 403
class APITimestampExpired(APIError):
    """Signed request timestamp no longer valid (HTTP 410)."""
    http_status = 410
class APIRateLimited(APIError):
    """Event rejected because the project is rate limited (HTTP 429)."""
    http_status = 429
    msg = 'Creation of this event was denied due to rate limiting.'
def get_interface(name):
    """Import and return the interface class registered under ``name``.

    Raises ValueError when the name is not whitelisted in
    settings.SENTRY_ALLOWED_INTERFACES or cannot be imported.
    """
    if name not in settings.SENTRY_ALLOWED_INTERFACES:
        raise ValueError
    try:
        return import_string(name)
    except Exception:
        raise ValueError('Unable to load interface: %s' % (name,))
def client_metadata(client=None, project=None, exception=None, tags=None, extra=None):
    """Build the kwargs dict used when logging bad client submissions.

    Returns ``{'extra': ...}`` plus ``'exc_info': True`` when an
    exception is supplied, so callers can write
    ``logger.info(msg, **client_metadata(...))``.
    """
    extra = extra if extra else {}
    tags = tags if tags else {}

    extra['client'] = client
    extra['request'] = env.request
    extra['tags'] = tags

    if project:
        # Record which project (and team, when present) the event targeted.
        extra['project_slug'] = project.slug
        extra['project_id'] = project.id
        if project.team:
            extra['team_slug'] = project.team.slug
            extra['team_id'] = project.team.id

    tags['client'] = client
    if exception:
        tags['exc_type'] = type(exception).__name__
    if project and project.team:
        tags['project'] = '%s/%s' % (project.team.slug, project.slug)

    result = {'extra': extra}
    if exception:
        result['exc_info'] = True
    return result
def extract_auth_vars(request):
    """Pull the sentry_* auth parameters out of a request.

    Checks the X-Sentry-Auth header first, then Authorization, and
    finally falls back to ``sentry_``-prefixed query-string parameters.
    """
    if request.META.get('HTTP_X_SENTRY_AUTH', '').startswith('Sentry'):
        return parse_auth_header(request.META['HTTP_X_SENTRY_AUTH'])
    elif request.META.get('HTTP_AUTHORIZATION', '').startswith('Sentry'):
        return parse_auth_header(request.META['HTTP_AUTHORIZATION'])
    else:
        return dict(
            (k, request.GET[k])
            for k in request.GET.iterkeys()
            if k.startswith('sentry_')
        )
def project_from_auth_vars(auth_vars):
    """Resolve ``(project, user)`` from sentry_key/sentry_secret auth vars.

    Raises APIForbidden for a missing or unknown public key, or when the
    supplied secret does not match.
    """
    public_key = auth_vars.get('sentry_key')
    if not public_key:
        raise APIForbidden('Invalid api key')

    try:
        key = ProjectKey.objects.get_from_cache(public_key=public_key)
    except ProjectKey.DoesNotExist:
        raise APIForbidden('Invalid api key')

    # When no sentry_secret was sent, the default compares the secret
    # with itself and the check is effectively skipped.
    if key.secret_key != auth_vars.get('sentry_secret', key.secret_key):
        raise APIForbidden('Invalid api key')

    return Project.objects.get_from_cache(pk=key.project_id), key.user
def decode_and_decompress_data(encoded_data):
    """Decode a client payload: zlib+base64 first, plain base64 fallback.

    Raises APIForbidden when the payload cannot be decoded either way.
    """
    try:
        try:
            return decompress(encoded_data)
        except zlib.error:
            return base64.b64decode(encoded_data)
    except Exception, e:
        # This error should be caught as it suggests that there's a
        # bug somewhere in the client's code.
        logger.info(e, **client_metadata(exception=e))
        raise APIForbidden('Bad data decoding request (%s, %s)' % (
            e.__class__.__name__, e))
def safely_load_json_string(json_string):
    """Parse a JSON object string, raising APIForbidden on bad input."""
    try:
        obj = json.loads(json_string)
    except Exception, e:
        # This error should be caught as it suggests that there's a
        # bug somewhere in the client's code.
        logger.info(e, **client_metadata(exception=e))
        raise APIForbidden('Bad data reconstructing object (%s, %s)' % (
            e.__class__.__name__, e))

    # XXX: ensure keys are coerced to strings
    return dict((smart_str(k), v) for k, v in obj.iteritems())
def ensure_valid_project_id(desired_project, data, client=None):
    """Normalize ``data['project']`` against the authenticated project.

    Raises APIForbidden when the payload names a project other than the
    one the credentials are bound to.  With no bound project (master
    key) the default project id 1 is used.
    """
    claimed = data.get('project')
    if desired_project and claimed:
        # The payload's project must match the signed project by id or slug.
        allowed = [str(desired_project.id), desired_project.slug]
        if str(claimed) not in allowed:
            logger.info(
                'Project ID mismatch: %s != %s', desired_project.id, desired_project.slug,
                **client_metadata(client))
            raise APIForbidden('Invalid credentials')
        data['project'] = desired_project.id
    elif not desired_project:
        data['project'] = 1
    elif not claimed:
        data['project'] = desired_project.id
def process_data_timestamp(data):
    """Coerce ``data['timestamp']`` to a datetime and reject future values.

    Accepts a unix epoch (anything is_float accepts) or an ISO-8601-ish
    string, optionally with fractional seconds and a trailing 'Z'.
    Raises InvalidTimestamp for unparsable values or timestamps more
    than a minute in the future.
    """
    ts = data['timestamp']
    if is_float(ts):
        try:
            data['timestamp'] = datetime.fromtimestamp(float(ts))
        except Exception:
            raise InvalidTimestamp('Invalid value for timestamp: %r' % data['timestamp'])
    elif not isinstance(ts, datetime):
        fmt = '%Y-%m-%dT%H:%M:%S.%f' if '.' in ts else '%Y-%m-%dT%H:%M:%S'
        if 'Z' in ts:
            # support the UTC 'Z' marker, but no other timezone suffixes
            fmt += 'Z'
        try:
            data['timestamp'] = datetime.strptime(ts, fmt)
        except Exception:
            raise InvalidTimestamp('Invalid value for timestamp: %r' % data['timestamp'])

    # Allow up to a minute of client clock skew, nothing more.
    if data['timestamp'] > datetime.now() + timedelta(minutes=1):
        raise InvalidTimestamp('Invalid value for timestamp (in future): %r' % data['timestamp'])

    return data
def validate_data(project, data, client=None):
    """Validate and normalize a raw client event payload in place.

    Enforces length limits, coerces tag and level values, drops
    malformed optional fields (logging each drop), and expands
    interface payloads through their validate()/serialize() cycle.
    Returns the mutated ``data`` dict.
    """
    ensure_valid_project_id(project, data, client=client)

    if not data.get('message'):
        data['message'] = '<no message value>'
    elif not isinstance(data['message'], basestring):
        raise APIError('Invalid value for message')
    elif len(data['message']) > MAX_MESSAGE_LENGTH:
        logger.info(
            'Truncated value for message due to length (%d chars)',
            len(data['message']), **client_metadata(client, project))
        data['message'] = truncatechars(data['message'], MAX_MESSAGE_LENGTH)

    if data.get('culprit') and len(data['culprit']) > MAX_CULPRIT_LENGTH:
        logger.info(
            'Truncated value for culprit due to length (%d chars)',
            len(data['culprit']), **client_metadata(client, project))
        data['culprit'] = truncatechars(data['culprit'], MAX_CULPRIT_LENGTH)

    # Missing or over-long event ids are replaced with a fresh uuid4 hex.
    if not data.get('event_id'):
        data['event_id'] = uuid.uuid4().hex
    if len(data['event_id']) > 32:
        logger.info(
            'Discarded value for event_id due to length (%d chars)',
            len(data['event_id']), **client_metadata(client, project))
        data['event_id'] = uuid.uuid4().hex

    if 'timestamp' in data:
        try:
            process_data_timestamp(data)
        except InvalidTimestamp, e:
            # Log the error, remove the timestamp, and continue
            logger.info(
                'Discarded invalid value for timestamp: %r', data['timestamp'],
                **client_metadata(client, project, exception=e))
            del data['timestamp']

    if data.get('modules') and type(data['modules']) != dict:
        logger.info(
            'Discarded invalid type for modules: %s',
            type(data['modules']), **client_metadata(client, project))
        del data['modules']

    if data.get('extra') is not None and type(data['extra']) != dict:
        logger.info(
            'Discarded invalid type for extra: %s',
            type(data['extra']), **client_metadata(client, project))
        del data['extra']

    # Tags may arrive as a dict or a sequence of pairs; anything else is dropped.
    if data.get('tags') is not None:
        if type(data['tags']) == dict:
            data['tags'] = data['tags'].items()
        elif not isinstance(data['tags'], (list, tuple)):
            logger.info(
                'Discarded invalid type for tags: %s',
                type(data['tags']), **client_metadata(client, project))
            del data['tags']

    if data.get('tags'):
        # remove any values which are over 32 characters
        tags = []
        for k, v in data['tags']:
            if not isinstance(k, basestring):
                try:
                    k = unicode(k)
                except Exception:
                    logger.info('Discarded invalid tag key: %r',
                                type(k), **client_metadata(client, project))
                    continue
            if not isinstance(v, basestring):
                try:
                    v = unicode(v)
                except Exception:
                    logger.info('Discarded invalid tag value: %s=%r',
                                k, type(v), **client_metadata(client, project))
                    continue
            if len(k) > MAX_TAG_KEY_LENGTH or len(v) > MAX_TAG_VALUE_LENGTH:
                logger.info('Discarded invalid tag: %s=%s',
                            k, v, **client_metadata(client, project))
                continue
            tags.append((k, v))
        data['tags'] = tags

    # Every non-reserved key is treated as an interface payload.
    for k in data.keys():
        if k in RESERVED_FIELDS:
            continue

        if not data[k]:
            logger.info(
                'Ignored empty interface value: %s', k,
                **client_metadata(client, project))
            del data[k]
            continue

        import_path = INTERFACE_ALIASES.get(k, k)

        if '.' not in import_path:
            logger.info(
                'Ignored unknown attribute: %s', k,
                **client_metadata(client, project))
            del data[k]
            continue

        try:
            interface = get_interface(import_path)
        except ValueError:
            logger.info(
                'Invalid unknown attribute: %s', k,
                **client_metadata(client, project))
            del data[k]
            continue

        value = data.pop(k)
        try:
            # HACK: exception allows you to pass the value as a list
            # so let's try to actually support that
            if isinstance(value, dict):
                inst = interface(**value)
            else:
                inst = interface(value)
            inst.validate()
            data[import_path] = inst.serialize()
        except Exception, e:
            # Assertion failures are client errors (info); everything
            # else may be a server-side interface bug (error).
            if isinstance(e, AssertionError):
                log = logger.info
            else:
                log = logger.error
            log('Discarded invalid value for interface: %s', k,
                **client_metadata(client, project, exception=e, extra={'value': value}))

    level = data.get('level') or DEFAULT_LOG_LEVEL
    if isinstance(level, basestring) and not level.isdigit():
        # assume it's something like 'warning'
        try:
            data['level'] = LOG_LEVEL_REVERSE_MAP[level]
        except KeyError, e:
            logger.info(
                'Discarded invalid logger value: %s', level,
                **client_metadata(client, project, exception=e))
            data['level'] = LOG_LEVEL_REVERSE_MAP.get(
                DEFAULT_LOG_LEVEL, DEFAULT_LOG_LEVEL)

    return data
def ensure_has_ip(data, ip_address):
    """Fill in the client IP on the User interface when it is absent.

    Leaves ``data`` untouched if the Http env already carries a
    REMOTE_ADDR or the User interface already has an ip_address.
    """
    http_env = data.get('sentry.interfaces.Http', {}).get('env', {})
    user = data.get('sentry.interfaces.User', {})
    if http_env.get('REMOTE_ADDR') or user.get('ip_address'):
        return
    data.setdefault('sentry.interfaces.User', {})['ip_address'] = ip_address
def insert_data_to_database(data):
    """Queue the validated event payload for asynchronous processing."""
    preprocess_event.delay(data=data)
| {
"content_hash": "796b4fc786ec3834b8522154e3658806",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 97,
"avg_line_length": 32.97948717948718,
"alnum_prop": 0.5897994091121131,
"repo_name": "SilentCircle/sentry",
"id": "99bd1bf8c5f6aecab69199ef74a5c47ca53c9437",
"size": "12862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/coreapi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "967481"
},
{
"name": "JavaScript",
"bytes": "789737"
},
{
"name": "Perl",
"bytes": "618"
},
{
"name": "Python",
"bytes": "2671262"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy as sp
import scipy.linalg as LA
from .covar_base import Covariance
from limix.hcache import cached
import pdb
import logging as LG
class FreeFormCov(Covariance):
    """
    General semi-definite positive matrix with no constraints.

    A free-form covariance matrix of dimension d has 1/2 * d * (d + 1)
    params, which parameterize the lower-triangular Cholesky factor L so
    that K = L L^T (+ jitter * I).
    """

    def __init__(self, dim, jitter=1e-4):
        """
        Args:
            dim:        dimension of the free-form covariance
            jitter:     extent of diagonal offset which is added for numerical stability
                        (default value: 1e-4)
        """
        Covariance.__init__(self, dim)
        self._K_act = True
        self._calcNumberParams()
        self.dim = dim
        self.params = sp.zeros(self.n_params)
        # row/column indices of the lower triangle, in parameter order
        self.idx_r, self.idx_c = sp.tril_indices(self.dim)
        self.set_jitter(jitter)

    #####################
    # Properties
    #####################
    @property
    def variance(self):
        """Diagonal of the covariance matrix K."""
        return self.K().diagonal()

    @property
    def correlation(self):
        """Correlation matrix obtained by normalizing K by its diagonal."""
        R = self.K().copy()
        inv_diag = 1. / sp.sqrt(R.diagonal())[:, sp.newaxis]
        R *= inv_diag
        R *= inv_diag.T
        return R

    @property
    def variance_ste(self):
        """Standard errors of the variances (None until the fisher-information
        inverse has been set via setFIinv)."""
        if self.getFIinv() is None:
            R = None
        else:
            R = self.K_ste().diagonal()
        # IN A VARIANCE / CORRELATION PARAMETRIZATION
        #if self.getFIinv() is None:
        #    R = None
        #else:
        #    R = sp.sqrt(self.getFIinv().diagonal()[:self.dim])
        return R

    @property
    def correlation_ste(self):
        """Standard errors of the correlations.

        NOTE(review): marked TODO upstream -- the loop below never fills
        R, so the method currently returns a zero matrix (or None).
        """
        if self.getFIinv() is None:
            R = None
        else:
            idx_M = sp.zeros((self.dim, self.dim))
            idx_M[sp.tril_indices(self.dim)] = sp.arange(int(0.5 * self.dim * (self.dim + 1)))
            # BUGFIX: sp.zeros was called with the idx_M *array* instead of
            # a shape tuple, which raises TypeError when this property runs.
            R = sp.zeros((self.dim, self.dim))
            for i in range(self.dim):
                for j in range(0, self.dim):
                    ij = idx_M[i, j]  # index of cov_ij_ste from fisher
                    ii = idx_M[i, i]  # index of cov_ii_ste from fisher
                    jj = idx_M[j, j]  # index of cov_jj_ste from fisher
                    #TODO: complete
        # IN A VARIANCE / CORRELATION PARAMETRIZATION
        #if self.getFIinv() is None:
        #    R = None
        #else:
        #    R = sp.zeros((self.dim, self.dim))
        #    R[sp.tril_indices(self.dim, k = -1)] = sp.sqrt(self.getFIinv().diagonal()[self.dim:])
        #    R += R.T
        return R

    @property
    def X(self):
        """The Cholesky factor L (exposed under the generic name X)."""
        return self.L()

    #####################
    # Activation handling
    #####################
    @property
    def act_K(self):
        """Whether the covariance parameters are active (optimized)."""
        return self._K_act

    @act_K.setter
    def act_K(self, act):
        self._K_act = bool(act)
        self._notify()

    #####################
    # Params handling
    #####################
    def setParams(self, params):
        """Set the flattened lower-triangular parameters of L."""
        if not self._K_act and len(params) > 0:
            raise ValueError("Trying to set a parameter via setParams that "
                             "is not active.")
        if self._K_act:
            self.params[:] = params
            self.clear_all()

    def getParams(self):
        """Return the current parameters (empty array when inactive)."""
        if not self._K_act:
            return np.array([])
        return self.params

    def getNumberParams(self):
        """Number of active parameters (0 when the covariance is inactive)."""
        return int(self._K_act) * self.n_params

    def _calcNumberParams(self):
        # d*(d+1)/2 entries in the lower triangle of L
        self.n_params = int(0.5 * self.dim * (self.dim + 1))

    def set_jitter(self, value):
        """Set the diagonal offset added to K for numerical stability."""
        self.jitter = value

    def setCovariance(self, cov):
        """ set hyperparameters from given covariance """
        chol = LA.cholesky(cov, lower=True)
        params = chol[sp.tril_indices(self.dim)]
        self.setParams(params)

    #####################
    # Cached
    #####################
    @cached('covar_base')
    def K(self):
        """Covariance matrix K = L L^T + jitter * I."""
        RV = sp.dot(self.L(), self.L().T) + self.jitter * sp.eye(self.dim)
        return RV

    @cached('covar_base')
    def K_grad_i(self, i):
        """Derivative of K w.r.t. parameter i:
        dK/dtheta_i = L (dL/dtheta_i)^T + (dL/dtheta_i) L^T.
        """
        if not self._K_act:
            raise ValueError("Trying to retrieve the gradient over a "
                             "parameter that is inactive.")
        # BUGFIX: the second term previously called self.L(i); L() takes no
        # index argument (TypeError at runtime) and the symmetric gradient
        # requires L().T here.
        RV = sp.dot(self.L(), self.Lgrad(i).T) + sp.dot(self.Lgrad(i), self.L().T)
        return RV

    @cached
    def K_hess_i_j(self, i, j):
        """Second derivative of K w.r.t. parameters i and j."""
        if not self._K_act:
            raise ValueError("Trying to retrieve the gradient over a "
                             "parameter that is inactive.")
        RV = sp.dot(self.Lgrad(i), self.Lgrad(j).T)
        RV += RV.T
        return RV

    def K_ste(self):
        """Standard errors of the covariance entries (None until setFIinv)."""
        if self.getFIinv() is None:
            R = None
        else:
            R = sp.zeros((self.dim, self.dim))
            R[sp.tril_indices(self.dim)] = sp.sqrt(self.getFIinv().diagonal())
            # symmetrize
            R = R + R.T - sp.diag(R.diagonal())
        return R

    ####################
    # Interpretable Params
    ####################
    def getInterParams(self):
        """Interpretable parameters: the lower-triangular entries of K."""
        # VARIANCE + CORRELATIONS
        #R1 = self.variance
        #R2 = self.correlation[sp.tril_indices(self.dim, k = -1)]
        #R = sp.concatenate([R1,R2])
        # COVARIANCES
        R = self.K()[sp.tril_indices(self.dim)]
        return R

    # DERIVARIVE WITH RESPECT TO COVARIANCES
    def K_grad_interParam_i(self, i):
        """Derivative of K w.r.t. the i-th interpretable (covariance) param:
        a symmetric indicator matrix for that lower-triangular entry."""
        ix, iy = sp.tril_indices(self.dim)
        ix = ix[i]
        iy = iy[i]
        R = sp.zeros((self.dim, self.dim))
        R[ix, iy] = R[iy, ix] = 1
        return R

    # DERIVARIVE WITH RESPECT TO VARIANCES AND CORRELATIONS
    #def K_grad_interParam_i(self, i):
    #    if i < self.dim:
    #        # derivative with respect to the variance
    #        R = sp.zeros((self.dim,self.dim))
    #        R[i,:] = self.K()[i,:] / (2 * self.variance[i])
    #        R += R.T
    #    else:
    #        # derivarice with respect to a correlation
    #        ## 1. take the corresponding off diagonal element
    #        ix, iy = sp.tril_indices(self.dim, k = -1)
    #        ix = ix[i - self.dim]
    #        iy = iy[i - self.dim]
    #        ## 2. fill it with sqrt(var * var)
    #        R = sp.zeros((self.dim,self.dim))
    #        R[ix,iy] = R[iy,ix] = sp.sqrt(self.variance[ix] * self.variance[iy])
    #    return R

    ######################
    # Private functions
    ######################
    @cached('covar_base')
    def L(self):
        """Lower-triangular Cholesky factor built from the flat params."""
        R = sp.zeros((self.dim, self.dim))
        R[(self.idx_r, self.idx_c)] = self.params
        return R

    @cached
    def Lgrad(self, i):
        """Derivative of L w.r.t. parameter i (single-entry indicator)."""
        R = sp.zeros((self.dim, self.dim))
        R[self.idx_r[i], self.idx_c[i]] = 1
        return R

    def Xgrad(self, i):
        """Derivative of X (= L) w.r.t. parameter i."""
        return self.Lgrad(i)
if __name__ == '__main__':
    # Smoke test: a 2x2 free-form covariance (params start at zero, so K
    # is just the jitter diagonal); print K and its first gradient.
    n = 2
    cov = FreeFormCov(n)
    print((cov.K()))
    print((cov.K_grad_i(0)))
| {
"content_hash": "f95ef7b676e618a0d33d3ccfb8a671eb",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 98,
"avg_line_length": 30,
"alnum_prop": 0.5022123893805309,
"repo_name": "PMBio/limix",
"id": "cfc98a750f23c935680d9cf10cda64e2874a865c",
"size": "6780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "limix/core/covar/freeform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "C",
"bytes": "1550482"
},
{
"name": "C++",
"bytes": "8073525"
},
{
"name": "CMake",
"bytes": "21097"
},
{
"name": "Fortran",
"bytes": "363470"
},
{
"name": "M4",
"bytes": "16520"
},
{
"name": "Makefile",
"bytes": "11605"
},
{
"name": "Matlab",
"bytes": "25435"
},
{
"name": "PowerShell",
"bytes": "3104"
},
{
"name": "Python",
"bytes": "1704175"
},
{
"name": "Roff",
"bytes": "66747"
},
{
"name": "Shell",
"bytes": "15645"
},
{
"name": "TeX",
"bytes": "26251"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python2.7
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import ctypes
import math
import sys
import yaml
import json
# Load the stat declarations that drive all generated output.
with open('src/core/lib/debug/stats_data.yaml') as f:
    attrs = yaml.load(f.read())

# Every stat declaration must carry at least a name and a doc string.
REQUIRED_FIELDS = ['name', 'doc']
def make_type(name, fields):
    """Return a (namedtuple class, empty instance list) pair for a stat kind.

    The namedtuple always includes the REQUIRED_FIELDS plus any extras.
    """
    all_fields = list(set(REQUIRED_FIELDS + fields))
    return (collections.namedtuple(name, ' '.join(all_fields)), [])
def c_str(s, encoding='ascii'):
    """Render a Python string as a double-quoted C string literal.

    Non-printable characters, backslashes and quotes are emitted as
    octal escapes.
    """
    if isinstance(s, unicode):
        s = s.encode(encoding)
    result = ''
    for c in s:
        if not (32 <= ord(c) < 127) or c in ('\\', '"'):
            result += '\\%03o' % ord(c)
        else:
            result += c
    return '"' + result + '"'
# The two kinds of stats that can be declared: plain counters, and
# histograms with a max value and a bucket count.
types = (
    make_type('Counter', []),
    make_type('Histogram', ['max', 'buckets']),
)

# Map type name -> list of declared instances of that type.
inst_map = dict((t[0].__name__, t[1]) for t in types)

stats = []

# Sort each yaml declaration into its type's instance list.
for attr in attrs:
    found = False
    for t, lst in types:
        t_name = t.__name__.lower()
        if t_name in attr:
            name = attr[t_name]
            del attr[t_name]
            lst.append(t(name=name, **attr))
            found = True
            break
    assert found, "Bad decl: %s" % attr
def dbl2u64(d):
    """Reinterpret the bit pattern of a C double as an unsigned 64-bit int."""
    return ctypes.c_ulonglong.from_buffer(ctypes.c_double(d)).value
def shift_works_until(mapped_bounds, shift_bits):
    """Return the first index whose bound collides with its successor after
    right-shifting both by shift_bits (len(mapped_bounds) when none do)."""
    for idx in range(len(mapped_bounds) - 1):
        lo = mapped_bounds[idx] >> shift_bits
        hi = mapped_bounds[idx + 1] >> shift_bits
        if lo == hi:
            return idx
    return len(mapped_bounds)
def find_ideal_shift(mapped_bounds, max_size):
    """Pick the largest right-shift that distinguishes as many leading
    bounds as possible while keeping the lookup table small.

    Returns (shift_bits, bounds_covered, table_size), or None when no
    shift yields a table within max_size (and 65535) entries.
    """
    best = None
    for shift_bits in reversed(range(0,64)):
        n = shift_works_until(mapped_bounds, shift_bits)
        if n == 0: continue
        table_size = mapped_bounds[n-1] >> shift_bits
        if table_size > max_size: continue
        if table_size > 65535: continue
        if best is None:
            best = (shift_bits, n, table_size)
        elif best[1] < n:
            # Prefer the candidate covering more bounds.
            best = (shift_bits, n, table_size)
    print best
    return best
def gen_map_table(mapped_bounds, shift_data):
    """Build the table mapping a shifted value to its bucket index.

    shift_data is (shift_bits, bounds_covered, table_size) as returned
    by find_ideal_shift.
    """
    tbl = []
    cur = 0
    print mapped_bounds
    mapped_bounds = [x >> shift_data[0] for x in mapped_bounds]
    print mapped_bounds
    for i in range(0, mapped_bounds[shift_data[1]-1]):
        while i > mapped_bounds[cur]:
            cur += 1
        tbl.append(cur)
    return tbl
# All interned static tables emitted into stats_data.c, deduplicated.
static_tables = []

def decl_static_table(values, type):
    """Intern a (type, values) table; return its index in static_tables."""
    global static_tables
    v = (type, values)
    for i, vp in enumerate(static_tables):
        if v == vp: return i
    print "ADD TABLE: %s %r" % (type, values)
    r = len(static_tables)
    static_tables.append(v)
    return r
def type_for_uint_table(table):
    """Smallest C unsigned integer type able to hold every table entry."""
    largest = max(table)
    for bits, ctype in ((8, 'uint8_t'), (16, 'uint16_t'), (32, 'uint32_t')):
        if largest < 2 ** bits:
            return ctype
    return 'uint64_t'
def gen_bucket_code(histogram):
    """Emit the C snippet that maps a value to its histogram bucket.

    Bucket boundaries grow geometrically from 1 up to histogram.max.
    Small values map trivially (bucket == value); mid-range values go
    through a shift-based lookup table; everything else falls back to
    the slow binary-search helper.  Returns (code, bounds_table_index).

    Fix vs. original: the unused locals done_unmapped/first_unmapped
    were removed.
    """
    # Build the geometric bucket boundaries.
    bounds = [0, 1]
    done_trivial = False
    first_nontrivial = None
    while len(bounds) < histogram.buckets + 1:
        if len(bounds) == histogram.buckets:
            nextb = int(histogram.max)
        else:
            mul = math.pow(float(histogram.max) / bounds[-1],
                           1.0 / (histogram.buckets + 1 - len(bounds)))
            nextb = int(math.ceil(bounds[-1] * mul))
        if nextb <= bounds[-1] + 1:
            nextb = bounds[-1] + 1
        elif not done_trivial:
            done_trivial = True
            first_nontrivial = len(bounds)
        bounds.append(nextb)
    bounds_idx = decl_static_table(bounds, 'int')
    if done_trivial:
        first_nontrivial_code = dbl2u64(first_nontrivial)
        code_bounds = [dbl2u64(x) - first_nontrivial_code for x in bounds]
        shift_data = find_ideal_shift(code_bounds[first_nontrivial:], 256 * histogram.buckets)
    #print first_nontrivial, shift_data, bounds
    #if shift_data is not None: print [hex(x >> shift_data[0]) for x in code_bounds[first_nontrivial:]]
    code = 'value = GPR_CLAMP(value, 0, %d);\n' % histogram.max
    # NOTE(review): shift_data/code_bounds are only assigned when
    # done_trivial is True; a histogram with only trivial buckets would
    # raise NameError here -- presumably the yaml inputs guarantee
    # otherwise. TODO confirm.
    map_table = gen_map_table(code_bounds[first_nontrivial:], shift_data)
    if first_nontrivial is None:
        code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n'
                 % histogram.name.upper())
    else:
        # Trivial range: the value is its own bucket index.
        code += 'if (value < %d) {\n' % first_nontrivial
        code += ('GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, value);\n'
                 % histogram.name.upper())
        code += 'return;\n'
        code += '}'
        first_nontrivial_code = dbl2u64(first_nontrivial)
        if shift_data is not None:
            # Mid range: index the shift-based lookup table, then correct
            # by one when the value falls below the bucket boundary.
            map_table_idx = decl_static_table(map_table, type_for_uint_table(map_table))
            code += 'union { double dbl; uint64_t uint; } _val, _bkt;\n'
            code += '_val.dbl = value;\n'
            code += 'if (_val.uint < %dull) {\n' % ((map_table[-1] << shift_data[0]) + first_nontrivial_code)
            code += 'int bucket = '
            code += 'grpc_stats_table_%d[((_val.uint - %dull) >> %d)] + %d;\n' % (map_table_idx, first_nontrivial_code, shift_data[0], first_nontrivial)
            code += '_bkt.dbl = grpc_stats_table_%d[bucket];\n' % bounds_idx
            code += 'bucket -= (_val.uint < _bkt.uint);\n'
            code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, bucket);\n' % histogram.name.upper()
            code += 'return;\n'
            code += '}\n'
    # Fallback: binary search over the full bounds table.
    code += 'GRPC_STATS_INC_HISTOGRAM((exec_ctx), GRPC_STATS_HISTOGRAM_%s, ' % histogram.name.upper()
    code += 'grpc_stats_histo_find_bucket_slow((exec_ctx), value, grpc_stats_table_%d, %d));\n' % (bounds_idx, histogram.buckets)
    return (code, bounds_idx)
# utility: print a big comment block into a set of files
def put_banner(files, banner):
    """Write `banner` lines as a C block comment to each open file."""
    for f in files:
        print >>f, '/*'
        for line in banner:
            print >>f, ' * %s' % line
        print >>f, ' */'
        print >>f
# Emit src/core/lib/debug/stats_data.h: enums, bucket-layout constants,
# INC macros and extern declarations for the generated tables.
with open('src/core/lib/debug/stats_data.h', 'w') as H:
    # copy-paste copyright notice from this file
    with open(sys.argv[0]) as my_source:
        copyright = []
        for line in my_source:
            if line[0] != '#': break
        for line in my_source:
            if line[0] == '#':
                copyright.append(line)
                break
        for line in my_source:
            if line[0] != '#':
                break
            copyright.append(line)
        put_banner([H], [line[2:].rstrip() for line in copyright])
    put_banner([H], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
    print >>H, "#ifndef GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
    print >>H, "#define GRPC_CORE_LIB_DEBUG_STATS_DATA_H"
    print >>H
    print >>H, "#include <inttypes.h>"
    print >>H, "#include \"src/core/lib/iomgr/exec_ctx.h\""
    print >>H
    # One enum per stat kind, plus extern name/doc string tables.
    for typename, instances in sorted(inst_map.items()):
        print >>H, "typedef enum {"
        for inst in instances:
            print >>H, " GRPC_STATS_%s_%s," % (typename.upper(), inst.name.upper())
        print >>H, " GRPC_STATS_%s_COUNT" % (typename.upper())
        print >>H, "} grpc_stats_%ss;" % (typename.lower())
        print >>H, "extern const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT];" % (
            typename.lower(), typename.upper())
        print >>H, "extern const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT];" % (
            typename.lower(), typename.upper())
    # Per-histogram bucket layout; these lists are reused by the .c emitter.
    histo_start = []
    histo_buckets = []
    histo_bucket_boundaries = []
    print >>H, "typedef enum {"
    first_slot = 0
    for histogram in inst_map['Histogram']:
        histo_start.append(first_slot)
        histo_buckets.append(histogram.buckets)
        print >>H, " GRPC_STATS_HISTOGRAM_%s_FIRST_SLOT = %d," % (histogram.name.upper(), first_slot)
        print >>H, " GRPC_STATS_HISTOGRAM_%s_BUCKETS = %d," % (histogram.name.upper(), histogram.buckets)
        first_slot += histogram.buckets
    print >>H, " GRPC_STATS_HISTOGRAM_BUCKETS = %d" % first_slot
    print >>H, "} grpc_stats_histogram_constants;"
    for ctr in inst_map['Counter']:
        print >>H, ("#define GRPC_STATS_INC_%s(exec_ctx) " +
                    "GRPC_STATS_INC_COUNTER((exec_ctx), GRPC_STATS_COUNTER_%s)") % (
            ctr.name.upper(), ctr.name.upper())
    for histogram in inst_map['Histogram']:
        print >>H, "#define GRPC_STATS_INC_%s(exec_ctx, value) grpc_stats_inc_%s((exec_ctx), (int)(value))" % (
            histogram.name.upper(), histogram.name.lower())
        print >>H, "void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int x);" % histogram.name.lower()
    for i, tbl in enumerate(static_tables):
        print >>H, "extern const %s grpc_stats_table_%d[%d];" % (tbl[0], i, len(tbl[1]))
    print >>H, "extern const int grpc_stats_histo_buckets[%d];" % len(inst_map['Histogram'])
    print >>H, "extern const int grpc_stats_histo_start[%d];" % len(inst_map['Histogram'])
    print >>H, "extern const int *const grpc_stats_histo_bucket_boundaries[%d];" % len(inst_map['Histogram'])
    print >>H, "extern void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x);" % len(inst_map['Histogram'])
    print >>H
    print >>H, "#endif /* GRPC_CORE_LIB_DEBUG_STATS_DATA_H */"
# Emit src/core/lib/debug/stats_data.c: string tables, static bucket
# tables and the per-histogram increment functions.
with open('src/core/lib/debug/stats_data.c', 'w') as C:
    # copy-paste copyright notice from this file
    with open(sys.argv[0]) as my_source:
        copyright = []
        for line in my_source:
            if line[0] != '#': break
        for line in my_source:
            if line[0] == '#':
                copyright.append(line)
                break
        for line in my_source:
            if line[0] != '#':
                break
            copyright.append(line)
        put_banner([C], [line[2:].rstrip() for line in copyright])
    put_banner([C], ["Automatically generated by tools/codegen/core/gen_stats_data.py"])
    print >>C, "#include \"src/core/lib/debug/stats_data.h\""
    print >>C, "#include \"src/core/lib/debug/stats.h\""
    print >>C, "#include \"src/core/lib/iomgr/exec_ctx.h\""
    print >>C, "#include <grpc/support/useful.h>"
    # Generate the bucketing code first so the static tables it interns
    # are all known before the table definitions are written below.
    histo_code = []
    for histogram in inst_map['Histogram']:
        code, bounds_idx = gen_bucket_code(histogram)
        histo_bucket_boundaries.append(bounds_idx)
        histo_code.append(code)
    for typename, instances in sorted(inst_map.items()):
        print >>C, "const char *grpc_stats_%s_name[GRPC_STATS_%s_COUNT] = {" % (
            typename.lower(), typename.upper())
        for inst in instances:
            print >>C, " %s," % c_str(inst.name)
        print >>C, "};"
        print >>C, "const char *grpc_stats_%s_doc[GRPC_STATS_%s_COUNT] = {" % (
            typename.lower(), typename.upper())
        for inst in instances:
            print >>C, " %s," % c_str(inst.doc)
        print >>C, "};"
    for i, tbl in enumerate(static_tables):
        print >>C, "const %s grpc_stats_table_%d[%d] = {%s};" % (
            tbl[0], i, len(tbl[1]), ','.join('%s' % x for x in tbl[1]))
    for histogram, code in zip(inst_map['Histogram'], histo_code):
        print >>C, ("void grpc_stats_inc_%s(grpc_exec_ctx *exec_ctx, int value) {%s}") % (
            histogram.name.lower(),
            code)
    print >>C, "const int grpc_stats_histo_buckets[%d] = {%s};" % (
        len(inst_map['Histogram']), ','.join('%s' % x for x in histo_buckets))
    print >>C, "const int grpc_stats_histo_start[%d] = {%s};" % (
        len(inst_map['Histogram']), ','.join('%s' % x for x in histo_start))
    print >>C, "const int *const grpc_stats_histo_bucket_boundaries[%d] = {%s};" % (
        len(inst_map['Histogram']), ','.join('grpc_stats_table_%d' % x for x in histo_bucket_boundaries))
    print >>C, "void (*const grpc_stats_inc_histogram[%d])(grpc_exec_ctx *exec_ctx, int x) = {%s};" % (
        len(inst_map['Histogram']), ','.join('grpc_stats_inc_%s' % histogram.name.lower() for histogram in inst_map['Histogram']))
# patch qps_test bigquery schema

# Percentiles that get their own explicit schema columns per histogram.
RECORD_EXPLICIT_PERCENTILES = [50, 95, 99]

with open('tools/run_tests/performance/scenario_result_schema.json', 'r') as f:
    qps_schema = json.loads(f.read())
def FindNamed(js, name):
    """Return the first element of `js` whose 'name' equals `name`
    (None when absent)."""
    matches = (el for el in js if el['name'] == name)
    return next(matches, None)
def RemoveCoreFields(js):
    """Drop every field whose name starts with 'core_' from js['fields']."""
    js['fields'] = [
        field for field in js['fields']
        if not field['name'].startswith('core_')
    ]
# Strip any stale core_* columns before re-adding the current set.
RemoveCoreFields(FindNamed(qps_schema, 'clientStats'))
RemoveCoreFields(FindNamed(qps_schema, 'serverStats'))
def AddCoreFields(js):
# Append the generated core_* columns to one BigQuery schema section (mutates
# js in place): one INTEGER per counter; per histogram a STRING bucket list,
# a STRING boundary list, and one FLOAT per explicitly recorded percentile.
# Reads the module-level inst_map and RECORD_EXPLICIT_PERCENTILES.
for counter in inst_map['Counter']:
js['fields'].append({
'name': 'core_%s' % counter.name,
'type': 'INTEGER',
'mode': 'NULLABLE'
})
for histogram in inst_map['Histogram']:
js['fields'].append({
'name': 'core_%s' % histogram.name,
'type': 'STRING',
'mode': 'NULLABLE'
})
js['fields'].append({
'name': 'core_%s_bkts' % histogram.name,
'type': 'STRING',
'mode': 'NULLABLE'
})
for pctl in RECORD_EXPLICIT_PERCENTILES:
js['fields'].append({
'name': 'core_%s_%dp' % (histogram.name, pctl),
'type': 'FLOAT',
'mode': 'NULLABLE'
})
AddCoreFields(FindNamed(qps_schema, 'clientStats'))
AddCoreFields(FindNamed(qps_schema, 'serverStats'))
# Write the patched BigQuery schema back in a stable (sorted, indented) form.
with open('tools/run_tests/performance/scenario_result_schema.json', 'w') as f:
f.write(json.dumps(qps_schema, indent=2, sort_keys=True))
# and generate a helper script to massage scenario results into the format we'd
# like to query
with open('tools/run_tests/performance/massage_qps_stats.py', 'w') as P:
# Copy a leading comment block (presumably the license header -- confirm
# against the file head) from this generator into the generated script:
# skip the first '#' block, then echo the next run of '#' lines.
with open(sys.argv[0]) as my_source:
for line in my_source:
if line[0] != '#': break
for line in my_source:
if line[0] == '#':
print >>P, line.rstrip()
break
for line in my_source:
if line[0] != '#':
break
print >>P, line.rstrip()
print >>P
print >>P, '# Autogenerated by tools/codegen/core/gen_stats_data.py'
print >>P
print >>P, 'import massage_qps_stats_helpers'
print >>P, 'def massage_qps_stats(scenario_result):'
print >>P, ' for stats in scenario_result["serverStats"] + scenario_result["clientStats"]:'
print >>P, ' if "coreStats" not in stats: return'
print >>P, ' core_stats = stats["coreStats"]'
print >>P, ' del stats["coreStats"]'
# Emit one line per counter / histogram that flattens coreStats into the
# core_* columns added to the schema above ("%%f" stays literal "%f" in the
# generated file).
for counter in inst_map['Counter']:
print >>P, ' stats["core_%s"] = massage_qps_stats_helpers.counter(core_stats, "%s")' % (counter.name, counter.name)
for i, histogram in enumerate(inst_map['Histogram']):
print >>P, ' h = massage_qps_stats_helpers.histogram(core_stats, "%s")' % histogram.name
print >>P, ' stats["core_%s"] = ",".join("%%f" %% x for x in h.buckets)' % histogram.name
print >>P, ' stats["core_%s_bkts"] = ",".join("%%f" %% x for x in h.boundaries)' % histogram.name
for pctl in RECORD_EXPLICIT_PERCENTILES:
print >>P, ' stats["core_%s_%dp"] = massage_qps_stats_helpers.percentile(h.buckets, %d, h.boundaries)' % (
histogram.name, pctl, pctl)
# Matching BigQuery column declarations for the per-iteration counter rates.
with open('src/core/lib/debug/stats_data_bq_schema.sql', 'w') as S:
columns = []
for counter in inst_map['Counter']:
columns.append(('%s_per_iteration' % counter.name, 'FLOAT'))
print >>S, ',\n'.join('%s:%s' % x for x in columns)
| {
"content_hash": "c33baff49a7a181ab3beda5125c71f33",
"timestamp": "",
"source": "github",
"line_count": 410,
"max_line_length": 146,
"avg_line_length": 36.66829268292683,
"alnum_prop": 0.6164693361713449,
"repo_name": "kumaralokgithub/grpc",
"id": "8359734c8484d78659d4487a3e692783997f3a3f",
"size": "15034",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/codegen/core/gen_stats_data.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5025"
},
{
"name": "C",
"bytes": "6877246"
},
{
"name": "C#",
"bytes": "1442251"
},
{
"name": "C++",
"bytes": "2050296"
},
{
"name": "CMake",
"bytes": "454078"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "373846"
},
{
"name": "M4",
"bytes": "42541"
},
{
"name": "Makefile",
"bytes": "891762"
},
{
"name": "Objective-C",
"bytes": "280036"
},
{
"name": "PHP",
"bytes": "292373"
},
{
"name": "Protocol Buffer",
"bytes": "96004"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1525651"
},
{
"name": "Ruby",
"bytes": "664572"
},
{
"name": "Shell",
"bytes": "35751"
},
{
"name": "Swift",
"bytes": "3486"
}
],
"symlink_target": ""
} |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example.settings")
from django.core.management import execute_from_command_line
# Allow starting the app without installing the module.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
execute_from_command_line(sys.argv)
| {
"content_hash": "7c3845669a43b49e2bdc1a4f479ad089",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 84,
"avg_line_length": 28.846153846153847,
"alnum_prop": 0.7066666666666667,
"repo_name": "PetrDlouhy/django-su",
"id": "0977c7ebc4dc21bde1aa92ac1f8a5fa6032324d8",
"size": "397",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "example/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2997"
},
{
"name": "Python",
"bytes": "29575"
}
],
"symlink_target": ""
} |
from test import get_client_credentials_session, cassette
def test_should_page_through_search_results():
# Replays a recorded HTTP cassette, so no live API is contacted.
session = get_client_credentials_session()
with cassette('fixtures/resources/catalog/list_search/page_through_search_results.yaml'):
# page_size caps items per page; count is the full result-set size.
first_page = session.catalog.search('mapreduce').list(page_size=2)
assert len(first_page.items) == 2
assert first_page.count == 1781
assert first_page.items[0].title == 'Rapid parallel genome indexing with MapReduce'
assert first_page.items[1].title == 'MapReduce'
# next_page fetches the following page of the same search.
second_page = first_page.next_page
assert len(second_page.items) == 2
assert second_page.count == 1781
assert second_page.items[0].title == 'Mumak: Map-Reduce Simulator'
assert second_page.items[1].title == 'Exploring mapreduce efficiency with highly-distributed data'
def test_should_list_search_results_all_view():
# view='all' presumably returns extra catalog fields (e.g. publisher) --
# the recorded cassette below pins the expected payload either way.
session = get_client_credentials_session()
with cassette('fixtures/resources/catalog/list_search/list_search_results_all_view.yaml'):
first_page = session.catalog.search('mapreduce', view='all').list(page_size=2)
assert len(first_page.items) == 2
assert first_page.count == 1781
assert first_page.items[0].title == 'Rapid parallel genome indexing with MapReduce'
assert first_page.items[0].publisher == 'ACM Press'
| {
"content_hash": "a0ed085210452f69827459facf84f682",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 106,
"avg_line_length": 39.542857142857144,
"alnum_prop": 0.6907514450867052,
"repo_name": "lucidbard/mendeley-python-sdk",
"id": "2965ae489f3fa989bf20b6e3d5a5dafe494f4737",
"size": "1384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/resources/catalog/test_list_search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "171502"
}
],
"symlink_target": ""
} |
import sys, time
import pytest
def test_we_can_import_module():
# importorskip skips this test (instead of failing) when 'timelimit' is absent.
pytest.importorskip('timelimit')
def test_context_manager_exists():
timelimit = pytest.importorskip('timelimit')
# Bare attribute access verifies the context manager is exported at all.
timelimit.timelimit
def test_context_manager_can_be_used():
timelimit = pytest.importorskip('timelimit')
# Entering and leaving the limit with an empty body must not raise.
with timelimit.timelimit(1):
pass
def test_sleep_1():
timelimit = pytest.importorskip('timelimit')
# Sleeping for less than the limit must complete normally.
with timelimit.timelimit(2):
time.sleep(1)
def test_sleep_2():
timelimit = pytest.importorskip('timelimit')
# Exceeding the limit must raise RuntimeError inside the managed block.
with pytest.raises(RuntimeError):
with timelimit.timelimit(1):
time.sleep(2)
@pytest.mark.xfail
def test_sleep_nested():
# We expect this test to fail, because there's only one alarm timer,
# so we cannot nest them (the inner limit clobbers the outer one).
timelimit = pytest.importorskip('timelimit')
with pytest.raises(RuntimeError):
with timelimit.timelimit(1):
with timelimit.timelimit(5):
time.sleep(2)
if __name__ == '__main__':
# Run this file's tests directly, forwarding any extra CLI args to pytest.
pytest.main([__file__] + sys.argv[1:])
| {
"content_hash": "f676397942172c2f6cd97bfc581b006f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 72,
"avg_line_length": 25.853658536585368,
"alnum_prop": 0.6575471698113208,
"repo_name": "INM-6/Python-Module-of-the-Week",
"id": "6d4c49878e3994edd0c7de98ef5439d579da7072",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "session01_Decorators/test_timelimit.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "795"
},
{
"name": "C++",
"bytes": "678"
},
{
"name": "CSS",
"bytes": "17737"
},
{
"name": "Cython",
"bytes": "792"
},
{
"name": "HTML",
"bytes": "4241166"
},
{
"name": "Jupyter Notebook",
"bytes": "6418232"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "85871"
},
{
"name": "Ruby",
"bytes": "149"
},
{
"name": "TeX",
"bytes": "11069"
},
{
"name": "Vim script",
"bytes": "10526"
}
],
"symlink_target": ""
} |
from __future__ import print_function
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import json
import os
import sys
import re
import requests
import multipart
import string
class MZBenchAPIException(Exception):
"""Raised when an MZBench API call fails or the server cannot be reached."""
pass
# NOTE(review): `emails`, `includes` and `env` are mutable default arguments;
# they are not mutated below, but None-defaults would be safer long term.
def start(host, script_file, script_content,
node_commit = None, nodes = None, workers_per_node = None, deallocate_after_bench = None,
provision_nodes = None, benchmark_name = None,
cloud = None, tags = None, emails=[], includes=[], env={}, no_cert_check = False,
exclusive = None
):
"""Starts a bench
:param host: MZBench API server host with port
:type host: str
:param script_file: Scenario filename for dashboard
:type script_file: str or unicode
:param script_content: Scenario content to execute
:type script_content: str or unicode
:param node_commit: Commit or branch name for MZBench node, default is "master"
:type node_commit: str
:param nodes: Number of nodes to allocate or node list, 1 by default
:type nodes: int or list of strings
:param workers_per_node: Number of workers to start on one node
:type workers_per_node: int
:param deallocate_after_bench: Deallocate nodes after bench is over
:type deallocate_after_bench: "true" or "false"
:param provision_nodes: Install required software
:type provision_nodes: "true" or "false"
:param benchmark_name: Set benchmark name
:type benchmark_name: str or unicode
:param cloud: Specify cloud provider to use
:type cloud: str or unicode
:param tags: Benchmark tags
:type tags: str
:param no_cert_check: Don't check server HTTPS certificate
:type no_cert_check: boolean
:param exclusive: Exclusive label
:type exclusive: str or unicode
:param emails: Emails to notify on bench results
:type emails: List of strings
:param env: Dictionary of environment variables to substitute
:type env: Dictionary
:returns: Operation status
:rtype: Dictionary
"""
# Scenarios may be BDL or Erlang terms; pick the matching parser module.
import erl_utils
import bdl_utils
import math
script_utils = bdl_utils if bdl_utils.is_bdl_scenario(script_content) else erl_utils
script_terms = script_utils.convert(script_content, env)
# NOTE(review): the `includes` parameter is overwritten here, so whatever the
# caller passed in is ignored -- includes always come from the scenario itself.
includes = script_utils.get_includes(script_terms)
# When workers_per_node is given, derive the node count from total workers.
if workers_per_node is not None:
desired_num_nodes = int(math.ceil(float(script_utils.get_num_of_workers(script_terms))/float(workers_per_node)))
else:
desired_num_nodes = None
# The derived node count takes precedence over an explicit `nodes` value.
if nodes is not None:
if isinstance(nodes, int):
params = [('nodes', desired_num_nodes if desired_num_nodes is not None else nodes)]
else:
params = [('nodes', ','.join(nodes[:desired_num_nodes] if desired_num_nodes is not None else nodes))]
else:
params = [] if desired_num_nodes is None else [('nodes', desired_num_nodes)]
# Optional query parameters are only sent when explicitly provided.
if deallocate_after_bench is not None:
params += [('deallocate_after_bench', deallocate_after_bench)]
if provision_nodes is not None:
params += [('provision_nodes', provision_nodes)]
if benchmark_name is not None:
params += [('benchmark_name', benchmark_name)]
if cloud is not None:
params += [('cloud', cloud)]
if tags is not None:
params += [('tags', tags)]
if exclusive is not None:
params += [('exclusive', exclusive)]
if node_commit is not None:
params += [('node_commit', node_commit)]
params += [('email', email) for email in emails]
params += [(k, v) for k, v in env.items()]
# Upload the scenario plus any local (non-HTTP) includes as multipart parts;
# http(s):// includes are skipped here (presumably resolved elsewhere).
files = [('bench',
{'filename': os.path.basename(script_file),
'content': script_content})]
for (incname, incurl) in includes:
script_dir = os.path.dirname(script_file)
if not re.search(r'^https?://', incurl, re.IGNORECASE):
filename = os.path.join(script_dir, incurl)
try:
with open(filename) as fi:
files.append(('include',
{'filename': incurl, 'content': fi.read()}))
except IOError as e:
print("Failed to get content for resource ({0}, {1}): {2}".format(
incname, incurl, e), file=sys.stderr)
raise
body, headers = multipart.encode_multipart({}, files)
return assert_successful_post(
host,
'/start',
params,
data=body, headers=headers, no_cert_check = no_cert_check)
def restart(host, bench_id, no_cert_check = False):
    """Creates a copy of a bench

    :param host: MZBench API server host with port
    :type host: str
    :param bench_id: benchmark run id to copy
    :type bench_id: int
    :param no_cert_check: Don't check server HTTPS certificate
    :type no_cert_check: boolean
    :returns: operation status
    :rtype: dict
    """
    # Bug fix: no_cert_check was accepted but silently dropped; forward it so
    # certificate checking can actually be disabled for this call too.
    return assert_successful_get(host, '/restart', {'id': bench_id},
                                 no_cert_check = no_cert_check)
def log(host, bench_id, no_cert_check = False):
    """Stream the system log of benchmark *bench_id* line by line.

    :param host: MZBench API server host with port
    :param bench_id: benchmark run id
    :param no_cert_check: skip HTTPS certificate verification
    :returns: generator of log lines (str)
    """
    entries = stream_lines(host, '/log', {'id': bench_id}, no_cert_check = no_cert_check)
    for entry in entries:
        yield entry
def userlog(host, bench_id, no_cert_check = False):
    """Stream the user log of benchmark *bench_id* line by line.

    :param host: MZBench API server host with port
    :param bench_id: benchmark run id
    :param no_cert_check: skip HTTPS certificate verification
    :returns: generator of log lines (str)
    """
    entries = stream_lines(host, '/userlog', {'id': bench_id}, no_cert_check = no_cert_check)
    for entry in entries:
        yield entry
def change_env(host, bench_id, env, no_cert_check = False):
    """Changes environment variables for existing benchmark on the fly

    :param host: MZBench API server host with port
    :type host: str
    :param bench_id: benchmark run id
    :type bench_id: int
    :param env: Dictionary of environment variables to substitute
    :type env: Dictionary
    :param no_cert_check: Don't check server HTTPS certificate
    :type no_cert_check: boolean
    """
    # Copy before injecting the internal 'id' key so the caller's dict is not
    # mutated as a side effect (the original wrote env['id'] in place).
    query = dict(env)
    query['id'] = bench_id
    return assert_successful_get(host, '/change_env', query, no_cert_check = no_cert_check)
def run_command(host, bench_id, pool, percent, bdl_command, no_cert_check = False):
    """Execute a worker BDL statement on part of a running benchmark.

    :param host: MZBench API server host with port
    :param bench_id: benchmark run id
    :param pool: 1-based pool number, counted from the top of the script
    :param percent: share of the pool's workers to target, 0 < percent <= 100
    :param bdl_command: BDL statement to execute
    :param no_cert_check: skip HTTPS certificate verification
    """
    import bdl_utils
    # Validate the statement's syntax locally before shipping it to the server.
    bdl_utils.convert("#!benchDL\n" + bdl_command, {})
    query = {'id': bench_id,
             'pool': pool,
             'percent': percent,
             'command': bdl_command}
    return assert_successful_get(host, '/run_command', query, no_cert_check = no_cert_check)
def data(host, bench_id, no_cert_check = False):
    """Stream the CSV metric data of benchmark *bench_id* line by line.

    :param host: MZBench API server host with port
    :param bench_id: benchmark run id
    :param no_cert_check: skip HTTPS certificate verification
    :returns: generator of CSV lines (str)
    """
    rows = stream_lines(host, '/data', {'id': bench_id}, no_cert_check = no_cert_check)
    for row in rows:
        yield row
def status(host, bench_id, wait=False, no_cert_check = False):
    """Fetch the status of benchmark *bench_id*.

    The boolean *wait* flag is forwarded to the server as the string
    'true'/'false'. Returns the parsed status dict.
    """
    query = {'id': bench_id,
             'wait': 'true' if wait else 'false'}
    return assert_successful_get(host, '/status', query, no_cert_check = no_cert_check)
def results(host, bench_id, wait=False, no_cert_check = False):
    """Fetch the resulting metric values of benchmark *bench_id*.

    The boolean *wait* flag is forwarded to the server as the string
    'true'/'false'. Returns the parsed results dict.
    """
    query = {'id': bench_id,
             'wait': 'true' if wait else 'false'}
    return assert_successful_get(host, '/results', query, no_cert_check = no_cert_check)
def stop(host, bench_id, no_cert_check = False):
    """Request the server to stop benchmark *bench_id*; returns operation status."""
    query = {'id': bench_id}
    return assert_successful_get(host, '/stop', query, no_cert_check = no_cert_check)
def clusters_info(host, no_cert_check = False):
    """Fetch info about the clusters currently allocated by the server."""
    return assert_successful_get(host, '/clusters_info', {}, no_cert_check = no_cert_check)
def deallocate_cluster(host, cluster_id, no_cert_check = False):
    """Ask the server to deallocate cluster *cluster_id*."""
    query = {'id': cluster_id}
    return assert_successful_get(host, '/deallocate_cluster', query, no_cert_check = no_cert_check)
def remove_cluster_info(host, cluster_id, no_cert_check = False):
    """Remove cluster *cluster_id*'s record from the allocated-clusters table."""
    query = {'id': cluster_id}
    return assert_successful_get(host, '/remove_cluster_info', query, no_cert_check = no_cert_check)
def add_tags(host, bench_id, tags, no_cert_check = False):
    """Attach *tags* (comma-separated string) to benchmark *bench_id*."""
    query = {'id': bench_id, 'tags': tags}
    return assert_successful_get(host, '/add_tags', query, no_cert_check = no_cert_check)
def remove_tags(host, bench_id, tags, no_cert_check = False):
    """Detach *tags* (comma-separated string) from benchmark *bench_id*."""
    query = {'id': bench_id, 'tags': tags}
    return assert_successful_get(host, '/remove_tags', query, no_cert_check = no_cert_check)
def addproto(host):
    """Prefix 'http://' unless *host* already carries an explicit scheme."""
    if host.startswith(("http://", "https://")):
        return host
    return "http://" + host
def stream_lines(host, endpoint, args, no_cert_check = False):
"""Yield the response of GET `endpoint`?`args` line by line (streamed)."""
try:
response = requests.get(
addproto(host) + endpoint + '?' + urlencode(args),
stream=True, verify = not no_cert_check, headers=get_auth_headers(host))
for line in fast_iter_lines(response, chunk_size=1024):
# NOTE(review): this except can only trigger if the consumer throws
# ValueError into the generator at the yield -- confirm it is intended.
try:
yield line
except ValueError:
print(line)
# The status code is only checked after the body has been streamed.
if response.status_code == 200:
pass
else:
raise MZBenchAPIException('Server call to {0} failed with code {1}'.format(endpoint, response.status_code))
except requests.exceptions.ConnectionError as e:
raise MZBenchAPIException('Connect to "{0}" failed with message: {1}'.format(host, e))
def fast_iter_lines(response, chunk_size=512):
    """Split a streamed response into lines without buffering the whole body.

    A chunk may end in the middle of a line; that trailing fragment is
    carried over and glued onto the front of the next chunk, so lines
    spanning chunk boundaries come out whole.
    """
    carry = None
    for chunk in response.iter_content(chunk_size=chunk_size):
        pieces = chunk.splitlines()
        if carry is not None:
            if pieces:
                pieces[0] = carry + pieces[0]
            else:
                pieces = [carry]
        # If the chunk's last character survived splitlines(), the final
        # piece had no terminating newline -- hold it back for the next chunk.
        if pieces and pieces[-1] and chunk and pieces[-1][-1] == chunk[-1]:
            carry = pieces.pop()
        else:
            carry = None
        for piece in pieces:
            yield piece
    if carry is not None:
        yield carry
def assert_successful_request(perform_request):
    """Decorator: run an HTTP request function and return parsed JSON on 200.

    Any other status code raises MZBenchAPIException carrying as much detail
    as the response provides; connection failures are wrapped as well.
    """
    def wrapped(*args, **kwargs):
        try:
            response = perform_request(*args, **kwargs)
            if response.status_code == 200:
                return response.json()
            else:
                # Narrowed from a bare `except:` -- only a JSON parse failure
                # should fall back to dumping the raw body.
                try:
                    data = json.loads(response.text)
                except ValueError:
                    raise MZBenchAPIException('Server call with arguments {0} failed with code {1} response body:\n{2}'.format(args, response.status_code, response.text))
                if ('reason_code' in data and 'reason' in data):
                    raise MZBenchAPIException('Server call with arguments {0} failed with code {1} and reason: {2}\n{3}'.format(args, response.status_code, data['reason_code'], data['reason']))
                else:
                    # json.dumps replaces the Python2-only StringIO round-trip
                    # (`from StringIO import StringIO` fails on Python 3).
                    raise MZBenchAPIException('Server call with arguments {0} failed with code {1} response body:\n{2}'.format(args, response.status_code, json.dumps(data, indent=4)))
        except requests.exceptions.ConnectionError as e:
            raise MZBenchAPIException('Connect to "{0}" failed with message: {1}'.format(args[0], e))
    return wrapped
@assert_successful_request
def assert_successful_get(host, endpoint, args, no_cert_check = False):
"""GET `endpoint`?`args`; the decorator converts the response to parsed
JSON or raises MZBenchAPIException (see assert_successful_request)."""
return requests.get(
addproto(host) + endpoint + '?' + urlencode(args),
verify=not no_cert_check, headers=get_auth_headers(host))
@assert_successful_request
def assert_successful_post(host, endpoint, args, data=None, headers=None, no_cert_check = False):
"""POST `data` to `endpoint`?`args`, merging auth headers into `headers`;
the decorator converts the response to parsed JSON or raises."""
return requests.post(
addproto(host) + endpoint + '?' + urlencode(args),
data=data,
headers=add_auth_headers(headers, host),
verify=not no_cert_check)
def add_auth_headers(headers, host):
    """Merge the auth headers for *host* into *headers*.

    Either side may be None; whichever dict exists is returned as-is, and
    when both exist the passed-in *headers* is updated in place.
    """
    auth = get_auth_headers(host)
    if headers is None:
        return auth
    if auth is None:
        return headers
    headers.update(auth)
    return headers
def get_auth_headers(host):
    """Return the Authorization header dict for *host*, or None when no
    token is configured (see read_token)."""
    token = read_token(host)
    if token is None:
        return None
    # Fix: string.rstrip(token, ...) is Python2-only -- the function forms
    # were removed from the `string` module in Python 3. The str method
    # behaves identically on both versions.
    return {"Authorization": "Bearer {}".format(token.rstrip(" \n\r"))}
def read_token(host):
    """Look up the API token for *host* in the token file.

    The file location is $MZBENCHTOKEN or ~/.config/mzbench/token. Each line
    may carry a '#' comment; a "host token" pair matches a specific host,
    while the first single-word line acts as a catch-all default.
    Returns None when no file or no matching line exists.
    """
    token_file = os.environ.get('MZBENCHTOKEN',
                                os.path.expanduser("~/.config/mzbench/token"))
    if not os.path.isfile(token_file):
        return None
    with open(token_file) as f:
        contents = f.read()
    for raw_line in contents.split('\n'):
        line_no_comments = raw_line.split('#', 1)[0]
        words = line_no_comments.split()
        if len(words) > 1 and host == words[0]:
            return words[1]
        if len(words) == 1:
            # Catch-all line: returned verbatim (pre-split), as before.
            return line_no_comments
    return None
| {
"content_hash": "6368a9ae7e972e21e9f1535b578abca4",
"timestamp": "",
"source": "github",
"line_count": 481,
"max_line_length": 193,
"avg_line_length": 33.943866943866944,
"alnum_prop": 0.628100692105102,
"repo_name": "machinezone/mzbench",
"id": "3ad7ebc2de6d10f30a37072626f4d67d48a006bf",
"size": "16328",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/mzbench_api_client.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4250"
},
{
"name": "C",
"bytes": "8115"
},
{
"name": "CSS",
"bytes": "79551"
},
{
"name": "Erlang",
"bytes": "717459"
},
{
"name": "Gnuplot",
"bytes": "480"
},
{
"name": "HTML",
"bytes": "2170"
},
{
"name": "JavaScript",
"bytes": "221772"
},
{
"name": "Lua",
"bytes": "300"
},
{
"name": "Makefile",
"bytes": "23862"
},
{
"name": "Python",
"bytes": "110797"
},
{
"name": "Shell",
"bytes": "12067"
}
],
"symlink_target": ""
} |
class RelationshipType:
    """
    Stores components of a relationship type:
    inUse: Whether relationship type is in use
    type_block_id: ID of the block that stores string name of our type
    Along with the index where the relationship type is stored
    """
    def __init__(self, index=0, in_use=True, type_block_id=0):
        """
        Initialize a RelationshipType record.

        :param index: Index of relationship type
        :type index: int
        :param in_use: Whether relationship type is in use
        :type in_use: bool
        :param type_block_id: ID of the block that stores string
                              name of our type
        :type type_block_id: int
        :return: relationship type instance
        :rtype: RelationshipType
        """
        # Index is only stored in memory.
        self.index = index
        # Values stored in the RelationshipTypeStore file
        self.inUse = in_use
        self.typeBlockId = type_block_id

    def __eq__(self, other):
        """
        Equality on (index, inUse, typeBlockId).

        Returns NotImplemented for unrelated types (instead of a hard False)
        so Python can try the reflected comparison; the net result of
        `a == other_type` is still False, as before.
        """
        if not isinstance(other, self.__class__):
            return NotImplemented
        return (self.index, self.inUse, self.typeBlockId) == \
               (other.index, other.inUse, other.typeBlockId)

    def __ne__(self, other):
        """
        Inverse of __eq__ (kept explicit for Python 2 compatibility).
        """
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __repr__(self):
        """
        Unambiguous debug representation mirroring __init__'s signature.
        """
        return "RelationshipType(index=%r, in_use=%r, type_block_id=%r)" % (
            self.index, self.inUse, self.typeBlockId)

    def list(self):
        """
        List the items that this type contains, excluding the index. This is
        essentially how it's stored on disk.

        :return: List of data contained in this type
        :rtype: list
        """
        return [self.inUse, self.typeBlockId]
| {
"content_hash": "a051874a001a02f50a26570bda21e2e9",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 76,
"avg_line_length": 31.646153846153847,
"alnum_prop": 0.5775401069518716,
"repo_name": "PHB-CS123/graphene",
"id": "547ba33fd59b000fd0d696d79d16fb92a9082016",
"size": "2057",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphene/storage/base/relationship_type.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "11976"
},
{
"name": "HTML",
"bytes": "257"
},
{
"name": "Makefile",
"bytes": "1056"
},
{
"name": "Python",
"bytes": "628746"
},
{
"name": "Shell",
"bytes": "363"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Indicator.identifier'
# (nullable CharField so existing rows migrate without needing a default)
db.add_column(u'survey_indicator', 'identifier',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Indicator.identifier'
db.delete_column(u'survey_indicator', 'identifier')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'locations.location': {
'Meta': {'object_name': 'Location'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'parent_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'point': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Point']", 'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'locations'", 'null': 'True', 'to': u"orm['locations.LocationType']"})
},
u'locations.locationtype': {
'Meta': {'object_name': 'LocationType'},
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'})
},
u'locations.point': {
'Meta': {'object_name': 'Point'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '13', 'decimal_places': '10'})
},
'survey.answerrule': {
'Meta': {'object_name': 'AnswerRule'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'condition': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'next_question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_question_rules'", 'null': 'True', 'to': "orm['survey.Question']"}),
'question': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'rule'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Question']"}),
'validate_with_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'validate_with_question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'validate_with_value': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.batch': {
'Meta': {'object_name': 'Batch'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'survey': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'batches'", 'null': 'True', 'to': "orm['survey.Survey']"})
},
'survey.batchlocationstatus': {
'Meta': {'object_name': 'BatchLocationStatus'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_locations'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'open_batches'", 'null': 'True', 'to': u"orm['locations.Location']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.children': {
'Meta': {'object_name': 'Children'},
'aged_between_0_5_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_12_23_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_13_17_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_24_59_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_5_12_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_6_11_months': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'children'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.household': {
'Meta': {'object_name': 'Household'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'households'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'number_of_females': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'number_of_males': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'survey.householdbatchcompletion': {
'Meta': {'object_name': 'HouseholdBatchCompletion'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_households'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'completed_batches'", 'null': 'True', 'to': "orm['survey.Investigator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
},
'survey.householdhead': {
'Meta': {'object_name': 'HouseholdHead'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'head'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'default': "'16'", 'max_length': '100'}),
'resident_since_month': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'resident_since_year': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1984'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'})
},
'survey.indicator': {
'Meta': {'object_name': 'Indicator'},
'batch': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'indicators'", 'null': 'True', 'to': "orm['survey.Batch']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'})
},
'survey.investigator': {
'Meta': {'object_name': 'Investigator'},
'age': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "'English'", 'max_length': '100', 'null': 'True'}),
'level_of_education': ('django.db.models.fields.CharField', [], {'default': "'Primary'", 'max_length': '100', 'null': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'male': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.locationautocomplete': {
'Meta': {'object_name': 'LocationAutoComplete'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['locations.Location']", 'null': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'survey.multichoiceanswer': {
'Meta': {'object_name': 'MultiChoiceAnswer'},
'answer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.QuestionOption']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.numericalanswer': {
'Meta': {'object_name': 'NumericalAnswer'},
'answer': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '5', 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.question': {
'Meta': {'object_name': 'Question'},
'answer_type': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'questions'", 'null': 'True', 'to': "orm['survey.Indicator']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': "orm['survey.Question']"}),
'subquestion': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '60'})
},
'survey.questionoption': {
'Meta': {'object_name': 'QuestionOption'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'max_length': '2', 'null': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'null': 'True', 'to': "orm['survey.Question']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'survey.survey': {
'Meta': {'object_name': 'Survey'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'survey.textanswer': {
'Meta': {'object_name': 'TextAnswer'},
'answer': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Household']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investigator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Investigator']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.Question']", 'null': 'True'}),
'rule_applied': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['survey.AnswerRule']", 'null': 'True'})
},
'survey.women': {
'Meta': {'object_name': 'Women'},
'aged_between_15_19_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'aged_between_20_49_years': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'household': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'women'", 'unique': 'True', 'null': 'True', 'to': "orm['survey.Household']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'})
}
}
complete_apps = ['survey'] | {
"content_hash": "fc52fd8a4e0fe537dd3b57a0b27910b6",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 179,
"avg_line_length": 85.26839826839827,
"alnum_prop": 0.5666345128699802,
"repo_name": "antsmc2/mics",
"id": "44b8ae5becfbb800490799271c0607eb3daf5bdb",
"size": "19721",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "survey/migrations/0029_auto__add_field_indicator_identifier.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "37725"
},
{
"name": "JavaScript",
"bytes": "390607"
},
{
"name": "Python",
"bytes": "5206913"
},
{
"name": "Shell",
"bytes": "1277"
}
],
"symlink_target": ""
} |
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:45
:Licence MIT
Part of grammpy
"""
from .representation import *
| {
"content_hash": "99a491e9e5726ed05398e0b44edca56e",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 29,
"avg_line_length": 13.222222222222221,
"alnum_prop": 0.7226890756302521,
"repo_name": "PatrikValkovic/grammpy",
"id": "8904a2b59a840caf9f17cf3c3e300cd401fa5586",
"size": "141",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grammpy/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "604926"
}
],
"symlink_target": ""
} |
""":mod:`sqlalchemy_imageattach` --- SQLAlchemy-ImageAttach
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This package provides a simple way to attach images to the other
object-relationally mapped entities and store these into the physically
agnostic backend storages.
For example, you can simply store image files into the filesystem, and
then deploy your application to production, and later switch the
production deployment to AWS S3 instead. The common backend interface
consists of only
essential operations, so you can easily implement a new storage backend.
"""
| {
"content_hash": "5c9e255534a383cad98646f3d2e89947",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 72,
"avg_line_length": 44.23076923076923,
"alnum_prop": 0.7217391304347827,
"repo_name": "youknowone/sqlalchemy-imageattach",
"id": "fec1256977fea4a3dd23369e280ecb7a5432ffcb",
"size": "575",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sqlalchemy_imageattach/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "162759"
}
],
"symlink_target": ""
} |
# Maximum number of seed addresses to emit in total.
NSEEDS=512
# Cap on seeds per autonomous system, so no single operator dominates.
MAX_SEEDS_PER_ASN=2
# Nodes reporting fewer blocks than this are filtered out.
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
    "88.208.34.101"
])
import re
import sys
import dns.resolver
# IPv4 address on the default port; groups 2-5 capture the four octets.
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):9988$")
# Accepted (known and recent) user-agent strings.
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Satoshi:0.10.\d{1,2}\/)$")

def parseline(line):
    """
    Parse one whitespace-separated line of a seeder dump.

    Returns a dict with keys ip/ipnum/uptime/lastsuccess/version/agent/
    service/blocks, or None when the line is malformed, is not an
    IPv4:9988 address, or is flagged as bad by the crawler.
    """
    sline = line.split()
    if len(sline) < 11:
        return None
    # Match only IPv4.
    m = PATTERN_IPV4.match(sline[0])
    if m is None:
        return None
    # Do IPv4 sanity check and fold the octets into a 32-bit integer.
    ip = 0
    for i in range(0, 4):
        octet = int(m.group(i + 2))
        if octet < 0 or octet > 255:
            return None
        ip = ip + (octet << (8 * (3 - i)))
    if ip == 0:
        return None
    # Skip bad results. BUGFIX: the field is a string, so the original
    # `sline[1] == 0` comparison was always False and never skipped anything.
    if int(sline[1]) == 0:
        return None
    # Extract 30-day uptime %, stripping the trailing '%'.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent, without the surrounding quotes.
    agent = sline[11][1:-1]
    # Extract service flags (hex).
    service = int(sline[9], 16)
    # Extract best known block height.
    blocks = int(sline[8])
    # Construct result.
    return {
        'ip': m.group(1),
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
    }
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """
    Limit the candidate list both per autonomous system and globally.

    :param ips: list of dicts as returned by parseline(), best first
    :param max_per_asn: maximum entries to keep per ASN
    :param max_total: maximum entries to keep overall
    :return: filtered list, preserving input order
    """
    result = []
    asn_count = {}
    for ip in ips:
        if len(result) == max_total:
            break
        try:
            # Reverse the octets and query the <ip>.origin.asn.cymru.com
            # TXT record; the ASN is the first token of the answer.
            asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # SystemExit and KeyboardInterrupt.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    return result
def main():
    """
    Read a seeder dump from stdin, filter it, and print one seed IPv4
    address per line to stdout (deterministically ordered).
    """
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Skip entries without a valid IPv4 address. (The original comment
    # said "with", which was the opposite of what the filter does.)
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker) so the
    # most reliable nodes win the per-ASN slots below.
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['ipnum']))
    for ip in ips:
        # BUGFIX: `print ip['ip']` is Python-2-only syntax; print() with a
        # single argument behaves identically under Python 2 and 3.
        print(ip['ip'])

if __name__ == '__main__':
    main()
| {
"content_hash": "f98be9396d1d145bdab09161cb31decb",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 186,
"avg_line_length": 31.256880733944953,
"alnum_prop": 0.5705899618432638,
"repo_name": "tropa/axecoin",
"id": "f1d9e368c61c8b77036d0b9cc88fe3eaa07c28f3",
"size": "3480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master-0.10",
"path": "contrib/seeds/makeseeds.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "7639"
},
{
"name": "C",
"bytes": "320976"
},
{
"name": "C++",
"bytes": "3542102"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "142312"
},
{
"name": "Makefile",
"bytes": "84115"
},
{
"name": "Objective-C",
"bytes": "3283"
},
{
"name": "Objective-C++",
"bytes": "7236"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "221281"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Roff",
"bytes": "36588"
},
{
"name": "Shell",
"bytes": "44911"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the player app: creates the Song table."""

    initial = True

    dependencies = [
    ]

    operations = [
        # Song: a named uploaded audio file.
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=125)),
                ('audio_file', models.FileField(upload_to='')),
            ],
        ),
    ]
| {
"content_hash": "2e714c129a94aaf111aac6298ec86228",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 114,
"avg_line_length": 24.545454545454547,
"alnum_prop": 0.5592592592592592,
"repo_name": "Lightshadow244/OwnMusicWeb",
"id": "e896c11782f0107fb83cdc657104b92b8d154080",
"size": "613",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ownmusicweb/player/migrations/0001_initial.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "884"
},
{
"name": "Dockerfile",
"bytes": "1001"
},
{
"name": "HTML",
"bytes": "2434"
},
{
"name": "JavaScript",
"bytes": "34134"
},
{
"name": "Python",
"bytes": "16800"
},
{
"name": "Shell",
"bytes": "464"
}
],
"symlink_target": ""
} |
from engine.event import *
from engine.action import *
from engine.code import *
from engine.player import *
from engine.round import *
from engine.team import *
import configparser
import datetime
from flask import Flask, json, jsonify, make_response, session, render_template, request, send_file
import json
import os
import psycopg2
import queue
class App:
    """Flask front-end for the game: player registration, in-game actions,
    and the spawnmaster/basemaster dashboards."""
    app = Flask(__name__, static_url_path = "", static_folder = "www")
    # Server-side sessions are configured to live in Redis.
    SESSION_TYPE = 'Redis'
    app.config.from_object(__name__)
    # NOTE(review): hard-coded session secret checked into source control;
    # the commented-out os.urandom(24) would be random per start (breaking
    # sessions across restarts). Load the key from config instead.
    app.secret_key = "ExtraSecretSessionKey"#os.urandom(24)
# START BLOCK
# Player registration
def registration_template(error):
return render_template("registration.html", error=error)
def pending_template():
if App.logged_in():
return render_template("pending.html", user=request.cookies.get("user"), phone=request.cookies.get("phone"))
else:
return "403 Connection Forbidden"
def playing_template():
if App.logged_in():
return render_template("game_view.html")
else:
return "403 Connection Forbidden"
@app.route("/isJailed")
def jailed():
if App.logged_in():
return str(Event.isPlayerJailed(Player._getIdByName(request.cookies.get("user"))))
else:
return "403 Connection Forbidden"
def logged_in():
try:
if request.cookies.get("user") == None or request.cookies.get("web_hash") == None:
return False
else:
return True
except KeyError:
return False
@app.route("/login", methods=["GET"])
def login():
web_hash = request.args.get("hash")
phone = Player.getMobileById(Player.getIdByHash(web_hash))
user = Player.getNameById(Player.getIdByHash(web_hash))
return App.add_cookies(user, phone, web_hash)
@app.route("/")
def index():
if App.logged_in():
if not Event.isPlayerJailed(Player._getIdByName(request.cookies.get("user"))):
return App.playing_template()
return App.pending_template()
else:
return App.registration_template(" ")
# Set HTTP headers so those files would not be cached
@app.route("/events.json")
def events():
response = make_response(send_file("www/events.json"))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'
return response
@app.route("/stats.json")
def stats():
response = make_response(send_file("www/stats.json"))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, proxy-revalidate, max-age=0'
return response
@app.route("/register", methods=["GET"])
def new_player():
user = request.args.get("user")
phone = request.args.get("phone")
if user and phone:
if Action.addPlayer(user, phone, ''):
return App.add_cookies(user, phone, Player.getHashById(Player._getIdByName(user)))
else:
return App.registration_template("Probleem registreerimisel, kontrolli sisestatud andmeid.")
else:
return App.registration_template("Mõlemad väljad on kohustuslikud.")
@app.route("/cookie")
def add_cookies(user, phone, web_hash):
try:
expire_date = datetime.datetime.now()
expire_date = expire_date + datetime.timedelta(days=1)
cookies = make_response(render_template("to_game.html"))
cookies.set_cookie("user", user, expires=expire_date)
cookies.set_cookie("phone", phone, expires=expire_date)
cookies.set_cookie("web_hash", web_hash, expires=expire_date)
return cookies
except:
return "Problem adding cookies"
@app.route("/delCookies")
def delete_cookies():
try:
cookies = make_response(render_template("to_game.html"))
cookies.set_cookie("user", "", expires=0)
cookies.set_cookie("phone", "", expires=0)
cookies.set_cookie("web_hash", "", expires=0)
return cookies
except:
return "Problem adding cookies"
@app.route("/wrongInfo")
def wrong_info():
if App.logged_in():
phone = request.args.get("phone")
if phone == request.cookies.get("phone"):
Player.delPlayer(request.cookies.get("user"))
return App.delete_cookies()
else:
return "User data preserved"
else:
return "403 Connection Forbidden"
# Player registration
# END BLOCK
# START BLOCK
# Player actions
@app.route("/flee")
def flee_jail():
fleeing_code = request.args.get("fleeingCode")
if Action.fleePlayerWithCode(fleeing_code):
return "You got out"
else:
return "Your escape failed"
@app.route("/tag")
def tag():
if App.logged_in():
tag_code = request.args.get("tagCode")
if Action.handleWeb(request.cookies.get("web_hash"), tag_code):
return "Hit"
else:
return "Your attempt to catch them failed"
else:
return "403 Connection Forbidden"
@app.route("/messageTeam", methods=["GET"])
def messageTeam():
if App.logged_in():
team_message = request.args.get("message")
player_id = Player.getIdByHash(request.cookies.get("web_hash"))
if team_message and player_id:
if Action.sayToMyTeam(player_id, team_message):
return "Message sent"
else:
return "Error sending message"
else:
return "Message missing, or invalid player info"
else:
return "403 Connection Forbidden"
# Player actions
# END BLOCK
# START BLOCK
# Getting data
@app.route("/user")
def username():
if App.logged_in():
return request.cookies.get("user")
else:
return "403 Connection Forbidden"
@app.route("/userTeam")
def user_team():
if App.logged_in():
if Team.getPlayerTeamId(Player.getIdByHash(request.cookies.get("web_hash")),Round.getActiveId()):
return str(Team.getPlayerTeamId(Player.getIdByHash(request.cookies.get("web_hash")),Round.getActiveId()))
else:
return "Player is not currently in a team"
else:
return "403 Connection Forbidden"
@app.route("/baseMessage")
def base_message():
try:
return Action.base_msg_get()["text"]
except KeyError:
return ""
@app.route("/message")
def personal_message():
if App.logged_in():
data = {}
data['jailed'] = str(Event.isPlayerJailed(Player._getIdByName(request.cookies.get("user"))))
message = Action.browserRequestsMessages(request.cookies.get("web_hash"))
data['message'] = message
return jsonify(data)
else:
return "403 Connection Forbidden"
@app.route("/teams")
def teams():
all_teams = []
for team in game_config.teams:
all_teams.append(team['name'])
return jsonify(all_teams)
# Getting data
# END BLOCK
# START BLOCK
# Spawnmaster screen
def spawn_view():
if App.is_master():
Round.updateActiveId()
players, teamless = Stats.playersDetailed()
rounds = Round.getRounds()
return render_template("spawn.html", rounds=rounds, teamless=teamless, players = players)
else:
return "403 Connection Forbidden"
def is_master():
try:
if session["master"] == 1:
return True
else:
return False
except KeyError:
return False
@app.route("/spawn")
def spawnmaster():
if App.is_master():
return App.spawn_view()
else:
return render_template("spawn_login.html")
@app.route("/masterLogin", methods=["GET"])
def master_login():
user = request.args.get("user")
password = request.args.get("pw")
if user == config['users']['spawnuser'] and \
password == config['users']['spawnpassword']:
session["master"] = 1
return App.spawnmaster()
else:
return "403 Connection Forbidden"
@app.route("/getcode", methods=["GET"])
def getcode():
# expects request /getcode?site=A
site = request.args.get("site")
if site not in ['A', 'B']:
return "403 Connection Forbidden"
code, shortcode = game.sites[site].lock()
data = {'code': code, 'shortcode': shortcode}
return jsonify(data)
@app.route("/unlock", methods=["GET"])
def unlock():
# expects request /getcode?site=A
site = request.args.get("s")
code = request.args.get("c")
if site not in ['A', 'B']:
return "403 Connection Forbidden"
print(site, code)
data = {}
data['response'] = game.sites[site].unlock(code)
return jsonify(data)
@app.route("/pollsite", methods=["GET"])
def pollsite():
site = request.args.get("site")
if site not in ['A', 'B']:
return "403 Connection Forbidden"
data = {}
s = game.sites[site]
# Check if keypad was unlocked
data['lock'] = s.locked
if s.starting:
data['startround'] = True
s.starting = False
return jsonify(data)
@app.route("/masterout")
def master_logout():
if App.is_master():
session.clear()
return "Spanwmaster has logged out"
else:
return "403 Connection Forbidden"
# Spawnmaster screen
# END BLOCK
# START BLOCK
# Stats screens
    @app.route("/baseLogin", methods=["GET"])
    def base_login():
        """Log the basemaster in via ?user=...&pw=... query parameters.

        NOTE(review): same concerns as the spawnmaster login -- GET query
        credentials and a non-constant-time == comparison.
        """
        user = request.args.get("user")
        password = request.args.get("pw")
        if user == config['users']['baseuser'] and \
           password == config['users']['basepassword']:
            session["base"] = 1
            return App.base_template()
        else:
            return "403 Connection Forbidden"
def is_base():
try:
if session["base"] == 1:
return True
else:
return False
except KeyError:
return False
@app.route("/base")
def base_template():
if App.is_base():
return render_template("base.html")
else:
return render_template("base_login.html")
    @app.route("/spectate")
    def spectator_template():
        """Public spectator view; no authentication required."""
        return render_template("spectate.html")
@app.route("/baseout")
def base_logout():
if App.is_base():
session.clear()
return "Basemaster has logged out"
else:
return "403 Connection Forbidden"
# Stats screens
# END BLOCK
# START BLOCK
# Spawnmaster's actions
# Adding a new round
@app.route("/addRound", methods=["GET"])
def startRound():
roundName = request.args.get("roundName")
# How many minutes does the round last
roundLength = request.args.get("roundLength")
# In how many minutes does the round begin
startsAt = request.args.get("startsAt")
try:
int(roundLength)
#int(startsIn)
except ValueError:
return "Round length and starttime has to be entered as integers."
startTime = datetime.datetime.now()
startTime = startTime.replace(hour=int(startsAt[0:2]), minute=int(startsAt[3:5]), second=0, microsecond=0)
endTime = startTime + datetime.timedelta(seconds = int(roundLength) * 60)
startTimeString = format(startTime, dateformat)
endTimeString = format(endTime, dateformat)
if not roundName or not roundLength or not startsAt:
return "Insufficient info for a new round"
else:
if Round.add(roundName, startTimeString, endTimeString):
Action.addTeamsToAllRounds()
return "New round \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
else:
return "Error: New round has overlapping time. not added: \"" + roundName + "\" start time " + startTimeString + ", end time " + endTimeString + "."
# Adding player to a team in active round
@app.route("/addToTeam", methods = ["GET"])
def addToTeam():
team_name = request.args.get("teamName")
player_id = request.args.get("playerId")
if team_name and player_id:
try:
Action.addPlayerToTeam(Player.getNameById(player_id), team_name)
return "Player " + Player.getNameById(player_id) + " added to team" + team_name
except:
return "Team or player id were given as invalid values."
else:
return "Missing team or player id."
# Spawnmaster's actions
# END BLOCK
# Routes for SMS
    @app.route("/sms", methods=['GET'])
    def smsserver():
        """Exchange SMS traffic with the gateway.

        The gateway POSTs incoming messages in the request body and
        receives all queued outgoing messages in the JSON response.

        NOTE(review): the shared secret is hardcoded in source; move it
        to config.ini alongside the other credentials.
        """
        # Check the stupid "password"
        if request.args.get('pass') != 'avf2DA3XeJZmqy9KKVjFdGfU':
            return jsonify({'error': 'error'})
        # Receive incoming SMSes
        incoming = json.loads(request.data.decode('utf8'))
        for message in incoming['incoming']:
            # Act on the message, it's something similar to
            # {'number': 512314, 'contents': 'Welcome here',
            #  'sent': sent, 'received': received}
            Action.handleSms(message['number'], message['contents'])
        # Mark all the old enough messages ready for SMSing
        Action.messages_timeout_check()
        # Drain the outgoing queue without blocking; Empty ends the loop.
        out = []
        try:
            while True:
                element = sms_queue.get_nowait()
                out.append(element)
        except queue.Empty:
            pass
        return jsonify({'outgoing': out})
# Routes for printing
    @app.route("/print", methods=['GET'])
    def printserver():
        """Hand all queued print jobs to the print client as JSON.

        NOTE(review): like /sms, the shared secret is hardcoded in
        source; it belongs in config.ini.
        """
        if request.args.get('pass') != 'htpT2U8UMpApV852DGSncBP7':
            return jsonify({'error': 'error'})
        # Drain the printer queue without blocking; Empty ends the loop.
        data = []
        try:
            while True:
                element = printer_queue.get_nowait()
                data.append(element)
        except queue.Empty:
            pass
        return jsonify({'print': data})
if __name__ == "__main__":
    # Start program: read configuration, connect to Postgres, wire up the
    # queues and game state, then serve Flask in a background thread while
    # the main thread runs the CLI loop.
    config = configparser.ConfigParser()
    config.read('config.ini')
    # Connect to database
    try:
        db = config['database']
        parameters = "host='%s' dbname='%s' user='%s' password='%s'" % (
            db['host'], db['dbname'], db['user'], db['password'])
        connection = psycopg2.connect(parameters)
        connection.set_session(autocommit=True)
        cursor = connection.cursor()
    except Exception:
        # was a bare "except:", which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the friendly message without that
        print("Error. Unable to connect to the database. If losing data is acceptable, try running 'python reset_db.py'")
        exit()
    # Queues shared between the Flask handlers and the game engine
    sms_queue = queue.Queue()
    printer_queue = queue.Queue()
    game = Game(config, cursor)
    Action.initAllConnect(cursor, sms_queue, printer_queue)
    Round.updateActiveId()
    Stats.updateStats()
    Stats.printPlayersDetailed()
    debug = False
    if debug:
        App.app.run(debug=True)
    else:
        import logging
        from threading import Thread
        from engine.cli import processInput
        logging.basicConfig(filename='flask.log', level=logging.DEBUG)
        appthread = Thread(target=App.app.run, args=())
        # Thread.setDaemon() is deprecated; assign the attribute instead
        appthread.daemon = True
        appthread.start()
        while True:
            processInput()
| {
"content_hash": "1a960525384024a74059aa353288868b",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 164,
"avg_line_length": 32.01197604790419,
"alnum_prop": 0.5712682379349046,
"repo_name": "mahfiaz/spotter_irl",
"id": "ef20aae862196bf22c4b066c53c4cabead7db1fe",
"size": "16060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gameserver.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "14045"
},
{
"name": "HTML",
"bytes": "24723"
},
{
"name": "JavaScript",
"bytes": "55397"
},
{
"name": "Python",
"bytes": "103318"
}
],
"symlink_target": ""
} |
import Image
from ImageDraw import Draw
import random
from vision.annotations import *
class Geppetto(object):
    """
    Facilitates making toy tracking data. Geppetto manages the toys and renders the
    frames to produce the actual data.

    size is the (width, height) of every rendered frame, background its RGB
    fill colour, and cap (when > 0) an upper bound on the number of frames
    export() will write.
    """
    def __init__(self, size = (720, 480), background = (255, 255, 255), cap = -1):
        self.size = size
        self.background = background
        self.toys = []
        # highest frame count seen across added toys; render() range-checks
        # against this value
        self.frames = 0
        self.cap = cap
    def add(self, toy):
        """
        Adds a toy that will be rendered.
        """
        self.toys.append(toy)
        # keep the animation as long as the longest toy
        self.frames = max(self.frames, toy.frames)
    def render(self, frame):
        """
        Renders a frame and returns an PIL instance.

        Raises ValueError when frame is past the end of the animation.
        """
        if frame >= self.frames:
            raise ValueError("Requested frame {0}, but there are only {1}".format(frame, self.frames))
        canvas = Image.new("RGB", self.size, self.background)
        # toys are painted in insertion order, so later toys draw on top
        for toy in self.toys:
            toy.render(frame, canvas)
        return canvas
    def write(self, frame, location):
        """
        Writes a rendered frame to disk.
        """
        self.render(frame).save(location)
    def export(self, location, format = "jpg"):
        """
        Exports all rendered frames to disk as <location>/<frame>.<format>.

        Note: recomputes self.frames from the toys (and clamps it to cap),
        so exporting can shorten the animation length seen by render().
        Raises if no toys have been added (max() of an empty sequence).
        """
        self.frames = max(x.frames for x in self.toys)
        if self.cap > 0:
            self.frames = min(self.cap, self.frames)
        for frame in range(self.frames):
            self.write(frame, "{location}/{frame}.{format}".format(location=location, frame=frame, format=format))
    def __getitem__(self, frame):
        """
        Alias to render a frame.
        """
        return self.render(frame)
    def __len__(self):
        # number of frames in the animation
        return self.frames
class Toy(object):
    """
    An abstract toy class.

    A toy records one (x, y) position per frame in self.positions (None
    means the toy is invisible in that frame).  The movement methods
    (linear, stationary, disappear, random, set) append positions up to a
    target frame and return self so calls can be chained.  Subclasses
    implement draw().
    """
    def __init__(self, position = (0,0), size = (100,100), color = "black"):
        self.size = size
        self.color = color
        # one entry per frame; frame 0 starts at the initial position
        self.positions = [position]
        self.lastposition = position
        self.frames = 1
    def linear(self, position, frame, chaos = 0):
        """
        Moves the object to a new location using linear interpolation.
        If chaos is nonzero, then the object will jiggle on its way there.

        Raises ValueError when frame is not strictly ahead of the current
        time index.
        """
        if frame <= self.frames:
            raise ValueError("Target frame is behind current time index")
        fdiff = float(frame - self.frames)
        # per-frame step along each axis
        rx = (position[0] - self.lastposition[0]) / fdiff
        ry = (position[1] - self.lastposition[1]) / fdiff
        for i in range(self.frames + 1, frame):
            x = self.lastposition[0] + rx * (i - self.frames)
            x += random.randint(-chaos, chaos)
            y = self.lastposition[1] + ry * (i - self.frames)
            y += random.randint(-chaos, chaos)
            # jitter must not push the toy off the top/left edge
            y = max(y, 0)
            x = max(x, 0)
            self.positions.append((int(x),int(y)))
        # the final frame lands exactly on the requested position
        self.positions.append(position)
        self.frames = frame
        self.lastposition = position
        return self
    def stationary(self, frame):
        """
        Causes the object to remain still until the specified frame.
        """
        for i in range(frame - self.frames):
            self.positions.append(self.lastposition)
        self.frames = frame
        return self
    def disappear(self, frame, reappear = True):
        """
        Causes the object to disappear until specified frame.

        When reappear is True the toy becomes visible again at its last
        position in the target frame; otherwise it stays hidden.
        """
        if frame < self.frames:
            raise ValueError("Target frame is behind current time index")
        amount = frame - self.frames
        if reappear:
            # reserve the final frame for the reappearance
            amount -= 1
        self.positions.extend([None] * amount)
        if reappear:
            self.positions.append(self.lastposition)
        self.frames = frame
        return self
    def random(self, frame, estate = (720, 480)):
        """
        Causes the object to randomly teleport around the screen.

        estate is the canvas size; positions are drawn so the toy stays
        fully inside it.
        """
        for _ in range(frame - self.frames):
            self.positions.append((random.randint(0, estate[0] - self.size[0]), random.randint(0, estate[1] - self.size[1])))
        self.frames = frame
        return self
    def set(self, position):
        """
        Moves the object to a new location by one frame only.
        """
        self.positions.append(position)
        self.lastposition = position
        self.frames += 1
        return self
    def render(self, frame, canvas):
        """
        Renders the specified frame to the canvas.

        Frames past the end or with a None position are silently skipped.
        """
        if frame < self.frames and self.positions[frame]:
            self.draw(frame, canvas)
    def draw(self, canvas):
        # subclass responsibility; note subclasses implement
        # draw(self, frame, canvas) despite this two-argument stub
        raise NotImplementedError()
    def __getitem__(self, frame):
        """
        Gets the bounding for this toy at a certain frame.

        A hidden frame yields a degenerate 1x1 Box whose final argument is
        1 -- presumably a "lost/occluded" flag on Box; confirm against
        vision.annotations.
        """
        if frame < 0:
            frame = len(self) + frame
        pos = self.positions[frame]
        if not pos:
            return Box(0, 0, 1, 1, frame, 1)
        return Box(pos[0], pos[1],
                   pos[0] + self.size[0],
                   pos[1] + self.size[1], frame, 0)
    def __len__(self):
        return len(self.positions)
    def groundtruth(self):
        # one Box per frame, via __getitem__
        return list(self)
class Rectangle(Toy):
    """
    Produces a rectangle as the toy.
    """
    def draw(self, frame, canvas):
        x, y = self.positions[frame]
        w, h = self.size
        Draw(canvas).rectangle((x, y, x + w, y + h), fill = self.color)
class Ellipse(Toy):
    """
    Produces an ellipsis as the toy.
    """
    def draw(self, frame, canvas):
        x, y = self.positions[frame]
        w, h = self.size
        Draw(canvas).ellipse((x, y, x + w, y + h), fill = self.color)
class Bitmap(Toy):
    """
    Draws a bitmap instead of any vector graph.
    """
    def __init__(self, image, positions = (0,0)):
        # Bug fix: the original called Toy.__init__(positions, ...),
        # passing the position tuple as 'self' and never initialising
        # this instance; it crashed on first use.
        Toy.__init__(self, positions, size = image.size)
        self.image = image
    def draw(self, frame, canvas):
        p = self.positions[frame]
        # Bug fix: 'image' was an unresolved bare name; use the attribute.
        canvas.paste(self.image, p)
def seed(s = 0):
    """
    Allows changing the random seed so that the same path is generated
    repeatedly.

    s may be any value accepted by random.seed (default 0).
    """
    random.seed(s)
| {
"content_hash": "9d81f7038a0272accd3804917dd65e70",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 125,
"avg_line_length": 31.060606060606062,
"alnum_prop": 0.5580487804878049,
"repo_name": "weiliu89/pyvision",
"id": "76b6f2bff938e9d34b8f6b131573cb01b24e34eb",
"size": "6150",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vision/toymaker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "60738"
},
{
"name": "C++",
"bytes": "46332"
},
{
"name": "Makefile",
"bytes": "3057"
},
{
"name": "Matlab",
"bytes": "1611"
},
{
"name": "Python",
"bytes": "159518"
}
],
"symlink_target": ""
} |
import os
import json
import codecs
from PIL import Image
from io import BytesIO
import numpy as np
class Hexagram(object):
    """
    Generate and write a hexagram to PNG.

    The pattern is an iterable of six binary digits or booleans:
    1 / True draws a solid bar, 0 / False a broken bar.  The rendered
    figure is kept as an 8-bit numpy array in self.generated
    (0 = black, 255 = white) and can be dumped with .dump(),
    .dump_json() or .dump_image().
    """
    def __init__(self, pattern, plength=6):
        if len(pattern) != plength:
            raise HexagramException("Pass an iterable of %s digits or booleans" % plength)
        self.bar_height = 8
        self.wbar_height = 4
        # six solid bars plus five white gaps make the figure square
        self.bar_width = self.bar_height * 6 + self.wbar_height * 5
        self.generated = self.generate(pattern)
    def _black_row(self):
        """ an unbroken bar followed by its white spacer """
        solid = np.zeros((self.bar_height, self.bar_width))
        spacer = np.ones((self.wbar_height, self.bar_width))
        return np.vstack([solid, spacer])
    def _broken_row(self):
        """ a broken bar (two halves around a central gap) plus spacer """
        half = (self.bar_width // 2) - self.bar_height
        bar = np.hstack([
            np.zeros((self.bar_height, half)),
            np.ones((self.bar_height, self.bar_height * 2)),
            np.zeros((self.bar_height, half)),
        ])
        return np.vstack([bar, np.ones((self.wbar_height, self.bar_width))])
    def trim(self, raw_hexagram):
        """ remove trailing white bar from bottom of hexagram / trigram """
        raw_hexagram[-1] = raw_hexagram[-1][:self.bar_height]
        return raw_hexagram
    def generate(self, pattern):
        """ generate a scaled b&w hexagram as a uint8 array """
        rows = []
        # hexagrams are grown bottom to top, so prepend each new bar
        for flag in pattern:
            rows.insert(0, self._black_row() if flag else self._broken_row())
        stacked = np.vstack(self.trim(rows))
        # rescale to 256 x 8-bit (0 = black, 255 = white)
        return (255.0 / stacked.max() * (stacked - stacked.min())).astype(np.uint8)
    def dump(self, fname=False):
        """ write hexagram to <classname>_output/<fname>.png """
        name = (fname or self.__class__.__name__.lower())
        outdir = '%s%s' % (self.__class__.__name__.lower(), '_output')
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        target = os.path.join(outdir, "%s%s" % (name, ".png"))
        Image.fromarray(self.generated).save(target)
    def dump_json(self, fname=False):
        """ tries to dump a JSON representation of the array to a file """
        name = (fname or self.__class__.__name__.lower())
        try:
            with codecs.open("%s%s" % (name, ".json"), 'w', encoding="utf-8") as handle:
                handle.write(
                    json.dumps(
                        self.generated.tolist(),
                        indent=4,
                        separators=(',', ':'),
                        sort_keys=True)
                )
        except IOError:
            raise WriteException("Couldn't write json! You could also copy the .json property to your clipboard.")
    def dump_image(self):
        """ returns the hexagram PNG as an in-memory file object """
        buf = BytesIO()
        Image.fromarray(self.generated).save(buf, 'PNG', quality=100)
        buf.seek(0)
        return buf
class Trigram(Hexagram):
    """ Same as hexagram, but with three bars """
    def __init__(self, pattern):
        # Bug fix: super(self.__class__, ...) recurses infinitely if
        # Trigram is ever subclassed; name the class explicitly.
        super(Trigram, self).__init__(pattern, plength=3)
class HexagramException(Exception):
    """ raised when the supplied pattern has the wrong number of bars """
class WriteException(Exception):
    """ raised when writing output fails, like an IOError """
| {
"content_hash": "63991729312a5353456952a84d838d2d",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 114,
"avg_line_length": 35.054545454545455,
"alnum_prop": 0.5567946058091287,
"repo_name": "urschrei/hexagrams",
"id": "6b6020e64b0370459a4c1974ff1ba0d6cc2b690e",
"size": "3968",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hexagram.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2569"
},
{
"name": "Jupyter Notebook",
"bytes": "1279"
},
{
"name": "Python",
"bytes": "14443"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Rename SurveyType.description to short_description, add a
    full_description text field and a public flag, and give
    CycleResultSet.cycle a related_name.  Code left untouched:
    applied migrations must never change behavior."""

    dependencies = [
        ('umibukela', '0029_auto_20170226_0745'),
    ]

    operations = [
        migrations.RenameField(
            model_name='surveytype',
            old_name='description',
            new_name='short_description',
        ),
        migrations.AddField(
            model_name='surveytype',
            name='full_description',
            field=models.TextField(default='', help_text=b'This is a thorough description used to fully explain the purpose behind the surveys of this type.'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='surveytype',
            name='public',
            field=models.BooleanField(default=False),
        ),
        migrations.AlterField(
            model_name='cycleresultset',
            name='cycle',
            field=models.ForeignKey(related_name='cycle_result_sets', to='umibukela.Cycle'),
        ),
    ]
| {
"content_hash": "52b58e54f16a8c87eeec3361d64cbbcc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 159,
"avg_line_length": 31.029411764705884,
"alnum_prop": 0.5867298578199052,
"repo_name": "Code4SA/umibukela",
"id": "25238183ba590a97270fd473cda7f2b0a03e0e3b",
"size": "1079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "umibukela/migrations/0030_pub_priv_survey_types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "136084"
},
{
"name": "HTML",
"bytes": "148202"
},
{
"name": "JavaScript",
"bytes": "70122"
},
{
"name": "Python",
"bytes": "210522"
},
{
"name": "Shell",
"bytes": "511"
}
],
"symlink_target": ""
} |
class Database:
    """Abstract persistence interface for OEDA target systems,
    experiments, stages and data points.

    Concrete backends override the no-op methods; the static helpers
    define the composite-id scheme shared by all implementations.
    """

    def __init__(self):
        pass

    def save_target(self, target_system_id, target_system_data):
        """ saves the data of an OEDA target system with the provided id """
        pass

    def get_target(self, target_system_id):
        """ returns the configuration of an OEDA target system """
        pass

    def get_targets(self):
        """ returns all the target systems """
        pass

    def save_experiment(self, experiment_id, experiment_data):
        """ saves the data of an OEDA experiment with the provided id """
        pass

    def get_experiment(self, experiment_id):
        """ returns the configuration of an OEDA experiment """
        pass

    def get_experiments(self):
        """ returns all OEDA experiments """
        pass

    def update_experiment_status(self, experiment_id, status):
        """ updates experiment status with provided id """
        pass

    def update_target_system_status(self, target_system_id, status):
        """ updates target system status with provided id (the previous
        docstring was copy-pasted from the experiment variant) """
        pass

    def save_stage(self, stage_no, knobs, experiment_id):
        """ saves stage of an OEDA experiment with provided configuration and stage no """
        pass

    def get_stages(self, experiment_id):
        """ returns all stages of an OEDA experiment with provided id """
        pass

    def get_stages_after(self, experiment_id, timestamp):
        """ returns all stages of an OEDA experiment that are created after the timestamp """
        pass

    def save_data_point(self, payload, data_point_count, experiment_id, stage_no):
        """ saves data of the given stage """
        pass

    def get_data_points(self, experiment_id, stage_no):
        """ returns data_points whose parent is the concatenated stage_id (see create_stage_id) """
        pass

    def get_data_points_after(self, experiment_id, stage_no, timestamp):
        """ returns data_points that are created after the given timestamp. Data points' parents are the concatenated stage_id (see create_stage_id) """
        pass

    @staticmethod
    def create_stage_id(experiment_id, stage_no):
        """ builds the composite '<experiment>#<stage>' identifier """
        return "%s#%s" % (experiment_id, stage_no)

    @staticmethod
    def create_data_point_id(experiment_id, stage_no, data_point_count):
        """ builds the composite '<experiment>#<stage>_<count>' identifier """
        return "%s#%s_%s" % (experiment_id, stage_no, data_point_count)
class TargetSystemNotFoundException(Exception):
    """ raised when a requested target system does not exist """
| {
"content_hash": "a8210b04ac9d1e47f44976c809360087",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 152,
"avg_line_length": 33.41095890410959,
"alnum_prop": 0.6396063960639606,
"repo_name": "Starofall/OEDA",
"id": "82a1e15039a3bbbf3e37eef3688b2881c59bb357",
"size": "2544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Backend/oeda/databases/Database.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "551"
},
{
"name": "CSS",
"bytes": "51907"
},
{
"name": "HTML",
"bytes": "94331"
},
{
"name": "JavaScript",
"bytes": "6406440"
},
{
"name": "Python",
"bytes": "118468"
},
{
"name": "Shell",
"bytes": "101"
},
{
"name": "TypeScript",
"bytes": "159455"
}
],
"symlink_target": ""
} |
"""Tests for linear.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow_estimator.python.estimator.canned.v1 import linear_testing_utils_v1
def _linear_regressor_fn(*args, **kwargs):
  """Factory forwarded to the shared test-base classes below."""
  return linear.LinearRegressor(*args, **kwargs)
def _linear_classifier_fn(*args, **kwargs):
  """Factory forwarded to the shared test-base classes below."""
  return linear.LinearClassifier(*args, **kwargs)
# Tests for Linear Regressor.
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorPartitionerTest(
linear_testing_utils_v1.BaseLinearRegressorPartitionerTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorPartitionerV2Test(
linear_testing_utils_v1.BaseLinearRegressorPartitionerTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorPartitionerTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorEvaluationTest(
linear_testing_utils_v1.BaseLinearRegressorEvaluationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorEvaluationV2Test(
linear_testing_utils_v1.BaseLinearRegressorEvaluationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorEvaluationTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorPredictTest(
linear_testing_utils_v1.BaseLinearRegressorPredictTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorPredictV2Test(
linear_testing_utils_v1.BaseLinearRegressorPredictTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorPredictTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorIntegrationTest(
linear_testing_utils_v1.BaseLinearRegressorIntegrationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorIntegrationV2Test(
linear_testing_utils_v1.BaseLinearRegressorIntegrationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorIntegrationTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorTrainingTest(
linear_testing_utils_v1.BaseLinearRegressorTrainingTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearRegressorTrainingV2Test(
linear_testing_utils_v1.BaseLinearRegressorTrainingTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearRegressorTrainingTest.__init__(
self, _linear_regressor_fn, fc_lib=feature_column_v2)
# Tests for Linear Classifier.
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierTrainingTest(
linear_testing_utils_v1.BaseLinearClassifierTrainingTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierTrainingTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierTrainingV2Test(
linear_testing_utils_v1.BaseLinearClassifierTrainingTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierTrainingTest.__init__(
self,
linear_classifier_fn=_linear_classifier_fn,
fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierEvaluationTest(
linear_testing_utils_v1.BaseLinearClassifierEvaluationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierEvaluationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierEvaluationV2Test(
linear_testing_utils_v1.BaseLinearClassifierEvaluationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierEvaluationTest.__init__(
self,
linear_classifier_fn=_linear_classifier_fn,
fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierPredictTest(
linear_testing_utils_v1.BaseLinearClassifierPredictTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierPredictTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierPredictV2Test(
linear_testing_utils_v1.BaseLinearClassifierPredictTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierPredictTest.__init__(
self,
linear_classifier_fn=_linear_classifier_fn,
fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierIntegrationTest(
linear_testing_utils_v1.BaseLinearClassifierIntegrationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierIntegrationTest.__init__(
self, linear_classifier_fn=_linear_classifier_fn, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearClassifierIntegrationV2Test(
linear_testing_utils_v1.BaseLinearClassifierIntegrationTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearClassifierIntegrationTest.__init__(
self,
linear_classifier_fn=_linear_classifier_fn,
fc_lib=feature_column_v2)
# Tests for Linear logit_fn.
@test_util.run_v1_only('Tests v1 only symbols')
class LinearLogitFnTest(linear_testing_utils_v1.BaseLinearLogitFnTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearLogitFnTest.__init__(
self, fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearLogitFnV2Test(linear_testing_utils_v1.BaseLinearLogitFnTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearLogitFnTest.__init__(
self, fc_lib=feature_column_v2)
# Tests for warm-starting with Linear logit_fn.
@test_util.run_v1_only('Tests v1 only symbols')
class LinearWarmStartingTest(linear_testing_utils_v1.BaseLinearWarmStartingTest,
tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearWarmStartingTest.__init__(
self,
_linear_classifier_fn,
_linear_regressor_fn,
fc_lib=feature_column)
@test_util.run_v1_only('Tests v1 only symbols')
class LinearWarmStartingV2Test(
linear_testing_utils_v1.BaseLinearWarmStartingTest, tf.test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
tf.test.TestCase.__init__(self, methodName)
linear_testing_utils_v1.BaseLinearWarmStartingTest.__init__(
self,
_linear_classifier_fn,
_linear_regressor_fn,
fc_lib=feature_column_v2)
@test_util.run_v1_only('Tests v1 only symbols')
class ComputeFractionOfZeroTest(tf.test.TestCase):
  """Tests for linear._compute_fraction_of_zero (weight-sparsity metric)."""

  def _assertSparsity(self, expected_sparsity, tensor):
    # Helper: evaluate the sparsity op in a session and compare numerically.
    sparsity = linear._compute_fraction_of_zero([tensor])
    with self.test_session() as sess:
      self.assertAllClose(expected_sparsity, sess.run(sparsity))

  def test_small_float32(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.float32))
    self._assertSparsity(
        0.5, ops.convert_to_tensor([0, 1, 0, 1], dtype=tf.dtypes.float32))

  def test_small_int32(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.int32))

  def test_small_float64(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.float64))

  def test_small_int64(self):
    self._assertSparsity(
        0.75, ops.convert_to_tensor([0, 0, 0, 1], dtype=tf.dtypes.int64))

  def test_nested(self):
    # Nested lists of variables must be handled as one flat population.
    self._assertSparsity(
        0.75, [ops.convert_to_tensor([0, 0]),
               ops.convert_to_tensor([0, 1])])

  def test_none(self):
    with self.assertRaises(ValueError):
      linear._compute_fraction_of_zero([])

  def test_empty(self):
    # Zero elements: sparsity is undefined, expect NaN rather than a crash.
    sparsity = linear._compute_fraction_of_zero([ops.convert_to_tensor([])])
    with self.test_session() as sess:
      sparsity_np = sess.run(sparsity)
      self.assertTrue(
          np.isnan(sparsity_np), 'Expected sparsity=nan, got %s' % sparsity_np)

  def test_multiple_empty(self):
    sparsity = linear._compute_fraction_of_zero([
        ops.convert_to_tensor([]),
        ops.convert_to_tensor([]),
    ])
    with self.test_session() as sess:
      sparsity_np = sess.run(sparsity)
      self.assertTrue(
          np.isnan(sparsity_np), 'Expected sparsity=nan, got %s' % sparsity_np)

  def test_some_empty(self):
    # Empty members contribute nothing; only the non-empty tensors count.
    with self.test_session():
      self._assertSparsity(0.5, [
          ops.convert_to_tensor([]),
          ops.convert_to_tensor([0.]),
          ops.convert_to_tensor([1.]),
      ])

  def test_mixed_types(self):
    with self.test_session():
      self._assertSparsity(0.6, [
          ops.convert_to_tensor([0, 0, 1, 1, 1], dtype=tf.dtypes.float32),
          ops.convert_to_tensor([0, 0, 0, 0, 1], dtype=tf.dtypes.int32),
      ])

  # The two tests below exercise counts past 2**27 to catch accumulator
  # overflow/precision problems on large variable sets.
  def test_2_27_zeros__using_512_MiB_of_ram(self):
    self._assertSparsity(1., tf.zeros([int(2**27 * 1.01)],
                                      dtype=tf.dtypes.int8))

  def test_2_27_ones__using_512_MiB_of_ram(self):
    self._assertSparsity(0., tf.ones([int(2**27 * 1.01)], dtype=tf.dtypes.int8))
if __name__ == '__main__':
tf.test.main()
| {
"content_hash": "e23ce7e87480730a97540acdf29d3e94",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 83,
"avg_line_length": 37.578347578347575,
"alnum_prop": 0.7078089461713419,
"repo_name": "tensorflow/estimator",
"id": "4e8184a54f4f821f0372c0898618761917ffc80f",
"size": "13879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_estimator/python/estimator/canned/v1/linear_test_v1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "11293"
},
{
"name": "Python",
"bytes": "3919795"
},
{
"name": "Shell",
"bytes": "4038"
},
{
"name": "Starlark",
"bytes": "86773"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import errno
import warnings
import hmac
from binascii import hexlify, unhexlify
from hashlib import md5, sha1, sha256
from ..exceptions import SSLError, InsecurePlatformWarning, SNIMissingWarning
SSLContext = None
HAS_SNI = False
IS_PYOPENSSL = False
# Maps the length of a digest to a possible hash function producing this digest
HASHFUNC_MAP = {
32: md5,
40: sha1,
64: sha256,
}
def _const_compare_digest_backport(a, b):
"""
Compare two digests of equal length in constant time.
The digests must be of type str/bytes.
Returns True if the digests match, and False otherwise.
"""
result = abs(len(a) - len(b))
for l, r in zip(bytearray(a), bytearray(b)):
result |= l ^ r
return result == 0
# Prefer the constant-time comparison from the stdlib (Python 2.7.7+/3.3+),
# falling back to the pure-Python backport above.
_const_compare_digest = getattr(hmac, 'compare_digest',
                                _const_compare_digest_backport)


try:  # Test for SSL features
    import ssl
    from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23
    from ssl import HAS_SNI  # Has SNI?
except ImportError:
    pass


try:
    from ssl import OP_NO_SSLv2, OP_NO_SSLv3, OP_NO_COMPRESSION
except ImportError:
    # Values copied from the ssl module for Pythons whose ssl lacks them.
    OP_NO_SSLv2, OP_NO_SSLv3 = 0x1000000, 0x2000000
    OP_NO_COMPRESSION = 0x20000
# A secure default.
# Sources for more information on TLS ciphers:
#
# - https://wiki.mozilla.org/Security/Server_Side_TLS
# - https://www.ssllabs.com/projects/best-practices/index.html
# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
#
# The general intent is:
# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
# - prefer ECDHE over DHE for better performance,
# - prefer any AES-GCM and ChaCha20 over any AES-CBC for better performance and
# security,
# - prefer AES-GCM over ChaCha20 because hardware-accelerated AES is common,
# - disable NULL authentication, MD5 MACs and DSS for security reasons.
# Ordered most-preferred first; OpenSSL selects the first mutually
# supported suite from this colon-separated list.
DEFAULT_CIPHERS = ':'.join([
    'ECDH+AESGCM',
    'ECDH+CHACHA20',
    'DH+AESGCM',
    'DH+CHACHA20',
    'ECDH+AES256',
    'DH+AES256',
    'ECDH+AES128',
    'DH+AES',
    'RSA+AESGCM',
    'RSA+AES',
    '!aNULL',
    '!eNULL',
    '!MD5',
])
try:
    from ssl import SSLContext  # Modern SSL?
except ImportError:
    import sys

    class SSLContext(object):  # Platform-specific: Python 2 & 3.1
        """
        Minimal shim mimicking :class:`ssl.SSLContext` for Pythons that
        lack it.  Configuration is recorded on the instance and applied
        in one shot via :func:`ssl.wrap_socket` when ``wrap_socket`` is
        called.
        """
        # ssl.wrap_socket only grew a ``ciphers`` argument in 2.7/3.2.
        supports_set_ciphers = ((2, 7) <= sys.version_info < (3,) or
                                (3, 2) <= sys.version_info)

        def __init__(self, protocol_version):
            self.protocol = protocol_version
            # Use default values from a real SSLContext
            self.check_hostname = False
            self.verify_mode = ssl.CERT_NONE
            self.ca_certs = None
            self.options = 0
            self.certfile = None
            self.keyfile = None
            self.ciphers = None

        def load_cert_chain(self, certfile, keyfile):
            # Stored for later; applied in wrap_socket().
            self.certfile = certfile
            self.keyfile = keyfile

        def load_verify_locations(self, cafile=None, capath=None):
            self.ca_certs = cafile

            # ssl.wrap_socket has no capath equivalent on these Pythons.
            if capath is not None:
                raise SSLError("CA directories not supported in older Pythons")

        def set_ciphers(self, cipher_suite):
            if not self.supports_set_ciphers:
                raise TypeError(
                    'Your version of Python does not support setting '
                    'a custom cipher suite. Please upgrade to Python '
                    '2.7, 3.2, or later if you need this functionality.'
                )
            self.ciphers = cipher_suite

        def wrap_socket(self, socket, server_hostname=None, server_side=False):
            # Warn on every wrap: without a real SSLContext there is no SNI
            # and only coarse-grained verification, which weakens security.
            warnings.warn(
                'A true SSLContext object is not available. This prevents '
                'urllib3 from configuring SSL appropriately and may cause '
                'certain SSL connections to fail. You can upgrade to a newer '
                'version of Python to solve this. For more information, see '
                'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
                '#ssl-warnings',
                InsecurePlatformWarning
            )
            kwargs = {
                'keyfile': self.keyfile,
                'certfile': self.certfile,
                'ca_certs': self.ca_certs,
                'cert_reqs': self.verify_mode,
                'ssl_version': self.protocol,
                'server_side': server_side,
            }
            if self.supports_set_ciphers:  # Platform-specific: Python 2.7+
                return wrap_socket(socket, ciphers=self.ciphers, **kwargs)
            else:  # Platform-specific: Python 2.6
                return wrap_socket(socket, **kwargs)
def assert_fingerprint(cert, fingerprint):
    """
    Raise :class:`SSLError` unless *fingerprint* matches *cert*.

    :param cert:
        Certificate as bytes object.
    :param fingerprint:
        Fingerprint as string of hexdigits, can be interspersed by colons.
    """
    fingerprint = fingerprint.replace(':', '').lower()

    # The digest length uniquely identifies the hash that produced it.
    hashfunc = HASHFUNC_MAP.get(len(fingerprint))
    if hashfunc is None:
        raise SSLError(
            'Fingerprint of invalid length: {0}'.format(fingerprint))

    # We need encode() here for py32; works on py2 and p33.
    expected = unhexlify(fingerprint.encode())
    actual = hashfunc(cert).digest()

    # Constant-time comparison guards against timing side channels.
    if not _const_compare_digest(actual, expected):
        raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".'
                       .format(fingerprint, hexlify(actual)))
def resolve_cert_reqs(candidate):
    """
    Resolve *candidate* to a numeric constant suitable for the
    ``cert_reqs`` argument of ``ssl.wrap_socket``.

    ``None`` resolves to :data:`ssl.CERT_NONE`.  A string is looked up as
    a constant name in the :mod:`ssl` module, either verbatim or with a
    ``CERT_`` prefix (so ``'REQUIRED'`` behaves like ``'CERT_REQUIRED'``).
    Anything else is assumed to already be the numeric constant and is
    returned unchanged.
    """
    if candidate is None:
        return CERT_NONE

    if not isinstance(candidate, str):
        return candidate

    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        # Try the abbreviated form; raises AttributeError for unknown names.
        resolved = getattr(ssl, 'CERT_' + candidate)
    return resolved
def resolve_ssl_version(candidate):
    """
    Resolve *candidate* to an :mod:`ssl` protocol constant.

    Works like :func:`resolve_cert_reqs`, but defaults to
    :data:`ssl.PROTOCOL_SSLv23` and uses the ``PROTOCOL_`` prefix for
    abbreviated string names.
    """
    if candidate is None:
        return PROTOCOL_SSLv23

    if not isinstance(candidate, str):
        return candidate

    resolved = getattr(ssl, candidate, None)
    if resolved is None:
        resolved = getattr(ssl, 'PROTOCOL_' + candidate)
    return resolved
def create_urllib3_context(ssl_version=None, cert_reqs=None,
                           options=None, ciphers=None):
    """All arguments have the same meaning as ``ssl_wrap_socket``.

    By default, this function does a lot of the same work that
    ``ssl.create_default_context`` does on Python 3.4+. It:

    - Disables SSLv2, SSLv3, and compression
    - Sets a restricted set of server ciphers

    If you wish to enable SSLv3, you can do::

        from urllib3.util import ssl_
        context = ssl_.create_urllib3_context()
        context.options &= ~ssl_.OP_NO_SSLv3

    You can do the same to enable compression (substituting ``COMPRESSION``
    for ``SSLv3`` in the last line above).

    :param ssl_version:
        The desired protocol version to use. This will default to
        PROTOCOL_SSLv23 which will negotiate the highest protocol that both
        the server and your installation of OpenSSL support.
    :param cert_reqs:
        Whether to require the certificate verification. This defaults to
        ``ssl.CERT_REQUIRED``.
    :param options:
        Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,
        ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``.
    :param ciphers:
        Which cipher suites to allow the server to select.
    :returns:
        Constructed SSLContext object with specified options
    :rtype: SSLContext
    """
    context = SSLContext(ssl_version or ssl.PROTOCOL_SSLv23)

    # Setting the default here, as we may have no ssl module on import
    cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs

    # Explicit ``options`` from the caller replace (not extend) the secure
    # defaults below.
    if options is None:
        options = 0
        # SSLv2 is easily broken and is considered harmful and dangerous
        options |= OP_NO_SSLv2
        # SSLv3 has several problems and is now dangerous
        options |= OP_NO_SSLv3
        # Disable compression to prevent CRIME attacks for OpenSSL 1.0+
        # (issue #309)
        options |= OP_NO_COMPRESSION

    context.options |= options

    if getattr(context, 'supports_set_ciphers', True):  # Platform-specific: Python 2.6
        context.set_ciphers(ciphers or DEFAULT_CIPHERS)

    context.verify_mode = cert_reqs
    if getattr(context, 'check_hostname', None) is not None:  # Platform-specific: Python 3.2
        # We do our own verification, including fingerprints and alternative
        # hostnames. So disable it here
        context.check_hostname = False
    return context
def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
                    ca_certs=None, server_hostname=None,
                    ssl_version=None, ciphers=None, ssl_context=None,
                    ca_cert_dir=None):
    """
    All arguments except for server_hostname, ssl_context, and ca_cert_dir have
    the same meaning as they do when using :func:`ssl.wrap_socket`.

    :param server_hostname:
        When SNI is supported, the expected hostname of the certificate
    :param ssl_context:
        A pre-made :class:`SSLContext` object. If none is provided, one will
        be created using :func:`create_urllib3_context`.
    :param ciphers:
        A string of ciphers we wish the client to support. This is not
        supported on Python 2.6 as the ssl module does not support it.
    :param ca_cert_dir:
        A directory containing CA certificates in multiple separate files, as
        supported by OpenSSL's -CApath flag or the capath argument to
        SSLContext.load_verify_locations().
    :returns:
        The TLS-wrapped socket.
    :raises SSLError:
        If the CA bundle/directory cannot be loaded.
    """
    context = ssl_context
    if context is None:
        # Note: This branch of code and all the variables in it are no longer
        # used by urllib3 itself. We should consider deprecating and removing
        # this code.
        context = create_urllib3_context(ssl_version, cert_reqs,
                                         ciphers=ciphers)

    if ca_certs or ca_cert_dir:
        # (Fix: removed a leftover debugging ``print`` that dumped the CA
        # bundle path and directory to stdout on every handshake.)
        try:
            context.load_verify_locations(ca_certs, ca_cert_dir)
        except IOError as e:  # Platform-specific: Python 2.6, 2.7, 3.2
            raise SSLError(e)
        # Py33 raises FileNotFoundError which subclasses OSError
        # These are not equivalent unless we check the errno attribute
        except OSError as e:  # Platform-specific: Python 3.3 and beyond
            if e.errno == errno.ENOENT:
                raise SSLError(e)
            raise
    elif getattr(context, 'load_default_certs', None) is not None:
        # try to load OS default certs; works well on Windows (require Python3.4+)
        context.load_default_certs()

    if certfile:
        context.load_cert_chain(certfile, keyfile)
    if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
        return context.wrap_socket(sock, server_hostname=server_hostname)

    warnings.warn(
        'An HTTPS request has been made, but the SNI (Subject Name '
        'Indication) extension to TLS is not available on this platform. '
        'This may cause the server to present an incorrect TLS '
        'certificate, which can cause validation failures. You can upgrade to '
        'a newer version of Python to solve this. For more information, see '
        'https://urllib3.readthedocs.io/en/latest/advanced-usage.html'
        '#ssl-warnings',
        SNIMissingWarning
    )
    return context.wrap_socket(sock)
| {
"content_hash": "322e96a500830f47c73ae5b0f87e2976",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 93,
"avg_line_length": 35.9080118694362,
"alnum_prop": 0.6346582927030824,
"repo_name": "typemytype/Mechanic",
"id": "9bad178573218fd21cf8619198f103e2f034d8cc",
"size": "12101",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Mechanic.roboFontExt/lib/site-packages/requests/packages/urllib3/util/ssl_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1018"
},
{
"name": "Python",
"bytes": "2329076"
},
{
"name": "Ruby",
"bytes": "3412"
}
],
"symlink_target": ""
} |
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: default ``Advice.grant_on`` to the current time."""

    dependencies = [("advicer", "0012_auto_20151124_1541")]

    operations = [
        migrations.AlterField(
            model_name="advice",
            name="grant_on",
            field=models.DateTimeField(
                default=django.utils.timezone.now, verbose_name="Grant on"
            ),
        )
    ]
| {
"content_hash": "1e41bf553e5d9506078237b3e059dd5a",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 74,
"avg_line_length": 25.352941176470587,
"alnum_prop": 0.5986078886310905,
"repo_name": "rwakulszowa/poradnia",
"id": "dca31f96eb8ab689848a42299485bdb76097b8ba",
"size": "431",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "poradnia/advicer/migrations/0013_auto_20151218_0035.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "261351"
},
{
"name": "HTML",
"bytes": "154406"
},
{
"name": "JavaScript",
"bytes": "1083760"
},
{
"name": "Makefile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "481049"
},
{
"name": "Shell",
"bytes": "320"
}
],
"symlink_target": ""
} |
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
    # Allow running this example directly by putting the repository's
    # working directory on the import path before the package imports below.
    from os import getcwd
    # NOTE(review): ``from os import sys`` only works because ``os`` happens
    # to import ``sys``; plain ``import sys`` is the conventional spelling.
    from os import sys
    sys.path.append(getcwd())
from MOAL.helpers.display import Section, print_h2
from rx.internal import extensionmethod
from rx import Observable, AnonymousObservable, Observer
from rx.subjects import Subject
from logging.handlers import RotatingFileHandler
import logging
import string
import time
DEBUG = True if __name__ == '__main__' else False
@extensionmethod(Subject)
def fake_extensionmethod(self):
    """Demo extension method monkey-patched onto ``Subject``.

    Returns an observable that drops every value (its inner ``on_next``
    does nothing) while forwarding error and completion notifications
    from the source.
    """
    print('[custom-extensionmethod] Observer: {}'.format(self))
    source = self

    def subscribe(observer):
        def on_next(x):
            # Intentionally swallow the value: only error/completion
            # signals propagate to the downstream observer.
            pass

        return source.subscribe(
            on_next, observer.on_error, observer.on_completed)
    return AnonymousObservable(subscribe)
class TestObserver(Observer):
    """Observer simulating a slow consumer: sleeps one second per item."""

    def on_next(self, x):
        time.sleep(1)
class TestSubject(Subject):
    """Subject that prints each value before delegating to ``Subject``."""

    def on_next(self, x):
        print('[TEST subject] got: {}'.format(x))
        super(TestSubject, self).on_next(x)
class MyObserver(Observer):
    """Observer that prints every event (value, error, completion)."""

    def on_next(self, x):
        print('Got: {}'.format(x))

    def on_error(self, x):
        print('Got error: {}'.format(x))

    def on_completed(self):
        print('Completed')
class LogWriterObserver(Observer):
    """Observer that writes received events to a rotating file log.

    Events are logged to the 'rxpy_logs' logger/file.  A ``None`` item
    deliberately raises, which exercises the ``on_error`` path.
    """

    def __init__(self, *args, **kwargs):
        super(LogWriterObserver, self).__init__(*args, **kwargs)
        # NOTE(review): each instance adds another handler to the shared
        # 'rxpy_logs' logger, so constructing more than one duplicates
        # every log line.
        self.logger = logging.getLogger('rxpy_logs')
        self.logger.addHandler(RotatingFileHandler('rxpy_logs'))
        self.logger.setLevel(logging.INFO)

    def on_next(self, x):
        self.logger.info('Got item: {}'.format(x))
        if x is None:
            # Fail on purpose so the demo stream hits on_error.
            raise Exception

    def on_error(self, x):
        self.logger.error('Got Error: {}'.format(x))

    def on_completed(self):
        self.logger.info('Completed -------------------------------------')
def _fmt(*args):
return ''.join(map(str, args))
def _print(*args):
    """Print the concatenated arguments with a 'got:' prefix."""
    joined = _fmt(*args)
    print('got: {}'.format(joined))
def print_operator(x):
    """Print *x* with an '[operator]' prefix (subscriber callback)."""
    message = '[operator]: {}'.format(x)
    print(message)
def print_len3(x):
    """Announce that *x* is at least 3 characters long (filter callback)."""
    note = '`{}` is at least 3 characters long.'.format(x)
    print(note)
if DEBUG:
    with Section('Reactive (sort of functional) programming via RxPY'):
        # NOTE(review): this demo is Python-2 only -- it relies on
        # ``range(10) + [None]`` (list concatenation), ``xrange``, and
        # ``map`` evaluated eagerly for its side effects.
        print_h2('Basic Observable')
        # The trailing None triggers LogWriterObserver/MyObserver error paths.
        xs = Observable.from_iterable(range(10) + [None])
        d = xs.subscribe(MyObserver())

        observable_iterable = Observable.from_iterable(xrange(100))
        logwriter = observable_iterable.subscribe(LogWriterObserver())
        print(logwriter)

        print_h2('Observable from_')
        xs = Observable.from_(range(10))
        gobbledygook = Observable.from_(list(string.punctuation))
        letters = Observable.from_(list(string.ascii_uppercase))
        merged = xs.merge(letters, gobbledygook).subscribe(_print)

        print_h2('Subjects')
        stream = TestSubject()
        # Values pushed before subscribing are seen only by the subject itself.
        stream.on_next(1)
        stream.on_next(2)
        d = stream.subscribe(_print)
        map(stream.on_next, range(5))
        stream.on_next(3)
        map(stream.on_next, range(5))
        stream.on_next(4)
        d.dispose()
        # Subclassed version prints, but the subscription object `d` does not
        stream.on_next(5)

        print_h2('Subjects + Operators')
        stream = Subject()
        stream.zip([1, 2, 3], ['a', 'b', 'c']).timestamp().scan('__scanned')
        stream.on_next(1)
        strm = stream.subscribe(print_operator)
        stream.on_next(2)
        strm.dispose()
        # Does not print, as subscriber with print function was disposed.
        stream.on_next(3)
        strm2 = stream.subscribe(print_operator)
        map(stream.on_next, range(10, 20))
        # Only string values longer than 3 characters pass this filter.
        allstream = stream.filter(lambda value: len(value) > 3)
        allstream.subscribe(print_len3)
        stream.on_next('f')
        stream.on_next('ff')
        stream.on_next('fff')
        # Triggered
        stream.on_next('true')
        stream.on_next('fff')
        stream.on_next('ff')
        stream.on_next('f')
        # Triggered
        stream.on_next('truetrue')
| {
"content_hash": "4b53c72782db9a9ef9b64702911356b3",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 77,
"avg_line_length": 27.863013698630137,
"alnum_prop": 0.6081612586037365,
"repo_name": "christabor/MoAL",
"id": "84cd8539024319dd85c03b8ca48563007bee1bb1",
"size": "4093",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MOAL/languages/paradigms/functional/rxpy_examples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1102"
},
{
"name": "Clojure",
"bytes": "1089"
},
{
"name": "Gherkin",
"bytes": "793"
},
{
"name": "HTML",
"bytes": "3579"
},
{
"name": "JavaScript",
"bytes": "1647"
},
{
"name": "Makefile",
"bytes": "1436"
},
{
"name": "PLSQL",
"bytes": "415"
},
{
"name": "Python",
"bytes": "692840"
},
{
"name": "Shell",
"bytes": "4420"
},
{
"name": "TSQL",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
import os
import re
from typing import Dict, Mapping, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.oauth2 import service_account # type: ignore
import pkg_resources
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.retail_v2alpha.types import completion_service, import_config
from .transports.base import DEFAULT_CLIENT_INFO, CompletionServiceTransport
from .transports.grpc import CompletionServiceGrpcTransport
from .transports.grpc_asyncio import CompletionServiceGrpcAsyncIOTransport
class CompletionServiceClientMeta(type):
    """Metaclass for the CompletionService client.

    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """

    # Registry of supported transports, keyed by their string label.
    # Insertion order matters: the first entry is the default transport.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[CompletionServiceTransport]]
    _transport_registry["grpc"] = CompletionServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = CompletionServiceGrpcAsyncIOTransport

    def get_transport_class(
        cls,
        label: Optional[str] = None,
    ) -> Type[CompletionServiceTransport]:
        """Returns an appropriate transport class.

        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.

        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        if label:
            return cls._transport_registry[label]

        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class CompletionServiceClient(metaclass=CompletionServiceClientMeta):
"""Auto-completion service for retail.
This feature is only available for users who have Retail Search
enabled. Please enable Retail Search on Cloud Console before
using this feature.
"""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
    """Converts api endpoint to mTLS endpoint.

    Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
    "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.

    Args:
        api_endpoint (Optional[str]): the api endpoint to convert.
    Returns:
        str: converted mTLS api endpoint.
    """
    # None/empty passes through unchanged.
    if not api_endpoint:
        return api_endpoint

    mtls_endpoint_re = re.compile(
        r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
    )

    m = mtls_endpoint_re.match(api_endpoint)
    # NOTE(review): assumes ``m`` is never None (an endpoint beginning with
    # "." would make this raise AttributeError) -- confirm inputs are
    # always hostnames.
    name, mtls, sandbox, googledomain = m.groups()
    # Already an mTLS endpoint, or not a *.googleapis.com host: leave as-is.
    if mtls or not googledomain:
        return api_endpoint

    if sandbox:
        return api_endpoint.replace(
            "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
        )

    return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
# Default (non-mTLS) API endpoint and its mTLS counterpart, derived at
# class-creation time from the converter above.
DEFAULT_ENDPOINT = "retail.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
    DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
        info.

    Args:
        info (dict): The service account private key info.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        CompletionServiceClient: The constructed client.
    """
    credentials = service_account.Credentials.from_service_account_info(info)
    # Explicit credentials take precedence over anything the environment
    # would otherwise supply in the constructor.
    kwargs["credentials"] = credentials
    return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Creates an instance of this client using the provided credentials
        file.

    Args:
        filename (str): The path to the service account private key json
            file.
        args: Additional arguments to pass to the constructor.
        kwargs: Additional arguments to pass to the constructor.

    Returns:
        CompletionServiceClient: The constructed client.
    """
    credentials = service_account.Credentials.from_service_account_file(filename)
    kwargs["credentials"] = credentials
    return cls(*args, **kwargs)

# Backwards-compatible alias for the JSON key-file loader.
from_service_account_json = from_service_account_file
@property
def transport(self) -> CompletionServiceTransport:
    """Returns the transport used by the client instance.

    Returns:
        CompletionServiceTransport: The transport used by the client
            instance.
    """
    # Set once in __init__; read-only thereafter.
    return self._transport
@staticmethod
def catalog_path(
    project: str,
    location: str,
    catalog: str,
) -> str:
    """Returns a fully-qualified catalog string."""
    template = "projects/{project}/locations/{location}/catalogs/{catalog}"
    return template.format(project=project, location=location, catalog=catalog)
@staticmethod
def parse_catalog_path(path: str) -> Dict[str, str]:
    """Parses a catalog path into its component segments."""
    match = re.match(
        r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/catalogs/(?P<catalog>.+?)$",
        path,
    )
    if not match:
        return {}
    return match.groupdict()
@staticmethod
def common_billing_account_path(
    billing_account: str,
) -> str:
    """Returns a fully-qualified billing_account string."""
    template = "billingAccounts/{billing_account}"
    return template.format(billing_account=billing_account)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
    """Parse a billing_account path into its component segments."""
    match = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
    if not match:
        return {}
    return match.groupdict()
@staticmethod
def common_folder_path(
    folder: str,
) -> str:
    """Returns a fully-qualified folder string."""
    template = "folders/{folder}"
    return template.format(folder=folder)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
    """Parse a folder path into its component segments."""
    match = re.match(r"^folders/(?P<folder>.+?)$", path)
    if not match:
        return {}
    return match.groupdict()
@staticmethod
def common_organization_path(
    organization: str,
) -> str:
    """Returns a fully-qualified organization string."""
    template = "organizations/{organization}"
    return template.format(organization=organization)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
    """Parse a organization path into its component segments."""
    match = re.match(r"^organizations/(?P<organization>.+?)$", path)
    if not match:
        return {}
    return match.groupdict()
@staticmethod
def common_project_path(
    project: str,
) -> str:
    """Returns a fully-qualified project string."""
    template = "projects/{project}"
    return template.format(project=project)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
    """Parse a project path into its component segments."""
    match = re.match(r"^projects/(?P<project>.+?)$", path)
    if not match:
        return {}
    return match.groupdict()
@staticmethod
def common_location_path(
    project: str,
    location: str,
) -> str:
    """Returns a fully-qualified location string."""
    template = "projects/{project}/locations/{location}"
    return template.format(project=project, location=location)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
    """Parse a location path into its component segments."""
    match = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
    if not match:
        return {}
    return match.groupdict()
@classmethod
def get_mtls_endpoint_and_cert_source(
    cls, client_options: Optional[client_options_lib.ClientOptions] = None
):
    """Return the API endpoint and client cert source for mutual TLS.

    The client cert source is determined in the following order:
    (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
    client cert source is None.
    (2) if `client_options.client_cert_source` is provided, use the provided one; if the
    default client cert source exists, use the default one; otherwise the client cert
    source is None.

    The API endpoint is determined in the following order:
    (1) if `client_options.api_endpoint` is provided, use the provided one.
    (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
    default mTLS endpoint; if the environment variable is "never", use the default API
    endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
    use the default API endpoint.

    More details can be found at https://google.aip.dev/auth/4114.

    Args:
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. Only the `api_endpoint` and `client_cert_source` properties may be used
            in this method.

    Returns:
        Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
            client cert source to use.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If any errors happen.
    """
    if client_options is None:
        client_options = client_options_lib.ClientOptions()
    use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false")
    use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
    # Fail fast on malformed environment configuration.
    if use_client_cert not in ("true", "false"):
        raise ValueError(
            "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`"
        )
    if use_mtls_endpoint not in ("auto", "never", "always"):
        raise MutualTLSChannelError(
            "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`"
        )

    # Figure out the client cert source to use.
    client_cert_source = None
    if use_client_cert == "true":
        if client_options.client_cert_source:
            client_cert_source = client_options.client_cert_source
        elif mtls.has_default_client_cert_source():
            client_cert_source = mtls.default_client_cert_source()

    # Figure out which api endpoint to use.
    if client_options.api_endpoint is not None:
        api_endpoint = client_options.api_endpoint
    elif use_mtls_endpoint == "always" or (
        use_mtls_endpoint == "auto" and client_cert_source
    ):
        api_endpoint = cls.DEFAULT_MTLS_ENDPOINT
    else:
        api_endpoint = cls.DEFAULT_ENDPOINT

    return api_endpoint, client_cert_source
def __init__(
    self,
    *,
    credentials: Optional[ga_credentials.Credentials] = None,
    transport: Union[str, CompletionServiceTransport, None] = None,
    client_options: Optional[client_options_lib.ClientOptions] = None,
    client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
    """Instantiates the completion service client.

    Args:
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        transport (Union[str, CompletionServiceTransport]): The
            transport to use. If set to None, a transport is chosen
            automatically.
        client_options (google.api_core.client_options.ClientOptions): Custom options for the
            client. It won't take effect if a ``transport`` instance is provided.
            (1) The ``api_endpoint`` property can be used to override the
            default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
            environment variable can also be used to override the endpoint:
            "always" (always use the default mTLS endpoint), "never" (always
            use the default regular endpoint) and "auto" (auto switch to the
            default mTLS endpoint if client certificate is present, this is
            the default value). However, the ``api_endpoint`` property takes
            precedence if provided.
            (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
            is "true", then the ``client_cert_source`` property can be used
            to provide client certificate for mutual TLS transport. If
            not provided, the default SSL client certificate will be used if
            present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
            set, no client certificate will be used.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you're developing
            your own client library.

    Raises:
        google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
            creation failed for any reason.
    """
    # Accept either a dict or a ClientOptions instance.
    if isinstance(client_options, dict):
        client_options = client_options_lib.from_dict(client_options)
    if client_options is None:
        client_options = client_options_lib.ClientOptions()

    api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(
        client_options
    )

    api_key_value = getattr(client_options, "api_key", None)
    if api_key_value and credentials:
        raise ValueError(
            "client_options.api_key and credentials are mutually exclusive"
        )

    # Save or instantiate the transport.
    # Ordinarily, we provide the transport, but allowing a custom transport
    # instance provides an extensibility point for unusual situations.
    if isinstance(transport, CompletionServiceTransport):
        # transport is a CompletionServiceTransport instance.
        if credentials or client_options.credentials_file or api_key_value:
            raise ValueError(
                "When providing a transport instance, "
                "provide its credentials directly."
            )
        if client_options.scopes:
            raise ValueError(
                "When providing a transport instance, provide its scopes "
                "directly."
            )
        self._transport = transport
    else:
        # Deferred import: get_api_key_credentials is only present in newer
        # versions of google-auth, so probe for it at call time.
        import google.auth._default  # type: ignore

        if api_key_value and hasattr(
            google.auth._default, "get_api_key_credentials"
        ):
            credentials = google.auth._default.get_api_key_credentials(
                api_key_value
            )

        Transport = type(self).get_transport_class(transport)
        self._transport = Transport(
            credentials=credentials,
            credentials_file=client_options.credentials_file,
            host=api_endpoint,
            scopes=client_options.scopes,
            client_cert_source_for_mtls=client_cert_source_func,
            quota_project_id=client_options.quota_project_id,
            client_info=client_info,
            always_use_jwt_access=True,
            api_audience=client_options.api_audience,
        )
def complete_query(
    self,
    request: Optional[Union[completion_service.CompleteQueryRequest, dict]] = None,
    *,
    retry: OptionalRetry = gapic_v1.method.DEFAULT,
    timeout: Optional[float] = None,
    metadata: Sequence[Tuple[str, str]] = (),
) -> completion_service.CompleteQueryResponse:
    r"""Completes the specified prefix with keyword
    suggestions.

    This feature is only available for users who have Retail
    Search enabled. Please enable Retail Search on Cloud
    Console before using this feature.

    .. code-block:: python

        # This snippet has been automatically generated and should be regarded as a
        # code template only.
        # It will require modifications to work:
        # - It may require correct/in-range values for request initialization.
        # - It may require specifying regional endpoints when creating the service
        #   client as shown in:
        #   https://googleapis.dev/python/google-api-core/latest/client_options.html
        from google.cloud import retail_v2alpha

        def sample_complete_query():
            # Create a client
            client = retail_v2alpha.CompletionServiceClient()

            # Initialize request argument(s)
            request = retail_v2alpha.CompleteQueryRequest(
                catalog="catalog_value",
                query="query_value",
            )

            # Make the request
            response = client.complete_query(request=request)

            # Handle the response
            print(response)

    Args:
        request (Union[google.cloud.retail_v2alpha.types.CompleteQueryRequest, dict]):
            The request object. Auto-complete parameters.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        google.cloud.retail_v2alpha.types.CompleteQueryResponse:
            Response of the auto-complete query.
    """
    # Create or coerce a protobuf request object.
    # Minor optimization to avoid making a copy if the user passes
    # in a completion_service.CompleteQueryRequest.
    # There's no risk of modifying the input as we've already verified
    # there are no flattened fields.
    if not isinstance(request, completion_service.CompleteQueryRequest):
        request = completion_service.CompleteQueryRequest(request)

    # Wrap the RPC method; this adds retry and timeout information,
    # and friendly error handling.
    rpc = self._transport._wrapped_methods[self._transport.complete_query]

    # Certain fields should be provided within the metadata header;
    # add these here.
    metadata = tuple(metadata) + (
        gapic_v1.routing_header.to_grpc_metadata((("catalog", request.catalog),)),
    )

    # Send the request.
    response = rpc(
        request,
        retry=retry,
        timeout=timeout,
        metadata=metadata,
    )

    # Done; return the response.
    return response
    def import_completion_data(
        self,
        request: Union[import_config.ImportCompletionDataRequest, dict] = None,
        *,
        retry: OptionalRetry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Bulk import of processed completion dataset.
        Request processing is asynchronous. Partial updating is
        not supported.
        The operation is successfully finished only after the
        imported suggestions are indexed successfully and ready
        for serving. The process takes hours.
        This feature is only available for users who have Retail
        Search enabled. Please enable Retail Search on Cloud
        Console before using this feature.
        .. code-block:: python
            # This snippet has been automatically generated and should be regarded as a
            # code template only.
            # It will require modifications to work:
            # - It may require correct/in-range values for request initialization.
            # - It may require specifying regional endpoints when creating the service
            # client as shown in:
            # https://googleapis.dev/python/google-api-core/latest/client_options.html
            from google.cloud import retail_v2alpha
            def sample_import_completion_data():
                # Create a client
                client = retail_v2alpha.CompletionServiceClient()
                # Initialize request argument(s)
                input_config = retail_v2alpha.CompletionDataInputConfig()
                input_config.big_query_source.dataset_id = "dataset_id_value"
                input_config.big_query_source.table_id = "table_id_value"
                request = retail_v2alpha.ImportCompletionDataRequest(
                    parent="parent_value",
                    input_config=input_config,
                )
                # Make the request
                operation = client.import_completion_data(request=request)
                print("Waiting for operation to complete...")
                response = operation.result()
                # Handle the response
                print(response)
        Args:
            request (Union[google.cloud.retail_v2alpha.types.ImportCompletionDataRequest, dict]):
                The request object. Request message for
                ImportCompletionData methods.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            google.api_core.operation.Operation:
                An object representing a long-running operation.
                The result type for the operation will be :class:`google.cloud.retail_v2alpha.types.ImportCompletionDataResponse` Response of the
                   [ImportCompletionDataRequest][google.cloud.retail.v2alpha.ImportCompletionDataRequest].
                If the long running operation is done, this message
                is returned by the
                google.longrunning.Operations.response field if the
                operation is successful.
        """
        # Create or coerce a protobuf request object.
        # Minor optimization to avoid making a copy if the user passes
        # in a import_config.ImportCompletionDataRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, import_config.ImportCompletionDataRequest):
            request = import_config.ImportCompletionDataRequest(request)
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.import_completion_data]
        # Certain fields should be provided within the metadata header;
        # add these here.
        # "parent" is promoted into gRPC metadata (a routing header built by
        # to_grpc_metadata) alongside any caller-supplied metadata pairs.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
        # Wrap the response in an operation future.
        # The future's result() deserializes to ImportCompletionDataResponse and
        # its operation metadata to ImportMetadata.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            import_config.ImportCompletionDataResponse,
            metadata_type=import_config.ImportMetadata,
        )
        # Done; return the response.
        return response
    def __enter__(self):
        # Context-manager entry: returns the client itself so callers can write
        # "with CompletionServiceClient(...) as client:".
        return self
    def __exit__(self, type, value, traceback):
        """Releases underlying transport's resources.
        .. warning::
            ONLY use as a context manager if the transport is NOT shared
            with other clients! Exiting the with block will CLOSE the transport
            and may cause errors in other clients!
        """
        # Closing the transport tears down the underlying channel/connections;
        # the client is unusable afterwards.
        self.transport.close()
# Advertise the installed google-cloud-retail version in request headers.
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-retail",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata unavailable (e.g. running from a source checkout);
    # fall back to a ClientInfo with no gapic_version.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("CompletionServiceClient",)
| {
"content_hash": "160f997da18dc89ef80f9357d36addf0",
"timestamp": "",
"source": "github",
"line_count": 641,
"max_line_length": 145,
"avg_line_length": 40.58346333853354,
"alnum_prop": 0.6227416006765588,
"repo_name": "googleapis/python-retail",
"id": "cfed14db070e904b87a7374cc6a888537f22c1a7",
"size": "26614",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/retail_v2alpha/services/completion_service/client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "7420556"
},
{
"name": "Shell",
"bytes": "30660"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import MixinABC, _convert_request, _format_url_section
T = TypeVar('T')
# Signature of the optional "cls" response-transform callback accepted by the
# operations in this module.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# Module-level serializer shared by the request builders below; msrest
# client-side validation is disabled.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    vault_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists private link resources of a vault.

    Path arguments are serialized into the URL template; remaining kwargs are
    forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # api-version may arrive as a keyword argument or as a query parameter.
    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-04-01"))  # type: str
    accept = headers.pop('Accept', "application/json")
    # Fill the URL template with the serialized path arguments.
    url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/privateLinkResources")  # pylint: disable=line-too-long
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        vaultName=_SERIALIZER.url("vault_name", vault_name, 'str'),
    )
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    vault_name: str,
    private_link_resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that fetches one named private link resource.

    Path arguments are serialized into the URL template; remaining kwargs are
    forwarded to :class:`~azure.core.rest.HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})
    # api-version may arrive as a keyword argument or as a query parameter.
    api_version = kwargs.pop('api_version', params.pop('api-version', "2022-04-01"))  # type: str
    accept = headers.pop('Accept', "application/json")
    # Fill the URL template with the serialized path arguments.
    url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/privateLinkResources/{privateLinkResourceName}")  # pylint: disable=line-too-long
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, 'str'),
        resourceGroupName=_SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        vaultName=_SERIALIZER.url("vault_name", vault_name, 'str'),
        privateLinkResourceName=_SERIALIZER.url("private_link_resource_name", private_link_resource_name, 'str'),
    )
    params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
class PrivateLinkResourcesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservices.RecoveryServicesClient`'s
        :attr:`private_link_resources` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs):
        # Collaborators (pipeline client, client config, serializer and
        # deserializer) are injected by the generated service client, either
        # positionally or by keyword.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        vault_name: str,
        **kwargs: Any
    ) -> Iterable[_models.PrivateLinkResources]:
        """Returns the list of private link resources that need to be created for Backup and SiteRecovery.
        Returns the list of private link resources that need to be created for Backup and SiteRecovery.
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateLinkResources or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.recoveryservices.models.PrivateLinkResources]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-04-01"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.PrivateLinkResources]
        # Map auth/404/409 status codes to specific exception types.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})
        def prepare_request(next_link=None):
            # First page uses the canonical URL template; subsequent pages use
            # the continuation link returned by the service.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    vault_name=vault_name,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    vault_name=vault_name,
                    api_version=api_version,
                    template_url=next_link,
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Deserialize a single page and return (continuation token, items).
            deserialized = self._deserialize("PrivateLinkResources", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            # Fetch one page through the client pipeline; non-200 is an error.
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/privateLinkResources"}  # type: ignore
    @distributed_trace
    def get(
        self,
        resource_group_name: str,
        vault_name: str,
        private_link_resource_name: str,
        **kwargs: Any
    ) -> _models.PrivateLinkResource:
        """Returns a specified private link resource that need to be created for Backup and SiteRecovery.
        Returns a specified private link resource that need to be created for Backup and SiteRecovery.
        :param resource_group_name: The name of the resource group where the recovery services vault is
         present.
        :type resource_group_name: str
        :param vault_name: The name of the recovery services vault.
        :type vault_name: str
        :param private_link_resource_name:
        :type private_link_resource_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResource, or the result of cls(response)
        :rtype: ~azure.mgmt.recoveryservices.models.PrivateLinkResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop('api_version', _params.pop('api-version', "2022-04-01"))  # type: str
        cls = kwargs.pop('cls', None)  # type: ClsType[_models.PrivateLinkResource]
        request = build_get_request(
            subscription_id=self._config.subscription_id,
            resource_group_name=resource_group_name,
            vault_name=vault_name,
            private_link_resource_name=private_link_resource_name,
            api_version=api_version,
            template_url=self.get.metadata['url'],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request,
            stream=False,
            **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateLinkResource', pipeline_response)
        # When a custom cls is supplied it receives the raw pipeline response
        # as well as the deserialized model.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/privateLinkResources/{privateLinkResourceName}"}  # type: ignore
| {
"content_hash": "7aa0e17c4b5626fc6643f3508409c3b0",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 243,
"avg_line_length": 41.63537906137184,
"alnum_prop": 0.6394693488251105,
"repo_name": "Azure/azure-sdk-for-python",
"id": "70e06d467b99b8817d121953429bca1e396cff45",
"size": "12033",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/recoveryservices/azure-mgmt-recoveryservices/azure/mgmt/recoveryservices/operations/_private_link_resources_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
# Public API of this module.
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
    """Dimensionality reduction using truncated SVD (aka LSA).
    This transformer performs linear dimensionality reduction by means of
    truncated singular value decomposition (SVD). Contrary to PCA, this
    estimator does not center the data before computing the singular value
    decomposition. This means it can work with scipy.sparse matrices
    efficiently.
    In particular, truncated SVD works on term count/tf-idf matrices as
    returned by the vectorizers in sklearn.feature_extraction.text. In that
    context, it is known as latent semantic analysis (LSA).
    This estimator supports two algorithms: a fast randomized SVD solver, and
    a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
    (X.T * X), whichever is more efficient.
    Read more in the :ref:`User Guide <LSA>`.
    Parameters
    ----------
    n_components : int, default = 2
        Desired dimensionality of output data.
        Must be strictly less than the number of features.
        The default value is useful for visualisation. For LSA, a value of
        100 is recommended.
    algorithm : string, default = "randomized"
        SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
        (scipy.sparse.linalg.svds), or "randomized" for the randomized
        algorithm due to Halko (2009).
    n_iter : int, optional (default 5)
        Number of iterations for randomized SVD solver. Not used by ARPACK.
        The default is larger than the default in `randomized_svd` to handle
        sparse matrices that may have large slowly decaying spectrum.
    random_state : int, RandomState instance or None, optional, default = None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.
    tol : float, optional
        Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
        SVD solver.
    Attributes
    ----------
    components_ : array, shape (n_components, n_features)
    explained_variance_ : array, shape (n_components,)
        The variance of the training samples transformed by a projection to
        each component.
    explained_variance_ratio_ : array, shape (n_components,)
        Percentage of variance explained by each of the selected components.
    singular_values_ : array, shape (n_components,)
        The singular values corresponding to each of the selected components.
        The singular values are equal to the 2-norms of the ``n_components``
        variables in the lower-dimensional space.
    Examples
    --------
    >>> from sklearn.decomposition import TruncatedSVD
    >>> from sklearn.random_projection import sparse_random_matrix
    >>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
    >>> svd = TruncatedSVD(n_components=5, n_iter=7, random_state=42)
    >>> svd.fit(X)  # doctest: +NORMALIZE_WHITESPACE
    TruncatedSVD(algorithm='randomized', n_components=5, n_iter=7,
            random_state=42, tol=0.0)
    >>> print(svd.explained_variance_ratio_)  # doctest: +ELLIPSIS
    [0.0606... 0.0584... 0.0497... 0.0434... 0.0372...]
    >>> print(svd.explained_variance_ratio_.sum())  # doctest: +ELLIPSIS
    0.249...
    >>> print(svd.singular_values_)  # doctest: +ELLIPSIS
    [2.5841... 2.5245... 2.3201... 2.1753... 2.0443...]
    See also
    --------
    PCA
    RandomizedPCA
    References
    ----------
    Finding structure with randomness: Stochastic algorithms for constructing
    approximate matrix decompositions
    Halko, et al., 2009 (arXiv:909) http://arxiv.org/pdf/0909.4061
    Notes
    -----
    SVD suffers from a problem called "sign indeterminacy", which means the
    sign of the ``components_`` and the output from transform depend on the
    algorithm and random state. To work around this, fit instances of this
    class to data once, then keep the instance around to do transformations.
    """
    def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
                 random_state=None, tol=0.):
        self.algorithm = algorithm
        self.n_components = n_components
        self.n_iter = n_iter
        self.random_state = random_state
        self.tol = tol
    def fit(self, X, y=None):
        """Fit LSI model on training data X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        y : Ignored
        Returns
        -------
        self : object
            Returns the transformer object.
        """
        self.fit_transform(X)
        return self
    def fit_transform(self, X, y=None):
        """Fit LSI model to X and perform dimensionality reduction on X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Training data.
        y : Ignored
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        Raises
        ------
        ValueError
            If ``n_components >= n_features`` or ``algorithm`` is unknown.
        """
        X = check_array(X, accept_sparse=['csr', 'csc'])
        random_state = check_random_state(self.random_state)
        # Validate n_components for *both* solvers.  Previously only the
        # "randomized" branch performed this check, so the "arpack" branch
        # handed an out-of-range k straight to scipy.sparse.linalg.svds,
        # which fails with a less informative error.
        k = self.n_components
        n_features = X.shape[1]
        if k >= n_features:
            raise ValueError("n_components must be < n_features;"
                             " got %d >= %d" % (k, n_features))
        if self.algorithm == "arpack":
            U, Sigma, VT = svds(X, k=k, tol=self.tol)
            # svds doesn't abide by scipy.linalg.svd/randomized_svd
            # conventions, so reverse its outputs.
            Sigma = Sigma[::-1]
            U, VT = svd_flip(U[:, ::-1], VT[::-1])
        elif self.algorithm == "randomized":
            U, Sigma, VT = randomized_svd(X, k,
                                          n_iter=self.n_iter,
                                          random_state=random_state)
        else:
            raise ValueError("unknown algorithm %r" % self.algorithm)
        self.components_ = VT
        # Calculate explained variance & explained variance ratio
        X_transformed = U * Sigma
        self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
        if sp.issparse(X):
            # Sparse input: compute the total variance without densifying.
            _, full_var = mean_variance_axis(X, axis=0)
            full_var = full_var.sum()
        else:
            full_var = np.var(X, axis=0).sum()
        self.explained_variance_ratio_ = exp_var / full_var
        self.singular_values_ = Sigma  # Store the singular values.
        return X_transformed
    def transform(self, X):
        """Perform dimensionality reduction on X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            New data.
        Returns
        -------
        X_new : array, shape (n_samples, n_components)
            Reduced version of X. This will always be a dense array.
        """
        X = check_array(X, accept_sparse='csr')
        return safe_sparse_dot(X, self.components_.T)
    def inverse_transform(self, X):
        """Transform X back to its original space.
        Returns an array X_original whose transform would be X.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_components)
            New data.
        Returns
        -------
        X_original : array, shape (n_samples, n_features)
            Note that this is always a dense array.
        """
        X = check_array(X)
        return np.dot(X, self.components_)
| {
"content_hash": "d133502e8b6b93e368c2f3992afed622",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 78,
"avg_line_length": 36.419213973799124,
"alnum_prop": 0.6173860911270983,
"repo_name": "BiaDarkia/scikit-learn",
"id": "049c165baea200b131eafe9ec6abaa17e0cc9adf",
"size": "8340",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/decomposition/truncated_svd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6330849"
},
{
"name": "Shell",
"bytes": "6748"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
#
# Appcelerator Titanium Module Packager
#
#
import os, subprocess, sys, glob, string
import zipfile
from datetime import date
# Directory containing this script; all relative paths below assume it.
cwd = os.path.abspath(os.path.dirname(sys._getframe(0).f_code.co_filename))
os.chdir(cwd)
# Keys that must be present in the module manifest.  'copyright' was
# previously listed twice, causing validate_manifest() to check it (and
# possibly warn about it) twice.
required_module_keys = ['name','version','moduleid','description','copyright','license','platform','minsdk']
# Placeholder values that should be customized before distribution; a
# manifest still carrying one of these triggers a warning.
module_defaults = {
	'description':'My module',
	'author': 'Your Name',
	'license' : 'Specify your license',
	'copyright' : 'Copyright (c) %s by Your Company' % str(date.today().year),
}
# Sentinel text that identifies an unmodified LICENSE file.
module_license_default = "TODO: place your license here and we'll include it in the module distribution"
def find_sdk(config):
	"""Return the Titanium SDK root from *config*, expanding '~' and env vars."""
	raw_path = config['TITANIUM_SDK']
	home_expanded = os.path.expanduser(raw_path)
	return os.path.expandvars(home_expanded)
def replace_vars(config,token):
	"""Expand $(KEY) placeholders in *token* using values from *config*.

	Expansion repeats until the first remaining placeholder is unknown or
	malformed; unknown keys are left untouched.  Uses ``key in config``
	instead of the Python-2-only ``dict.has_key`` so the helper works on
	both Python 2 and 3.
	"""
	idx = token.find('$(')
	while idx != -1:
		idx2 = token.find(')',idx+2)
		if idx2 == -1: break
		key = token[idx+2:idx2]
		# has_key() was removed in Python 3; membership test works on both.
		if key not in config: break
		token = token.replace('$(%s)' % key, config[key])
		idx = token.find('$(')
	return token
def read_ti_xcconfig():
	"""Parse titanium.xcconfig into a dict, expanding $(VAR) references.

	Lines starting with '//' are comments; everything before the first '='
	is the key, everything after is the value (with $(...) expansion against
	the entries parsed so far).
	"""
	config = {}
	raw = open(os.path.join(cwd,'titanium.xcconfig')).read()
	for raw_line in raw.splitlines(False):
		stripped = raw_line.strip()
		if stripped[0:2]=='//': continue
		eq = stripped.find('=')
		if eq > 0:
			name = stripped[0:eq].strip()
			config[name] = replace_vars(config, stripped[eq+1:].strip())
	return config
def generate_doc(config):
	"""Render every markdown file in documentation/ to HTML.

	Returns a list of {filename: html} dicts, or None when the
	documentation directory does not exist.
	"""
	docdir = os.path.join(cwd,'documentation')
	if not os.path.exists(docdir):
		print "Couldn't find documentation file at: %s" % docdir
		return None
	try:
		# Prefer the faster markdown2 package; fall back to markdown.
		import markdown2 as markdown
	except ImportError:
		import markdown
	documentation = []
	for file in os.listdir(docdir):
		if file in ignoreFiles or os.path.isdir(os.path.join(docdir, file)):
			continue
		md = open(os.path.join(docdir,file)).read()
		html = markdown.markdown(md)
		documentation.append({file:html});
	return documentation
def compile_js(manifest,config):
	"""Precompile the module's CommonJS JS asset into the iOS assets router.

	Splices the compiled payload lookup code into
	Classes/ComDezinezyncTicolorartModuleAssets.m and writes the exported
	symbols to metadata.json.  No-op when the JS asset is absent.
	"""
	js_file = os.path.join(cwd,'assets','com.dezinezync.ticolorart.js')
	if not os.path.exists(js_file): return
	from compiler import Compiler
	try:
		import json
	except:
		import simplejson as json
	compiler = Compiler(cwd, manifest['moduleid'], manifest['name'], 'commonjs')
	root_asset, module_assets = compiler.compile_module()
	root_asset_content = """
%s
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[0]);
""" % root_asset
	module_asset_content = """
%s
	NSNumber *index = [map objectForKey:path];
	if (index == nil) {
		return nil;
	}
	return filterDataInRange([NSData dataWithBytesNoCopy:data length:sizeof(data) freeWhenDone:NO], ranges[index.integerValue]);
""" % module_assets
	from tools import splice_code
	assets_router = os.path.join(cwd,'Classes','ComDezinezyncTicolorartModuleAssets.m')
	splice_code(assets_router, 'asset', root_asset_content)
	splice_code(assets_router, 'resolve_asset', module_asset_content)
	# Generate the exports after crawling all of the available JS source
	exports = open('metadata.json','w')
	json.dump({'exports':compiler.exports }, exports)
	exports.close()
def die(msg):
	# Print the fatal message and abort the build with a non-zero exit code.
	print msg
	sys.exit(1)
def warn(msg):
	# Print a non-fatal warning, prefixed so it stands out in build logs.
	print "[WARN] %s" % msg
def validate_license():
	"""Warn when LICENSE still contains the placeholder license text."""
	license_text = open(os.path.join(cwd,'LICENSE')).read()
	if module_license_default in license_text:
		warn('please update the LICENSE file with your license text before distributing')
def validate_manifest():
	"""Read and sanity-check the module manifest.

	Returns (manifest_dict, manifest_path).  Dies when the file is missing
	or a required key is absent; warns when a key still holds its default
	placeholder value.
	"""
	path = os.path.join(cwd,'manifest')
	# Check existence *before* opening: the original opened first, so a
	# missing manifest raised IOError instead of the intended die() message.
	if not os.path.exists(path): die("missing %s" % path)
	f = open(path)
	manifest = {}
	for line in f.readlines():
		line = line.strip()
		if line[0:1]=='#': continue
		if line.find(':') < 0: continue
		# Split on the first ':' only so values may contain colons
		# (URLs, timestamps, ...); bare split() raised ValueError there.
		key,value = line.split(':',1)
		manifest[key.strip()]=value.strip()
	f.close()
	for key in required_module_keys:
		# has_key() is Python-2-only; use membership tests instead.
		if key not in manifest: die("missing required manifest key '%s'" % key)
		if key in module_defaults:
			defvalue = module_defaults[key]
			curvalue = manifest[key]
			if curvalue==defvalue: warn("please update the manifest key: '%s' to a non-default value" % key)
	return manifest,path
# File names excluded from packaging and documentation generation.
ignoreFiles = ['.DS_Store','.gitignore','libTitanium.a','titanium.jar','README']
# Directory names (mostly VCS bookkeeping) never descended into when zipping.
ignoreDirs = ['.DS_Store','.svn','.git','CVSROOT']
def zip_dir(zf,dir,basepath,ignore=[]):
	"""Recursively add *dir* to zipfile *zf*, rebasing paths onto *basepath*.

	Honors the module-level ignoreDirs/ignoreFiles lists and skips compiled
	Python (.pyc) and JavaScript (.js) files.
	"""
	for root, dirs, files in os.walk(dir):
		# Prune ignored directories in place so os.walk never descends into them.
		dirs[:] = [d for d in dirs if d not in ignoreDirs]
		for fname in files:
			if fname in ignoreFiles: continue
			ext = os.path.splitext(fname)[1]
			if ext == '.pyc' or ext == '.js': continue
			src = os.path.join(root, fname)
			dest = src.replace(dir, basepath, 1)
			zf.write(src, dest)
def glob_libfiles():
	"""Return paths of Release-configuration static libraries under build/."""
	return [libfile for libfile in glob.glob('build/**/*.a')
		if 'Release-' in libfile]
def build_module(manifest,config):
	"""Build Release static libraries for device and simulator, then merge
	them with lipo into a single fat build/lib<moduleid>.a."""
	from tools import ensure_dev_path
	ensure_dev_path()
	rc = os.system("xcodebuild -sdk iphoneos -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	rc = os.system("xcodebuild -sdk iphonesimulator -configuration Release")
	if rc != 0:
		die("xcodebuild failed")
	# build the merged library using lipo
	moduleid = manifest['moduleid']
	libpaths = ''
	for libfile in glob_libfiles():
		libpaths+='%s ' % libfile
	os.system("lipo %s -create -output build/lib%s.a" %(libpaths,moduleid))
def package_module(manifest,mf,config):
	"""Assemble the distributable <moduleid>-iphone-<version>.zip.

	Bundles the manifest, the fat static library, rendered documentation,
	the assets/example/platform directories, LICENSE, module.xcconfig and
	the JS export metadata (metadata.json) under the standard
	modules/iphone/<moduleid>/<version>/ layout.
	"""
	name = manifest['name'].lower()
	moduleid = manifest['moduleid'].lower()
	version = manifest['version']
	modulezip = '%s-iphone-%s.zip' % (moduleid,version)
	# Rebuild the archive from scratch each time.
	if os.path.exists(modulezip): os.remove(modulezip)
	zf = zipfile.ZipFile(modulezip, 'w', zipfile.ZIP_DEFLATED)
	modulepath = 'modules/iphone/%s/%s' % (moduleid,version)
	zf.write(mf,'%s/manifest' % modulepath)
	libname = 'lib%s.a' % moduleid
	zf.write('build/%s' % libname, '%s/%s' % (modulepath,libname))
	docs = generate_doc(config)
	if docs!=None:
		for doc in docs:
			for file, html in doc.iteritems():
				filename = string.replace(file,'.md','.html')
				zf.writestr('%s/documentation/%s'%(modulepath,filename),html)
	for dn in ('assets','example','platform'):
		if os.path.exists(dn):
			zip_dir(zf,dn,'%s/%s' % (modulepath,dn),['README'])
	zf.write('LICENSE','%s/LICENSE' % modulepath)
	zf.write('module.xcconfig','%s/module.xcconfig' % modulepath)
	exports_file = 'metadata.json'
	if os.path.exists(exports_file):
		zf.write(exports_file, '%s/%s' % (modulepath, exports_file))
	zf.close()
if __name__ == '__main__':
	# Build pipeline: validate inputs, locate the SDK, compile JS assets,
	# build the fat static library, then zip the distributable module.
	manifest,mf = validate_manifest()
	validate_license()
	config = read_ti_xcconfig()
	sdk = find_sdk(config)
	sys.path.insert(0,os.path.join(sdk,'iphone'))
	sys.path.append(os.path.join(sdk, "common"))
	compile_js(manifest,config)
	build_module(manifest,config)
	package_module(manifest,mf,config)
	sys.exit(0)
| {
"content_hash": "209757977058bc7a768d9962cb880a63",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 125,
"avg_line_length": 30.443946188340806,
"alnum_prop": 0.6892031226984828,
"repo_name": "dezinezync/TiColorArt",
"id": "b8db8c951089172f5a6e3dc4604bba7320f9c8cf",
"size": "6789",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1027"
},
{
"name": "Objective-C",
"bytes": "22305"
},
{
"name": "Python",
"bytes": "8896"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import django.core.validators
class Migration(migrations.Migration):
    """Initial migration: creates the ``CustomUser`` table.

    The field list mirrors Django's stock ``AbstractUser`` (username,
    password, names, email, staff/active flags, groups and permissions)
    so the custom model can replace ``auth.User`` transparently.
    """

    # Group and Permission M2M fields below reference the auth app's tables.
    dependencies = [
        ('auth', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(default=django.utils.timezone.now, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('username', models.CharField(help_text='Required. 30 characters or fewer. Letters, digits and @/./+/-/_ only.', unique=True, max_length=30, verbose_name='username', validators=[django.core.validators.RegexValidator('^[\\w.@+-]+$', 'Enter a valid username.', 'invalid')])),
                ('first_name', models.CharField(max_length=30, verbose_name='first name', blank=True)),
                ('last_name', models.CharField(max_length=30, verbose_name='last name', blank=True)),
                ('email', models.EmailField(max_length=75, verbose_name='email address', blank=True)),
                ('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
                ('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
                ('groups', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Group', blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of his/her group.', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(related_query_name='user', related_name='user_set', to='auth.Permission', blank=True, help_text='Specific permissions for this user.', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
                'verbose_name': 'user',
                'verbose_name_plural': 'users',
            },
            bases=(models.Model,),
        ),
    ]
| {
"content_hash": "9f4c819dae1a35a156c59e01fdbb3e9f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 289,
"avg_line_length": 68.64102564102564,
"alnum_prop": 0.6417631677250654,
"repo_name": "Larhard/Elgassia",
"id": "bb1460e6b247a75589d5c4be94120e9431fda77d",
"size": "2701",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "custom_auth/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4460"
},
{
"name": "HTML",
"bytes": "22649"
},
{
"name": "JavaScript",
"bytes": "2983"
},
{
"name": "Python",
"bytes": "27470"
},
{
"name": "Shell",
"bytes": "954"
}
],
"symlink_target": ""
} |
import numpy as np
import cgen as c
from sympy import And, Or, Max
from devito.data import FULL
from devito.ir import (DummyEq, Conditional, Dereference, Expression, ExpressionBundle,
List, ParallelTree, Prodder, FindSymbols, FindNodes, Return,
VECTORIZED, Transformer, IsPerfectIteration, filter_iterations,
retrieve_iteration_tree)
from devito.symbolics import CondEq, INT, ccode
from devito.passes.iet.engine import iet_pass
from devito.passes.iet.langbase import LangBB, LangTransformer, DeviceAwareMixin
from devito.passes.iet.misc import is_on_device
from devito.tools import as_tuple, prod
from devito.types import Symbol, NThreadsBase
__all__ = ['PragmaSimdTransformer', 'PragmaShmTransformer',
'PragmaDeviceAwareTransformer', 'PragmaLangBB']
class PragmaTransformer(LangTransformer):

    """
    Abstract base class for LangTransformers that parallelize Iterations
    as well as manage data allocation with pragmas.

    Concrete behaviour is provided by the subclasses below (SIMD,
    shared-memory, and device-aware transformers).
    """

    pass
class PragmaSimdTransformer(PragmaTransformer):

    """
    Abstract base class for PragmaTransformers capable of emitting SIMD-parallel IETs.
    """

    @property
    def simd_reg_size(self):
        """Width (in bytes) of the target platform's SIMD registers."""
        return self.platform.simd_reg_size

    @iet_pass
    def make_simd(self, iet):
        """
        Add SIMD pragmas to the innermost fully-parallel Iteration of each
        Iteration tree, provided an outer level of parallelism exists.
        """
        mapper = {}
        for tree in retrieve_iteration_tree(iet):
            candidates = [i for i in tree if i.is_ParallelRelaxed]

            # As long as there's an outer level of parallelism, the innermost
            # PARALLEL Iteration gets vectorized
            if len(candidates) < 2:
                continue
            candidate = candidates[-1]

            # Only fully-parallel Iterations will be SIMD-ized (ParallelRelaxed
            # might not be enough then)
            if not candidate.is_Parallel:
                continue

            # Add SIMD pragma; use the aligned variant when DiscreteFunctions
            # appear in the loop body, so the compiler can assume alignment
            aligned = [j for j in FindSymbols('symbolics').visit(candidate)
                       if j.is_DiscreteFunction]
            if aligned:
                simd = self.lang['simd-for-aligned']
                simd = as_tuple(simd(','.join([j.name for j in aligned]),
                                self.simd_reg_size))
            else:
                simd = as_tuple(self.lang['simd-for'])
            pragmas = candidate.pragmas + simd

            # Add VECTORIZED property
            properties = list(candidate.properties) + [VECTORIZED]

            mapper[candidate] = candidate._rebuild(pragmas=pragmas, properties=properties)

        iet = Transformer(mapper).visit(iet)

        return iet, {}
class PragmaShmTransformer(PragmaSimdTransformer):

    """
    Abstract base class for PragmaTransformers capable of emitting SIMD-parallel
    and shared-memory-parallel IETs.
    """

    def __init__(self, sregistry, options, platform):
        """
        Parameters
        ----------
        sregistry : SymbolRegistry
            The symbol registry, to access the symbols appearing in an IET.
        options : dict
            The optimization options. Accepted: ['par-collapse-ncores',
            'par-collapse-work', 'par-chunk-nonaffine', 'par-dynamic-work', 'par-nested']
            * 'par-collapse-ncores': use a collapse clause if the number of
              available physical cores is greater than this threshold.
            * 'par-collapse-work': use a collapse clause if the trip count of the
              collapsable Iterations is statically known to exceed this threshold.
            * 'par-chunk-nonaffine': coefficient to adjust the chunk size in
              non-affine parallel Iterations.
            * 'par-dynamic-work': use dynamic scheduling if the operation count per
              iteration exceeds this threshold. Otherwise, use static scheduling.
            * 'par-nested': nested parallelism if the number of hyperthreads per core
              is greater than this threshold.
        platform : Platform
            The underlying platform.
        """
        # Parallelize relaxed-parallel Iterations, except those already vectorized
        key = lambda i: i.is_ParallelRelaxed and not i.is_Vectorized
        super().__init__(key, sregistry, platform)

        self.collapse_ncores = options['par-collapse-ncores']
        self.collapse_work = options['par-collapse-work']
        self.chunk_nonaffine = options['par-chunk-nonaffine']
        self.dynamic_work = options['par-dynamic-work']
        self.nested = options['par-nested']

    @property
    def ncores(self):
        """Number of physical cores on the target platform."""
        return self.platform.cores_physical

    @property
    def nhyperthreads(self):
        """Number of hardware threads per physical core."""
        return self.platform.threads_per_core

    @property
    def nthreads(self):
        return self.sregistry.nthreads

    @property
    def nthreads_nested(self):
        return self.sregistry.nthreads_nested

    @property
    def nthreads_nonaffine(self):
        return self.sregistry.nthreads_nonaffine

    @property
    def threadid(self):
        return self.sregistry.threadid

    def _find_collapsable(self, root, candidates):
        """
        Return the sublist of `candidates` (after `root`) that may safely be
        collapsed with `root` into a single parallel loop nest.
        """
        collapsable = []
        if self.ncores >= self.collapse_ncores:
            for n, i in enumerate(candidates[1:], 1):
                # The Iteration nest [root, ..., i] must be perfect
                if not IsPerfectIteration(depth=i).visit(root):
                    break

                # Loops are collapsable only if none of the iteration variables appear
                # in initializer expressions. For example, the following two loops
                # cannot be collapsed
                #
                # for (i = ... )
                #   for (j = i ...)
                #     ...
                #
                # Here, we make sure this won't happen
                if any(j.dim in i.symbolic_min.free_symbols for j in candidates[:n]):
                    break

                # Also, we do not want to collapse SIMD-vectorized Iterations
                if i.is_Vectorized:
                    break

                # Would there be enough work per parallel iteration?
                nested = candidates[n+1:]
                if nested:
                    try:
                        work = prod([int(j.dim.symbolic_size) for j in nested])
                        if work < self.collapse_work:
                            break
                    except TypeError:
                        # Symbolic (unknown at compile time) trip count; optimistically
                        # assume there's enough work
                        pass

                collapsable.append(i)
        return collapsable

    def _make_reductions(self, partree):
        """
        Turn increment Expressions within `partree` into either reduction
        clauses or atomic increments, depending on what the backend supports.
        """
        if not any(i.is_ParallelAtomic for i in partree.collapsed):
            return partree

        # Collect expressions inducing reductions
        exprs = FindNodes(Expression).visit(partree)
        exprs = [i for i in exprs if i.is_Increment and not i.is_ForeignExpression]

        reduction = [i.output for i in exprs]
        if all(i.is_Affine for i in partree.collapsed) or \
           all(not i.is_Indexed for i in reduction):
            # Implement reduction
            mapper = {partree.root: partree.root._rebuild(reduction=reduction)}
        else:
            # Make sure the increment is atomic
            mapper = {i: i._rebuild(pragmas=self.lang['atomic']) for i in exprs}

        partree = Transformer(mapper).visit(partree)

        return partree

    def _make_threaded_prodders(self, partree):
        # Replace plain Prodders with their language-specific (threaded) variant
        mapper = {i: self.Prodder(i) for i in FindNodes(Prodder).visit(partree)}
        partree = Transformer(mapper).visit(partree)
        return partree

    def _make_partree(self, candidates, nthreads=None):
        """
        Build a ParallelTree around the outermost candidate Iteration,
        choosing schedule, collapse depth and, for non-affine nests, a
        runtime-computed chunk size.
        """
        assert candidates
        root = candidates[0]

        # Get the collapsable Iterations
        collapsable = self._find_collapsable(root, candidates)
        ncollapse = 1 + len(collapsable)

        # Prepare to build a ParallelTree
        if all(i.is_Affine for i in candidates):
            bundles = FindNodes(ExpressionBundle).visit(root)
            sops = sum(i.ops for i in bundles)
            if sops >= self.dynamic_work:
                schedule = 'dynamic'
            else:
                schedule = 'static'
            if nthreads is None:
                # pragma ... for ... schedule(..., 1)
                nthreads = self.nthreads
                body = self.HostIteration(schedule=schedule, ncollapse=ncollapse,
                                          **root.args)
            else:
                # pragma ... parallel for ... schedule(..., 1)
                body = self.HostIteration(schedule=schedule, parallel=True,
                                          ncollapse=ncollapse, nthreads=nthreads,
                                          **root.args)
            prefix = []
        else:
            # pragma ... for ... schedule(..., expr)
            assert nthreads is None
            nthreads = self.nthreads_nonaffine
            chunk_size = Symbol(name='chunk_size')
            body = self.HostIteration(ncollapse=ncollapse, chunk_size=chunk_size,
                                      **root.args)

            # chunk_size ~ total iterations / (threads * adjustment coefficient)
            niters = prod([root.symbolic_size] + [j.symbolic_size for j in collapsable])
            value = INT(Max(niters / (nthreads*self.chunk_nonaffine), 1))
            prefix = [Expression(DummyEq(chunk_size, value, dtype=np.int32))]

        # Create a ParallelTree
        partree = ParallelTree(prefix, body, nthreads=nthreads)

        return root, partree

    def _make_parregion(self, partree, parrays):
        """
        Wrap `partree` in a parallel Region, vector-expanding written Arrays
        into thread-private variants if required. `parrays` caches the
        expanded Arrays across calls.
        """
        if not any(i.is_ParallelPrivate for i in partree.collapsed):
            return self.Region(partree)

        # Vector-expand all written Arrays within `partree`, since at least
        # one of the parallelized Iterations requires thread-private Arrays
        # E.g. a(x, y) -> b(tid, x, y), where `tid` is the ThreadID Dimension
        writes = [i.write for i in FindNodes(Expression).visit(partree)]
        vexpandeds = []
        for i in writes:
            if not (i.is_Array or i.is_TempFunction):
                continue
            elif i in parrays:
                pi = parrays[i]
            else:
                pi = parrays.setdefault(i, i._make_pointer(dim=self.threadid))
            vexpandeds.append(VExpanded(i, pi))
        if vexpandeds:
            # `int tid = <thread-num>;` so each thread indexes its own slot
            init = c.Initializer(c.Value(self.threadid._C_typedata, self.threadid.name),
                                 self.lang['thread-num'])
            prefix = List(header=init,
                          body=vexpandeds + list(partree.prefix),
                          footer=c.Line())
            partree = partree._rebuild(prefix=prefix)

        return self.Region(partree)

    def _make_guard(self, parregion):
        # Do not enter the parallel region if the step increment is 0; this
        # would raise a `Floating point exception (core dumped)` in some OpenMP
        # implementations. Note that using an OpenMP `if` clause won't work
        cond = Or(*[CondEq(i.step, 0) for i in parregion.collapsed
                    if isinstance(i.step, Symbol)])
        if cond != False:  # noqa: `cond` may be a sympy.False which would be == False
            parregion = List(body=[Conditional(cond, Return()), parregion])
        return parregion

    def _make_nested_partree(self, partree):
        """
        Add a second level of parallelism within `partree`, if profitable
        on the target platform (i.e., enough hyperthreads per core).
        """
        # Apply heuristic
        if self.nhyperthreads <= self.nested:
            return partree

        # Note: there might be multiple sub-trees amenable to nested parallelism,
        # hence we loop over all of them
        #
        # for (i = ... )  // outer parallelism
        #   for (j0 = ...)  // first source of nested parallelism
        #     ...
        #   for (j1 = ...)  // second source of nested parallelism
        #     ...
        mapper = {}
        for tree in retrieve_iteration_tree(partree):
            outer = tree[:partree.ncollapsed]
            inner = tree[partree.ncollapsed:]

            # Heuristic: nested parallelism is applied only if the top nested
            # parallel Iteration iterates *within* the top outer parallel Iteration
            # (i.e., the outer is a loop over blocks, while the nested is a loop
            # within a block)
            candidates = []
            for i in inner:
                if self.key(i) and any((j.dim.root is i.dim.root) for j in outer):
                    candidates.append(i)
                elif candidates:
                    # If there's at least one candidate but `i` doesn't honor the
                    # heuristic above, then we break, as the candidates must be
                    # perfectly nested
                    break
            if not candidates:
                continue

            # Introduce nested parallelism
            subroot, subpartree = self._make_partree(candidates, self.nthreads_nested)

            mapper[subroot] = subpartree

        partree = Transformer(mapper).visit(partree)

        return partree

    def _make_parallel(self, iet):
        """
        Core routine: discover parallelizable Iteration trees in `iet` and
        rewrite them as (possibly nested, possibly reducing) parallel regions.
        """
        mapper = {}
        parrays = {}
        for tree in retrieve_iteration_tree(iet):
            # Get the parallelizable Iterations in `tree`
            candidates = filter_iterations(tree, key=self.key)
            if not candidates:
                continue

            # Outer parallelism
            root, partree = self._make_partree(candidates)
            if partree is None or root in mapper:
                continue

            # Nested parallelism
            partree = self._make_nested_partree(partree)

            # Handle reductions
            partree = self._make_reductions(partree)

            # Atomicize and optimize single-thread prodders
            partree = self._make_threaded_prodders(partree)

            # Wrap within a parallel region
            parregion = self._make_parregion(partree, parrays)

            # Protect the parallel region if necessary
            parregion = self._make_guard(parregion)

            mapper[root] = parregion

        iet = Transformer(mapper).visit(iet)

        # The new arguments introduced by this pass
        args = [i for i in FindSymbols().visit(iet) if isinstance(i, (NThreadsBase))]
        for n in FindNodes(VExpanded).visit(iet):
            args.extend([(n.pointee, True), n.pointer])

        return iet, {'args': args, 'includes': [self.lang['header']]}

    @iet_pass
    def make_parallel(self, iet):
        """IET pass entry point: emit shared-memory-parallel code."""
        return self._make_parallel(iet)
class PragmaDeviceAwareTransformer(DeviceAwareMixin, PragmaShmTransformer):

    """
    Abstract base class for PragmaTransformers capable of emitting SIMD-parallel,
    shared-memory-parallel, and device-parallel IETs.
    """

    def __init__(self, sregistry, options, platform):
        super().__init__(sregistry, options, platform)

        # `gpu-fit`: Functions assumed to fit in device memory;
        # `par-disabled`: if True, no host-side fallback parallelism
        self.gpu_fit = options['gpu-fit']
        self.par_disabled = options['par-disabled']

    def _make_threaded_prodders(self, partree):
        if isinstance(partree.root, self.DeviceIteration):
            # no-op for now
            return partree
        else:
            return super()._make_threaded_prodders(partree)

    def _make_partree(self, candidates, nthreads=None):
        """
        Parallelize the `candidates` Iterations. In particular:

            * All parallel Iterations not *writing* to a host Function, that
              is a Function `f` such that `is_on_device(f) == False`, are offloaded
              to the device.
            * The remaining ones, that is those writing to a host Function,
              are parallelized on the host.
        """
        assert candidates
        root = candidates[0]

        if is_on_device(root, self.gpu_fit, only_writes=True):
            # The typical case: all written Functions are device Functions, that is
            # they're mapped in the device memory. Then we offload `root` to the device

            # Get the collapsable Iterations
            collapsable = self._find_collapsable(root, candidates)
            ncollapse = 1 + len(collapsable)

            body = self.DeviceIteration(gpu_fit=self.gpu_fit, ncollapse=ncollapse,
                                        **root.args)
            partree = ParallelTree([], body, nthreads=nthreads)

            return root, partree
        elif not self.par_disabled:
            # Resort to host parallelism
            return super()._make_partree(candidates, nthreads)
        else:
            return root, None

    def _make_parregion(self, partree, *args):
        if isinstance(partree.root, self.DeviceIteration):
            # no-op for now
            return partree
        else:
            return super()._make_parregion(partree, *args)

    def _make_guard(self, parregion, *args):
        """
        Wrap device parallel regions in runtime guards ensuring they are
        only entered with a non-degenerate iteration space.
        """
        partrees = FindNodes(ParallelTree).visit(parregion)
        if not any(isinstance(i.root, self.DeviceIteration) for i in partrees):
            return super()._make_guard(parregion, *args)

        cond = []
        # There must be at least one iteration or potential crash
        if not parregion.is_Affine:
            trees = retrieve_iteration_tree(parregion.root)
            tree = trees[0][:parregion.ncollapsed]
            cond.extend([i.symbolic_size > 0 for i in tree])

        # SparseFunctions may occasionally degenerate to zero-size arrays. In such
        # a case, a copy-in produces a `nil` pointer on the device. To fire up a
        # parallel loop we must ensure none of the SparseFunction pointers are `nil`
        symbols = FindSymbols().visit(parregion)
        sfs = [i for i in symbols if i.is_SparseFunction]
        if sfs:
            size = [prod(f._C_get_field(FULL, d).size for d in f.dimensions) for f in sfs]
            cond.extend([i > 0 for i in size])

        # Combine all cond elements
        if cond:
            parregion = List(body=[Conditional(And(*cond), parregion)])

        return parregion

    def _make_nested_partree(self, partree):
        if isinstance(partree.root, self.DeviceIteration):
            # no-op for now
            return partree
        else:
            return super()._make_nested_partree(partree)
class PragmaLangBB(LangBB):

    """
    Abstract base class for pragma-based language building blocks, providing
    the data-mapping constructs (host<->device copies, alloc/release/delete).
    """

    @classmethod
    def _make_sections_from_imask(cls, f, imask):
        """
        Build the array-section string (e.g. "[0:nx][2:1]") for Function `f`
        from an "index mask" `imask`; each entry is FULL (whole dimension),
        a (start, size) pair, or a single start index (size 1).
        """
        datasize = cls._map_data(f)
        if imask is None:
            imask = [FULL]*len(datasize)
        assert len(imask) == len(datasize)
        sections = []
        for i, j in zip(imask, datasize):
            if i is FULL:
                start, size = 0, j
            else:
                try:
                    start, size = i
                except TypeError:
                    # A scalar index: a single-point section
                    start, size = i, 1
            start = ccode(start)
            sections.append('[%s:%s]' % (start, size))
        return ''.join(sections)

    @classmethod
    def _map_data(cls, f):
        # Per-dimension allocated sizes of `f`
        if f.is_Array:
            return f.symbolic_shape
        else:
            return tuple(f._C_get_field(FULL, d).size for d in f.dimensions)

    @classmethod
    def _map_to(cls, f, imask=None, queueid=None):
        """Copy (a section of) `f` from host to device."""
        sections = cls._make_sections_from_imask(f, imask)
        return cls.mapper['map-enter-to'](f.name, sections)

    _map_to_wait = _map_to

    @classmethod
    def _map_alloc(cls, f, imask=None):
        """Allocate (a section of) `f` on the device, without copying data."""
        sections = cls._make_sections_from_imask(f, imask)
        return cls.mapper['map-enter-alloc'](f.name, sections)

    @classmethod
    def _map_present(cls, f, imask=None):
        # No-op at this level; subclasses may emit a "present" clause instead
        return

    @classmethod
    def _map_update(cls, f):
        """Update the host copy of `f` in its entirety."""
        return cls.mapper['map-update'](f.name, ''.join('[0:%s]' % i
                                                        for i in cls._map_data(f)))

    @classmethod
    def _map_update_host(cls, f, imask=None, queueid=None):
        """Update the host copy of (a section of) `f`."""
        sections = cls._make_sections_from_imask(f, imask)
        return cls.mapper['map-update-host'](f.name, sections)

    _map_update_wait_host = _map_update_host

    @classmethod
    def _map_update_device(cls, f, imask=None, queueid=None):
        """Update the device copy of (a section of) `f`."""
        sections = cls._make_sections_from_imask(f, imask)
        return cls.mapper['map-update-device'](f.name, sections)

    _map_update_wait_device = _map_update_device

    @classmethod
    def _map_release(cls, f, devicerm=None):
        """Release `f` from device memory, guarded by `devicerm` if given."""
        return cls.mapper['map-release'](f.name,
                                         ''.join('[0:%s]' % i for i in cls._map_data(f)),
                                         (' if(%s)' % devicerm.name) if devicerm else '')

    @classmethod
    def _map_delete(cls, f, imask=None, devicerm=None):
        """Delete (a section of) `f` from device memory."""
        sections = cls._make_sections_from_imask(f, imask)
        # This ugly condition is to avoid a copy-back when, due to
        # domain decomposition, the local size of a Function is 0, which
        # would cause a crash
        items = []
        if devicerm is not None:
            items.append(devicerm.name)
        items.extend(['(%s != 0)' % i for i in cls._map_data(f)])
        cond = ' if(%s)' % ' && '.join(items)
        return cls.mapper['map-exit-delete'](f.name, sections, cond)
# Utils

class VExpanded(Dereference):
    """Marker node for a vector-expanded (thread-private) Array dereference."""
    pass
| {
"content_hash": "e673b34575559dfe350b28b5fe8ad625",
"timestamp": "",
"source": "github",
"line_count": 554,
"max_line_length": 90,
"avg_line_length": 37.6028880866426,
"alnum_prop": 0.5795410906298003,
"repo_name": "opesci/devito",
"id": "afad4ea3a1826d3a763169d72354c378212231bb",
"size": "20832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devito/passes/iet/parpragma.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "812"
},
{
"name": "Python",
"bytes": "1683413"
},
{
"name": "Shell",
"bytes": "3900"
}
],
"symlink_target": ""
} |
"""
Log Server
"""
import sys
from time import gmtime, strftime
from enum import Enum
from core.observer import Observer
from core.base_component import BaseComponent
class LogLevel(Enum):
    """
    Enumeration for setting a log level.

    Each member's value is the ANSI escape sequence used to colour
    console output for that level ('\\033[0m' resets to the default).
    """
    INFO = '\033[0m'
    DEBUG = '\033[0;37m'
    SUCCESS = '\033[92m'
    WARNING = '\033[93m'
    ERROR = '\033[91m'
class LogServer(BaseComponent):
    """
    Component that relays log messages: it owns a LogObserver which
    prints incoming log packages to the console and forwards them.
    """
    def __init__(self):
        BaseComponent.__init__(self)
        self.log_observer = LogServer.LogObserver(self)

    class LogObserver(Observer):
        """
        Observer receiving log packages of the form
        (message, source, LogLevel) and emitting them.
        """
        def __init__(self, outer):
            self.outer = outer

        def update(self, observable, package):
            message = package[0]
            source = package[1]
            level = package[2]
            # "<timestamp> <source>: <message>", coloured by level and
            # terminated by an ANSI reset code.
            timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
            log_message = timestamp + " " + source + ": " + message
            log_line = level.value + log_message + '\033[0m'

            # Errors go to stderr, everything else to stdout.
            stream = sys.stderr if level == LogLevel.ERROR else sys.stdout
            print(log_line, file=stream)

            self.outer.send({'log_message': log_message, 'log_level': level.name})
| {
"content_hash": "e6b98ddc048715f8673b265f75610d6f",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 87,
"avg_line_length": 28.186046511627907,
"alnum_prop": 0.5412541254125413,
"repo_name": "thomaskuestner/MRIQA",
"id": "ccf26aea986ae3148448348cba754321528ee5a9",
"size": "1212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "components/log_server.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "HTML",
"bytes": "11246"
},
{
"name": "JavaScript",
"bytes": "68868"
},
{
"name": "Python",
"bytes": "28211"
},
{
"name": "Shell",
"bytes": "36"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import AreaTagModel
class MapAreaInlineAdmin(admin.TabularInline):
    """Tabular inline for editing AreaTagModel rows alongside their map."""
    model = AreaTagModel
    # The AreaTagModel FK pointing back at the parent map object.
    fk_name = 'mapa'
| {
"content_hash": "b9c5060110311b830ebf576a86d71326",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 46,
"avg_line_length": 23,
"alnum_prop": 0.7701863354037267,
"repo_name": "tourlines/cmsplugin-html",
"id": "a8ea0312eaadcf24dfcff16006e33f74261c8723",
"size": "177",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cmsplugin_html/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "941"
},
{
"name": "JavaScript",
"bytes": "12158"
},
{
"name": "Python",
"bytes": "87253"
},
{
"name": "Shell",
"bytes": "103"
}
],
"symlink_target": ""
} |
"""Check that it's not possible to start a second auroracoind instance using the same datadir or wallet."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
class FilelockTest(BitcoinTestFramework):
    def set_test_params(self):
        # Two nodes: node 0 runs normally, node 1 attempts to reuse its data.
        self.setup_clean_chain = True
        self.num_nodes = 2

    def setup_network(self):
        # Only start node 0; node 1 is started (and expected to fail) in run_test.
        self.add_nodes(self.num_nodes, extra_args=None)
        self.nodes[0].start([])
        self.nodes[0].wait_for_rpc_connection()

    def run_test(self):
        datadir = os.path.join(self.nodes[0].datadir, 'regtest')
        self.log.info("Using datadir {}".format(datadir))

        self.log.info("Check that we can't start a second auroracoind instance using the same datadir")
        expected_msg = "Error: Cannot obtain a lock on data directory {0}. {1} is probably already running.".format(datadir, self.config['environment']['PACKAGE_NAME'])
        self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg)

        if self.is_wallet_compiled():
            # Same check for the wallet directory: the BDB environment is locked.
            wallet_dir = os.path.join(datadir, 'wallets')
            self.log.info("Check that we can't start a second auroracoind instance using the same wallet")
            expected_msg = "Error: Error initializing wallet database environment"
            self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX)
if __name__ == '__main__':
    # Run the functional test via the framework's standard entry point.
    FilelockTest().main()
"content_hash": "deec91ee00347202f1eac0022028ab0e",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 177,
"avg_line_length": 50.8125,
"alnum_prop": 0.6845018450184502,
"repo_name": "aurarad/auroracoin",
"id": "ba57298b3e7626ae5e0656d1df6f55451a3544db",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/feature_filelock.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "721707"
},
{
"name": "C++",
"bytes": "3060648"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18860"
},
{
"name": "HTML",
"bytes": "50620"
},
{
"name": "Makefile",
"bytes": "31933"
},
{
"name": "Objective-C",
"bytes": "1052"
},
{
"name": "Objective-C++",
"bytes": "6330"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "110348"
},
{
"name": "QMake",
"bytes": "2022"
},
{
"name": "Shell",
"bytes": "51195"
}
],
"symlink_target": ""
} |
"""This file contains the Spotlight Volume Configuration plist in Plaso."""
from plaso.events import plist_event
from plaso.parsers.plist_plugins import interface
__author__ = 'Joaquin Moreno Garijo (Joaquin.MorenoGarijo.2013@live.rhul.ac.uk)'
class SpotlightVolumePlugin(interface.PlistPlugin):
  """Basic plugin to extract the Spotlight Volume Configuration."""

  NAME = 'plist_spotligth_volume'

  PLIST_PATH = 'VolumeConfiguration.plist'
  PLIST_KEYS = frozenset(['Stores'])

  def GetEntries(self, match, **unused_kwargs):
    """Extracts relevant VolumeConfiguration Spotlight entries.

    Args:
      match: A dictionary containing keys extracted from PLIST_KEYS.

    Yields:
      EventObject objects extracted from the plist.
    """
    stores = match['Stores']
    for store_name, store in stores.iteritems():
      description = u'Spotlight Volume {} ({}) activated.'.format(
          store_name, store['PartialPath'])
      event = plist_event.PlistEvent(
          u'/Stores', '', store['CreationDate'], description)
      yield event
| {
"content_hash": "e9d400a91ca6d7b1e41034270374862f",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 80,
"avg_line_length": 33,
"alnum_prop": 0.7116324535679375,
"repo_name": "iwm911/plaso",
"id": "29f501066df4a40fcfd5697ae9f39c76a7ef2601",
"size": "1721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plaso/parsers/plist_plugins/spotlight_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2431825"
},
{
"name": "Shell",
"bytes": "21885"
},
{
"name": "VHDL",
"bytes": "2100224"
}
],
"symlink_target": ""
} |
import random
import re
from sopel.module import commands, example
#Matches: <@+nickNames123>
_NICKNAME = '(<[^\s]+>)'
@commands('quote')
def quote(bot, trigger):
    """Bot command: store a quote (if the input contains a <nick>) or
    retrieve a random one matching the optional nick and /search/ filters."""
    quotes = bot.mongodb.quotes
    # NOTE: `input` shadows the builtin of the same name.
    input = trigger.group(2) if trigger.group(2) else ''
    quotable = re.findall(_NICKNAME, input)
    if quotable:
        # Input contains at least one <nick>, so treat it as a new quote.
        who, quote = parse_quotable_params(input)
        store_quote(quotes, who, quote)
        bot.reply('quote stored.')
    else:
        # Otherwise it's a lookup: optional nick and/or /regex/ search.
        nick, search = parse_search_params(input)
        bot.say(get_random_quote(quotes, nick, search))
def parse_search_params(params):
    """Split a lookup command into its optional parts.

    Accepts "<nick>", "/search/", "<nick> /search/" or "".
    Returns a ``(nick, search)`` tuple where either element may be None;
    returns ``(None, None)`` when the input doesn't match at all.
    """
    # Raw string: the original used a plain string with \w/\s and a needless
    # \/ escape, which is fragile (invalid-escape warnings in newer Pythons).
    match = re.match(r'(?P<nick>\w+)?\s?(?P<search>/.*/)?$', params)
    if not match:
        return (None, None)
    search = match.group('search')
    if search:
        # Strip the surrounding slashes of the /search/ term.
        search = search.strip().replace('/', '')
    return (match.group('nick'), search)
def parse_quotable_params(params):
    """Split a "store quote" command into its parts.

    Input looks like "[realname] [hh:mm] <nick> the quote ...".
    Returns ``(quoted, quote)`` where `quoted` is a deterministic, sorted,
    duplicate-free list of lower-cased nicknames and `quote` is the raw
    quote text starting at the first '<'.

    NOTE: assumes the caller has already checked that `params` contains a
    <nick> (as quote() does); otherwise `match` is None and this raises.
    """
    # Raw strings for the regex escapes (\w, \s, \d).
    match = re.match(r'(?P<realname>\w+)?\s*[\d:]{0,5}\s*(?P<quote><.*)', params)
    quoted = [match.group('realname')] if match.group('realname') else []
    quote = match.group('quote')
    nicks = re.findall(_NICKNAME, quote)
    if nicks:
        # Strip IRC mode/format sigils and the angle brackets.
        nicks = [re.subn(r'[@<>+]', '', n)[0] for n in nicks]
    # Lower-case BEFORE deduplicating, so e.g. 'Bob' (realname) and 'bob'
    # (nick from the quote) collapse into a single entry; the original
    # lowered after set(), which could store duplicates. sorted() makes
    # the stored list deterministic (order is not meaningful downstream).
    quoted = sorted({nick.lower() for nick in quoted + nicks})
    return (quoted, quote)
def get_random_quote(quotes, nick, search):
    """Return a random quote from the Mongo collection `quotes`, optionally
    filtered by `nick` and/or a case-insensitive `search` regex."""
    query = {}
    if nick:
        query['nick'] = nick.lower()
    if search:
        query['quote'] = re.compile(search, re.IGNORECASE)
    # Project to _id only; a second query fetches the chosen quote text.
    ids = quotes.find(query, [])
    num_quotes = ids.count()
    if num_quotes == 0:
        return "No quotes found."
    # Pick a random matching document by cursor offset.
    quote = random.randint(0, num_quotes - 1)
    quote = quotes.find({'_id': ids[quote]['_id']}, ['quote'])
    return quote[0]['quote']
def store_quote(quotes, quoted, quote):
    """Persist `quote`, attributed to the nicks in `quoted`, in the
    Mongo collection `quotes`."""
    quotes.insert({
        'nick': quoted,
        'network': 'slashnet',
        'quote': quote,
    })
| {
"content_hash": "31c7fb920e6e1008cb74c6147b3c69d7",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 75,
"avg_line_length": 26.426666666666666,
"alnum_prop": 0.5817356205852674,
"repo_name": "jimj/sopelmodules",
"id": "37de50b9a91da397eadf34e48fdb6b9fc5219947",
"size": "1982",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4944"
}
],
"symlink_target": ""
} |
import os
import sys
import traceback
from zipfile import ZipFile
# Magic python path, based on http://djangosnippets.org/snippets/281/
from os.path import abspath, dirname, join
parentdir = dirname(dirname(abspath(__file__)))
# Insert our parent directory (the one containing the folder metashare/):
sys.path.insert(1, parentdir)
try:
import settings # Assumed to be in the same directory.
except ImportError:
sys.stderr.write("Error: Can't find the file 'settings.py' in the " \
"directory containing %r. It appears you've customized things.\n" \
"You'll have to run django-admin.py, passing it your settings " \
"module.\n(If the file settings.py does indeed exist, it's causing" \
" an ImportError somehow.)\n" % __file__)
sys.exit(1)
if __name__ == "__main__":
    # Export every (non-deleted) resource from the META-SHARE database into
    # the zip archive given as first command-line argument. Python 2 script.
    os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
    PROJECT_HOME = os.path.normpath(os.getcwd() + "/..")
    sys.path.append(PROJECT_HOME)

    # Check command line parameters first.
    if len(sys.argv) < 2:
        print "\n\tusage: {0} <archive.zip> <upgrade-db>\n".format(
          sys.argv[0])
        print "\tWARNING: when providing the 'upgrade-db' switch, the " \
          "database will be upgraded\n\tto the latest schema " \
          "automatically. This step is not reversible which means the\n\t" \
          "database cannot be used with older versions of the META-SHARE " \
          "software anymore.\n"
        print "\tYou have been warned.\n"
        sys.exit(-1)

    if len(sys.argv) > 2 and sys.argv[2] == 'upgrade-db':
        # Automagically run syncdb to update database to the latest version.
        from django.core.management import execute_from_command_line
        execute_from_command_line(['manage.py', 'syncdb', '--noinput'])

        # Make sure that ResourceInfo gets a schema update with AudioInfo,
        # ToolServiceInfo, and LanguageDescriptionInfo as syncdb won't do it.
        # Also patch in column textclassificationinfo as it seems necessary.
        import sqlite3
        conn = sqlite3.connect(settings.DATABASES['default']['NAME'])

        patches = {
          'repository_resourceinfo': ('AudioInfo_id', 'ToolServiceInfo_id',
            'LanguageDescriptionInfo_id'),
          'repository_textclassificationinfo':
            ('conformanceToClassificationScheme',)
        }

        # Best-effort: a column that already exists raises OperationalError,
        # in which case the remaining columns of that table are skipped.
        for table, columns in patches.items():
            try:
                for column in columns:
                    result = conn.execute('ALTER TABLE {0} ADD COLUMN ' \
                      '"{1}";'.format(table, column))
            except sqlite3.OperationalError:
                continue
        conn.close()

    # Disable verbose debug output for the import process...
    settings.DEBUG = False

    SUCCESSFUL_EXPORTS = 0
    ERRONEOUS_EXPORTS = 0
    RESOURCE_NO = 0
    # Imported here, after DJANGO_SETTINGS_MODULE has been configured.
    from metashare.repository.models import resourceInfoType_model
    from metashare.xml_utils import to_xml_string

    with ZipFile(sys.argv[1], 'w') as out:
        for resource in resourceInfoType_model.objects.all():
            # Skip resources marked as deleted.
            if resource.storage_object.deleted == True:
                continue

            # Serialize one resource per XML file inside the archive;
            # an export failure is logged but does not abort the run.
            try:
                RESOURCE_NO += 1
                root_node = resource.export_to_elementtree()
                xml_string = to_xml_string(
                  root_node, encoding="utf-8").encode('utf-8')
                resource_filename = 'resource-{0}.xml'.format(RESOURCE_NO)
                out.writestr(resource_filename, xml_string)
                SUCCESSFUL_EXPORTS += 1

            except Exception:
                ERRONEOUS_EXPORTS += 1
                print 'Could not export resource id={0}!'.format(resource.id)
                print traceback.format_exc()

    print "Done. Successfully exported {0} files from the database, errors " \
      "occured in {1} cases.".format(SUCCESSFUL_EXPORTS, ERRONEOUS_EXPORTS)
| {
"content_hash": "ac3e4d5ae45717b4a94f711e931a6d4c",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 78,
"avg_line_length": 41.447916666666664,
"alnum_prop": 0.6142246795677306,
"repo_name": "zeehio/META-SHARE",
"id": "745f9c15a64c380b7d429546298f430c73bcae22",
"size": "4002",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "metashare/export_xml.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7362"
},
{
"name": "C",
"bytes": "321"
},
{
"name": "C++",
"bytes": "112277"
},
{
"name": "CSS",
"bytes": "125117"
},
{
"name": "HTML",
"bytes": "2956138"
},
{
"name": "Java",
"bytes": "12780"
},
{
"name": "JavaScript",
"bytes": "201032"
},
{
"name": "M4",
"bytes": "8416"
},
{
"name": "Makefile",
"bytes": "26172"
},
{
"name": "Python",
"bytes": "4084877"
},
{
"name": "Shell",
"bytes": "121386"
},
{
"name": "XSLT",
"bytes": "473763"
}
],
"symlink_target": ""
} |
from codecs import open
from ohoh import build_parser, DEFAULT_HOST, DEFAULT_PORT
from os import path
import os
import pytest
import sys
import time
@pytest.fixture
def parser():
    # Fresh argument parser per test; build_parser() itself is the unit
    # under test for the parsing test cases below.
    return build_parser()
@pytest.fixture
def modfile(request, tmpdir):
    # Write a throw-away importable module to a temp directory and put that
    # directory at the front of sys.path.  The finalizer removes the path
    # entry again so later tests are not polluted.
    def uninstall():
        sys.path.remove(tmpdir.strpath)
    sys.path.insert(0, tmpdir.strpath)
    # Randomized module name so repeated runs never collide in sys.modules.
    name = "rand_mod_name" + str(hash(time.time()))
    pyfile = path.join(tmpdir.strpath, name + ".py")
    with open(pyfile, "w", encoding="utf-8") as modfile:
        modfile.write("#-*- coding: utf-8 -*-\n")
        modfile.write("app = lambda: None\n")
    request.addfinalizer(uninstall)
    return pyfile
@pytest.fixture
def modname(modfile):
    """Bare module name of the temp module, evicted from sys.modules so each
    test gets a clean import."""
    name = path.splitext(path.basename(modfile))[0]
    sys.modules.pop(name, None)
    return name
@pytest.mark.parametrize("address,expected", [
    (None, (DEFAULT_HOST, DEFAULT_PORT)),
    ("localhost", ("localhost", DEFAULT_PORT)),
    ("localhost:80", ("localhost", 80)),
    ("google.com", ("google.com", DEFAULT_PORT)),
    ("google.com:80", ("google.com", 80)),
    (":5868", ("", 5868)),
])
def test_address_parse(address, expected, modname, parser):
    # -s takes HOST[:PORT]; any omitted piece falls back to the default.
    # When address is None, -s is not passed at all.
    args = ["-s", address, modname] if address else [modname]
    parsed = parser.parse_args(args)
    assert parsed.address == expected
def test_app_spec_parse_nomod(parser):
    """An unimportable module in the app spec must raise ImportError."""
    with pytest.raises(ImportError):
        parser.parse_args(["sys.RANDOM_FOO.MODULE_I.PRAY.DOESNT_EXIST"])
def test_app_spec_parse_mod(parser, modname):
    """A bare module name resolves to that module's `app` object."""
    result = parser.parse_args([modname])
    assert result.app() is None
def test_app_spec_parse_noobj(parser, modname):
    """Naming a missing attribute on a real module raises AttributeError."""
    spec = "{0}:obj_doesnt_exist".format(modname)
    with pytest.raises(AttributeError):
        parser.parse_args([spec])
def test_app_spec_parse_obj_not_callable(parser, modname, modfile):
    """A non-callable attribute makes the parser bail out with SystemExit."""
    with open(modfile, "a", encoding="utf-8") as f:
        f.write("appx = None")
    spec = "{0}:appx".format(modname)
    with pytest.raises(SystemExit):
        parser.parse_args([spec])
def test_app_spec_parse_obj(parser, modname, modfile):
    """A callable attribute named in the app spec resolves correctly."""
    with open(modfile, "a", encoding="utf-8") as f:
        f.write("appx = lambda: None")
    result = parser.parse_args(["{0}:appx".format(modname)])
    assert result.app() is None
def test_app_spec_parse_filename(parser, modfile):
    # Passing a file path (rather than a module name) should import the
    # module from that file and re-add its directory to sys.path.
    sys.path.remove(path.dirname(modfile))
    parsed = parser.parse_args([modfile])
    assert parsed.app() is None
    assert path.dirname(modfile) in sys.path
def test_path_parse(parser, tmpdir):
    # -p DIR: an existing directory is added to sys.path, a missing one is
    # rejected.  NOTE(review): parse_args raises SystemExit in both cases —
    # presumably because no valid app spec is given; verify against the
    # parser implementation.
    created = path.join(tmpdir.strpath, "created")
    uncreated = path.join(tmpdir.strpath, "uncreated")
    os.mkdir(created)
    with pytest.raises(SystemExit):
        parsed = parser.parse_args(["-p", created])
    assert created in sys.path
    with pytest.raises(SystemExit):
        parsed = parser.parse_args(["-p", uncreated])
    assert uncreated not in sys.path
def test_app_spec_parse_obj_factory(parser, modname, modfile):
    # Append a factory class to the temp module; the app spec may then call
    # it with positional and keyword arguments, e.g. "mod:App(name=...)".
    with open(modfile, "a", encoding="utf-8") as f:
        f.write("""
class App(object):
    def __init__(self, name=None, **kwargs):
        self.name = name
        for kw in kwargs:
            setattr(self, kw, kwargs[kw])
    def __call__(self):
        pass
""")
    # No-argument call: defaults apply.
    parsed = parser.parse_args(["{0}:App()".format(modname)])
    assert parsed.app.name is None
    assert getattr(parsed.app, 'age', None) is None
    # Positional argument.
    parsed = parser.parse_args(['{0}:App("Te-je")'.format(modname)])
    assert parsed.app.name == "Te-je"
    assert getattr(parsed.app, 'age', None) is None
    # Keyword arguments, including an arbitrary extra attribute.
    parsed = parser.parse_args(['{0}:App(name="Jimmy", age=59)'.format(modname)])
    assert parsed.app.name == "Jimmy"
    assert parsed.app.age == 59
| {
"content_hash": "cc38afa18fc926a2dd1e58ce973c2377",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 81,
"avg_line_length": 31.076271186440678,
"alnum_prop": 0.6523043359694574,
"repo_name": "te-je/ohoh",
"id": "2ac2f2c7138d46cab1a9b43042ce9e5dad55ab02",
"size": "3667",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_script.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34866"
}
],
"symlink_target": ""
} |
"""
Openflow tests on an l2 table
"""
import sys
import os
import time
import logging
from oftest import config
import oftest.base_tests as base_tests
import ofp
from oftest.testutils import *
from oftest.parse import parse_mac
import openflow_base_tests
sys.path.append(os.path.join(sys.path[0], '..', '..', '..', '..',
'testutils'))
from utils import *
sys.path.append(os.path.join(sys.path[0], '..', '..', '..', '..',
'targets', 'switch', 'tests', 'pd_thrift'))
from p4_pd_rpc.ttypes import *
from res_pd_rpc.ttypes import *
sys.path.append(os.path.join(sys.path[0], '..', '..', '..', '..',
'targets', 'switch', 'openflow_mapping'))
from l2 import *
### TODO: generate expected packets
#######################
# SOME OPENFLOW UTILS #
#######################
# common shorthands
# Short aliases for the pyloxi/ofp message constructors used below.
flow_add = ofp.message.flow_add
flow_delete = ofp.message.flow_delete
group_add = ofp.message.group_add
group_mod = ofp.message.group_mod
group_delete = ofp.message.group_delete
table_stats_req = ofp.message.table_stats_request
table_stats_reply = ofp.message.table_stats_reply
packet_in = ofp.message.packet_in
packet_out = ofp.message.packet_out
# "No buffer" sentinel used as buffer_id in every flow_add below.
buf = ofp.OFP_NO_BUFFER
# dmac table fields
# Names of the P4 match fields backing the OpenFlow "dmac" table.
eth_dst_addr = "l2_metadata_lkp_mac_da"
ingress_vlan = "ingress_metadata_bd"
# Destination MAC and VLAN shared by all test cases in this file.
TEST_ETH_DST = "00:01:02:03:04:05"
TEST_VLAN = 3
def get_oxm(field_obj):
    """
    Map an OFMatchField to a (pyloxi oxm, packet-arg dict) pair.

    The dict is merged into the keyword arguments of simple_tcp_packet so
    the generated test packet actually matches the returned OXM.

    Raises:
        ValueError: for match fields with no mapping here.  Previously the
            function fell through and returned None implicitly, which
            surfaced as an opaque unpacking TypeError in get_match().
    """
    if field_obj.field == "OFPXMT_OFB_VLAN_VID":
        return (ofp.oxm.vlan_vid(field_obj.testval),
                {"vlan_vid": field_obj.testval, "dl_vlan_enable": True})
    elif field_obj.field == "OFPXMT_OFB_ETH_DST":
        return (ofp.oxm.eth_dst(parse_mac(field_obj.testval)),
                {"eth_dst": field_obj.testval})
    raise ValueError("get_oxm: unsupported match field %s" % field_obj.field)
def get_match(match_fields):
    """
    Build a (packet, ofp.match) pair from a dict of match-field objects:
    the returned packet is constructed so that it matches the OXM list.
    """
    match = ofp.match()
    pkt_args = {}
    for field_obj in match_fields.values():
        oxm, extra_args = get_oxm(field_obj)
        match.oxm_list.append(oxm)
        pkt_args.update(extra_args)
    return (str(simple_tcp_packet(**pkt_args)), match)
def get_action(action, arg):
    # Translate an (action-name, argument) pair into a pyloxi action object.
    # `arg` is ignored by the argument-less actions (PUSH_MPLS, DEC_MPLS_TTL,
    # POP_MPLS, PUSH_VLAN); for SET_FIELD it is an OFMatchField object.
    if action == "OUTPUT":
        ofpaction = ofp.action.output(arg, ofp.OFPCML_NO_BUFFER)
    elif action == "PUSH_MPLS":
        ofpaction = ofp.action.push_mpls()
    elif action == "SET_MPLS_TTL":
        ofpaction = ofp.action.set_mpls_ttl(arg)
    elif action == "DEC_MPLS_TTL":
        ofpaction = ofp.action.dec_mpls_ttl()
    elif action == "POP_MPLS":
        ofpaction = ofp.action.pop_mpls()
    elif action == "SET_FIELD":
        oxm, _ = get_oxm(arg)
        ofpaction = ofp.action.set_field(oxm)
    elif action == "PUSH_VLAN":
        ofpaction = ofp.action.push_vlan()
    elif action == "GROUP":
        ofpaction = ofp.action.group(arg)
    elif action == "SET_NW_TTL":
        ofpaction = ofp.action.set_nw_ttl(arg)
    else:
        # Unknown action names abort the whole test run.
        logging.info("No get_action for %s", action)
        exit(1)
    return ofpaction
def get_apply_actions(actions):
    """
    Wrap the given {action-name: argument} dict into a single-element list
    containing one APPLY_ACTIONS instruction.
    """
    apply_instr = ofp.instruction.apply_actions()
    for name, argument in actions.items():
        apply_instr.actions.append(get_action(name, argument))
    return [apply_instr]
def get_group_all(gid, action_sets):
    """Build a group_add (type ALL) message with one bucket per action set."""
    buckets = [
        ofp.bucket(actions=[get_action(name, arg)
                            for name, arg in action_set.items()])
        for action_set in action_sets
    ]
    return group_add(group_type=ofp.OFPGT_ALL, group_id=gid, buckets=buckets)
def get_group_mod(gid, action_sets):
    """Build a group_mod (type ALL) message with one bucket per action set."""
    buckets = [
        ofp.bucket(actions=[get_action(name, arg)
                            for name, arg in action_set.items()])
        for action_set in action_sets
    ]
    return group_mod(group_type=ofp.OFPGT_ALL, group_id=gid, buckets=buckets)
##############################
# TABLE/TEST SETUP FUNCTIONS #
##############################
def setup_default_table_configurations(client, sess_hdl, dev_tgt):
    # Install a default port/VLAN -> bridge-domain mapping via the P4 thrift
    # API: packets arriving on ifindex 1 tagged with TEST_VLAN are assigned
    # bridge domain TEST_VLAN with IPv4 unicast enabled.
    ifindex = 1
    action_spec = dc_set_bd_action_spec_t(
        action_bd=TEST_VLAN,
        action_vrf=0,
        action_rmac_group=0,
        action_ipv4_unicast_enabled=True,
        action_ipv6_unicast_enabled=False,
        action_bd_label=0,
        action_igmp_snooping_enabled=0,
        action_mld_snooping_enabled=0,
        action_ipv4_urpf_mode=0,
        action_ipv6_urpf_mode=0,
        action_stp_group=0,
        action_stats_idx=0,
        action_learning_enabled=0)
    # The action profile member must exist before the match entry refers
    # to it.
    mbr_hdl = client.bd_action_profile_add_member_with_set_bd(
        sess_hdl, dev_tgt,
        action_spec)
    match_spec = dc_port_vlan_mapping_match_spec_t(
        ingress_metadata_ifindex=ifindex,
        vlan_tag__0__valid=True,
        vlan_tag__0__vid=TEST_VLAN,
        vlan_tag__1__valid=0,
        vlan_tag__1__vid=0)
    client.port_vlan_mapping_add_entry(
        sess_hdl, dev_tgt,
        match_spec, mbr_hdl)
def setup_pre(mc, sess_hdl, dev_tgt):
    # Configure the packet replication engine: create multicast group 1
    # whose single node replicates to ports 1, 2 and 3 (bitmap below).
    mgrp_hdl = mc.mc_mgrp_create(sess_hdl, dev_tgt.dev_id, 1)
    port_map = [0] * 32
    lag_map = [0] * 32
    # port 1, port 2, port 3
    port_map[0] = (1 << 1) | (1 << 2) | (1 << 3)
    node_hdl = mc.mc_node_create(sess_hdl, dev_tgt.dev_id, 0,
                                 bytes_to_string(port_map),
                                 bytes_to_string(lag_map))
    mc.mc_associate_node(sess_hdl, dev_tgt.dev_id, mgrp_hdl, node_hdl)
def setup(self):
    # Common per-test fixture: open a thrift session against device 0
    # (all pipes) and install the default tables and multicast config.
    sess_hdl = self.conn_mgr.client_init(16)
    dev_tgt = DevTarget_t(0, hex_to_i16(0xFFFF))
    setup_default_table_configurations(self.client, sess_hdl, dev_tgt)
    setup_pre(self.mc, sess_hdl, dev_tgt)
##############
# TEST CASES #
##############
class Output(openflow_base_tests.OFTestInterface):
    """
    Forwards matching packet.

    Installs a dmac flow (cookie 41) with a single OUTPUT action, sends a
    matching packet in on the first port and verifies it is forwarded out
    that same port, then removes the flow.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")
    def runTest(self):
        setup(self)
        ports = sorted(config["port_map"].keys())
        table, out_port = openflow_tables["dmac"], ports[0]
        # Point the dmac match fields at the canonical test MAC/VLAN.
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        output = {
            "OUTPUT": out_port
        }
        instr = get_apply_actions(output)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=1, cookie=41)
        exp_pkt = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                    eth_dst=TEST_ETH_DST)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt, out_port)
        # Clean up: delete the flow installed above by its cookie.
        req = flow_delete(cookie=41, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
class NWTTL(openflow_base_tests.OFTestInterface):
    """
    Sets ttl of matching packet.

    Installs a dmac flow (cookie 42) with OUTPUT + SET_NW_TTL actions and
    verifies the forwarded packet carries the rewritten IP TTL.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")
    def runTest(self):
        setup(self)
        ttl, ports = 0x37, sorted(config["port_map"].keys())
        table, out_port = openflow_tables["dmac"], ports[2]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        nw = {
            "OUTPUT": out_port,
            "SET_NW_TTL" : ttl
        }
        instr = get_apply_actions(nw)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=2, cookie=42)
        # Expected packet differs from the input only in its IP TTL.
        exp_pkt = str(simple_tcp_packet(ip_ttl=ttl, dl_vlan_enable=True,
                                        vlan_vid=TEST_VLAN, eth_dst=TEST_ETH_DST))
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt, out_port)
        req = flow_delete(cookie=42, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
class GroupAdd(openflow_base_tests.OFTestInterface):
    """
    Create a group that pushes a vlan, sets vlan id
    and forwards out a port

    An ALL group with two buckets replicates the matching packet, so one
    copy (vlan 10) must appear on the first port and one (vlan 19) on the
    second.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")
    def runTest(self):
        setup(self)
        group_id, ports = (1 << 24) + 4, sorted(config["port_map"].keys())
        outport1, outport2 = ports[0], ports[1]
        # Each bucket: push a vlan tag, set its VID, output to one port.
        bucket1 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=10),
            "OUTPUT" : outport1
        }
        bucket2 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=19),
            "OUTPUT" : outport2
        }
        # The group must exist before a flow can reference it.
        req = get_group_all(group_id, [bucket1, bucket2])
        self.controller.message_send(req)
        do_barrier(self.controller)
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        groupall = {
            "GROUP": group_id
        }
        instr = get_apply_actions (groupall)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=3, cookie=43)
        exp_pkt1 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=10,
                                     eth_dst=TEST_ETH_DST)
        exp_pkt2 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=19,
                                     eth_dst=TEST_ETH_DST)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt1, outport1)
        verify_packet(self, exp_pkt2, outport2)
        # Clean up flow first, then the group it references.
        req = flow_delete(cookie=43, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
        req = group_delete(group_type=ofp.OFPGT_ALL, group_id=(1 << 24) + 4)
        self.controller.message_send(req)
        do_barrier(self.controller)
class GroupMod(openflow_base_tests.OFTestInterface):
    """
    Modifies the group created in GroupAdd, then verifies.
    This test must be run after GroupAdd.

    Phase 1 installs a two-bucket ALL group (vlan rewrite) and checks the
    replicated copies; phase 2 replaces the buckets via group_mod with three
    TTL-rewriting buckets and checks all three copies.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")
    def runTest(self):
        setup(self)
        group_id, ports = (1 << 24) + 9, sorted(config["port_map"].keys())
        outport1, outport2 = ports[0], ports[1]
        bucket1 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=6),
            "OUTPUT" : outport1
        }
        bucket2 = {
            "PUSH_VLAN": None,
            "SET_FIELD": OFMatchField("OFPXMT_OFB_VLAN_VID", val=4),
            "OUTPUT" : outport2
        }
        req = get_group_all(group_id, [bucket1, bucket2])
        self.controller.message_send(req)
        do_barrier(self.controller)
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        groupall = {
            "GROUP": group_id
        }
        instr = get_apply_actions (groupall)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=3, cookie=44)
        exp_pkt1 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=6,
                                     eth_dst=TEST_ETH_DST)
        exp_pkt2 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=4,
                                     eth_dst=TEST_ETH_DST)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt1, outport1)
        verify_packet(self, exp_pkt2, outport2)
        # Phase 2: replace the group's buckets with three TTL-setting ones.
        outport1, outport2, outport3 = ports[1], ports[2], ports[3]
        bucket1 = {
            "SET_NW_TTL": 7,
            "OUTPUT": outport1
        }
        bucket2 = {
            "SET_NW_TTL": 17,
            "OUTPUT": outport2
        }
        bucket3 = {
            "SET_NW_TTL": 27,
            "OUTPUT": outport3
        }
        req = get_group_mod(group_id, [bucket1, bucket2, bucket3])
        self.controller.message_send(req)
        do_barrier(self.controller)
        exp_pkt1 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                     ip_ttl=7, eth_dst=TEST_ETH_DST)
        exp_pkt2 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                     ip_ttl=17, eth_dst=TEST_ETH_DST)
        exp_pkt3 = simple_tcp_packet(dl_vlan_enable=True, vlan_vid=TEST_VLAN,
                                     ip_ttl=27, eth_dst=TEST_ETH_DST)
        self.dataplane.send(ports[0], pkt)
        verify_packet(self, exp_pkt1, outport1)
        verify_packet(self, exp_pkt2, outport2)
        verify_packet(self, exp_pkt3, outport3)
        # Clean up flow first, then the group it references.
        req = flow_delete(cookie=44, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
        req = group_delete(group_type=ofp.OFPGT_ALL, group_id=(1 << 24) + 9)
        self.controller.message_send(req)
        do_barrier(self.controller)
class TableStatsGet(openflow_base_tests.OFTestInterface):
    """
    Check table statistics: after sending 10 matching and 7 non-matching
    packets, lookup_count must grow by 17 and matched_count by 10 relative
    to the counters sampled before the test.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")
    def runTest(self):
        setup(self)
        # Sample the counters up front so the assertions are relative.
        req = table_stats_req()
        (reply, pkt) = self.controller.transact(req)
        initial_matched_count = reply.entries[0].matched_count
        initial_lookup_count = reply.entries[0].lookup_count
        ports = sorted(config["port_map"].keys())
        out_port = ports[0]
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        hit_pkt, match = get_match(table.match_fields)
        output = {
            "OUTPUT": out_port
        }
        instr = get_apply_actions(output)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=1, cookie=45)
        self.controller.message_send(req)
        do_barrier(self.controller)
        num_hit_packets = 10
        for _ in xrange(num_hit_packets):
            self.dataplane.send(ports[0], hit_pkt)
        # Different destination MAC: counted as a lookup but not a match.
        miss_pkt = str(simple_tcp_packet(eth_dst="00:77:22:55:99:11",
                                         dl_vlan_enable=True, vlan_vid=3))
        num_miss_packets = 7
        for _ in xrange(num_miss_packets):
            self.dataplane.send(ports[0], miss_pkt)
        # Give the switch time to process packets and update counters.
        time.sleep(3)
        req = table_stats_req()
        (reply, pkt) = self.controller.transact(req)
        req = flow_delete(cookie=45, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
        assert reply.entries[0].lookup_count == num_miss_packets + num_hit_packets + initial_lookup_count
        assert reply.entries[0].matched_count == num_hit_packets + initial_matched_count
class PacketIn(openflow_base_tests.OFTestInterface):
    """
    Punt to controller: a flow whose action outputs to OFPP_CONTROLLER must
    cause the matching packet to arrive as a packet_in with reason
    OFPR_ACTION.
    """
    def __init__(self):
        openflow_base_tests.OFTestInterface.__init__(self, "dc")
    def runTest(self):
        setup(self)
        ports = sorted(config["port_map"].keys())
        in_port = ports[0]
        table = openflow_tables["dmac"]
        table.match_fields[eth_dst_addr].testval = TEST_ETH_DST
        table.match_fields[ingress_vlan].testval = TEST_VLAN
        pkt, match = get_match(table.match_fields)
        output = {
            "OUTPUT": ofp.const.OFPP_CONTROLLER
        }
        instr = get_apply_actions(output)
        req = flow_add(table_id=table.id, match=match, instructions=instr,
                       buffer_id=buf, priority=1, cookie=46)
        self.controller.message_send(req)
        do_barrier(self.controller)
        self.dataplane.send(in_port, pkt)
        verify_packet_in(self, str(pkt), in_port, ofp.const.OFPR_ACTION,
                         controller=self.controller)
        req = flow_delete(cookie=46, table_id=0)
        self.controller.message_send(req)
        do_barrier(self.controller)
| {
"content_hash": "c95c2b1e8c089d5a249427e248cc76be",
"timestamp": "",
"source": "github",
"line_count": 505,
"max_line_length": 105,
"avg_line_length": 33.34653465346535,
"alnum_prop": 0.5716745843230404,
"repo_name": "pierce-m/p4factory",
"id": "6658bef1241bec2a23ccc1262bf1393ae855e4dc",
"size": "16840",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "targets/switch/tests/of-tests/openflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "558182"
},
{
"name": "C++",
"bytes": "33861"
},
{
"name": "CSS",
"bytes": "685"
},
{
"name": "HTML",
"bytes": "4074"
},
{
"name": "JavaScript",
"bytes": "33524"
},
{
"name": "Logos",
"bytes": "1434"
},
{
"name": "Makefile",
"bytes": "53780"
},
{
"name": "Objective-C",
"bytes": "8269"
},
{
"name": "Python",
"bytes": "883671"
},
{
"name": "Shell",
"bytes": "13660"
}
],
"symlink_target": ""
} |
"""
Email sharing of add-ons and collections with various services.
"""
from tower import ugettext_lazy as _, ungettext as ngettext
# string replacements in URLs are: url, title, description
class ServiceBase(object):
    """Base class for sharing services."""
    @staticmethod
    def count_term(count):
        """Render this service's share count with the right term."""
        # Generic wording; subclasses override with service-specific terms
        # (diggs, tweets, shares, ...).
        return ngettext('{0} post', '{0} posts', count).format(count)
class DELICIOUS(ServiceBase):
    """Save-to-Delicious service.

    see: http://delicious.com/help/savebuttons
    """
    shortname = 'delicious'
    label = _(u'Add to Delicious')
    # BUG FIX: the "&notes=" query parameter had been mangled into a literal
    # U+00AC NOT SIGN ("¬es="), apparently by an HTML-entity round-trip of
    # "&not" -- which produced a broken share URL.
    url = (u'http://delicious.com/save?url={url}&title={title}'
           '&notes={description}')
class DIGG(ServiceBase):
    """see: http://digg.com/tools/integrate#3"""
    shortname = 'digg'
    label = _(u'Digg this!')
    url = (u'http://digg.com/submit?url={url}&title={title}&bodytext='
           '{description}&media=news&topic=tech_news')
    @staticmethod
    def count_term(count):
        # Digg-specific wording for the share count.
        return ngettext('{0} digg', '{0} diggs', count).format(count)
class FACEBOOK(ServiceBase):
    """see: http://www.facebook.com/share_options.php"""
    # Inherits the generic "post/posts" count_term from ServiceBase.
    shortname = 'facebook'
    label = _(u'Post to Facebook')
    url = u'http://www.facebook.com/share.php?u={url}&t={title}'
class FRIENDFEED(ServiceBase):
    """see: http://friendfeed.com/embed/link"""
    shortname = 'friendfeed'
    label = _(u'Share on FriendFeed')
    url = u'http://friendfeed.com/?url={url}&title={title}'
    @staticmethod
    def count_term(count):
        # FriendFeed-specific wording for the share count.
        return ngettext('{0} share', '{0} shares', count).format(count)
class MYSPACE(ServiceBase):
    """see: http://www.myspace.com/posttomyspace"""
    # Inherits the generic "post/posts" count_term from ServiceBase.
    shortname = 'myspace'
    label = _(u'Post to MySpace')
    url = (u'http://www.myspace.com/index.cfm?fuseaction=postto&t={title}'
           '&c={description}&u={url}&l=1')
class TWITTER(ServiceBase):
    # Posts the title plus the URL as the tweet status.
    shortname = 'twitter'
    label = _(u'Post to Twitter')
    url = u'https://twitter.com/home?status={title}%20{url}'
    @staticmethod
    def count_term(count):
        # Twitter-specific wording for the share count.
        return ngettext('{0} tweet', '{0} tweets', count).format(count)
# Display order of the sharing services, plus a shortname -> class lookup.
SERVICES_LIST = [DIGG, FACEBOOK, DELICIOUS, MYSPACE, FRIENDFEED, TWITTER]
SERVICES = dict((service.shortname, service) for service in SERVICES_LIST)
| {
"content_hash": "3879671af7cfc79a7bb3aea249c0707c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 74,
"avg_line_length": 30.93243243243243,
"alnum_prop": 0.6443861948449104,
"repo_name": "jbalogh/zamboni",
"id": "e0239b48542646efae0a9be1f32fd4e9dc2f9014",
"size": "2289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/sharing/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "JavaScript",
"bytes": "1553612"
},
{
"name": "Python",
"bytes": "2860649"
},
{
"name": "Shell",
"bytes": "8095"
}
],
"symlink_target": ""
} |
import time
import unittest
import config
import node
# Initial PAN id shared by all nodes before the commissioner's update.
PANID_INIT = 0xface
# Node ids used as self.nodes keys.
COMMISSIONER = 1
LEADER = 2
ROUTER = 3
# Operational-dataset timestamps; the router's higher active timestamp makes
# it form its own partition until the pending dataset reconciles them.
LEADER_ACTIVE_TIMESTAMP = 10
ROUTER_ACTIVE_TIMESTAMP = 20
ROUTER_PENDING_TIMESTAMP = 30
ROUTER_PENDING_ACTIVE_TIMESTAMP = 25
# Channel/PAN id pushed by the commissioner's MGMT_PENDING_SET.
COMMISSIONER_PENDING_CHANNEL = 20
COMMISSIONER_PENDING_PANID = 0xafce
class Cert_9_2_7_DelayTimer(unittest.TestCase):
    """Thread certification 9.2.7: pending-dataset delay timer.

    A commissioner pushes a pending operational dataset (new channel and
    PAN id) with a delay timer; after the delay expires, all nodes must
    have switched to the new channel/PAN id and remain reachable.
    """
    def setUp(self):
        self.simulator = config.create_default_simulator()
        self.nodes = {}
        for i in range(1,4):
            self.nodes[i] = node.Node(i, simulator=self.simulator)
        self.nodes[COMMISSIONER].set_active_dataset(LEADER_ACTIVE_TIMESTAMP)
        self.nodes[COMMISSIONER].set_mode('rsdn')
        self.nodes[COMMISSIONER].set_panid(PANID_INIT)
        self.nodes[COMMISSIONER].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[COMMISSIONER].enable_whitelist()
        self.nodes[COMMISSIONER].set_router_selection_jitter(1)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].set_panid(PANID_INIT)
        self.nodes[LEADER].set_partition_id(0xffffffff)
        self.nodes[LEADER].add_whitelist(self.nodes[COMMISSIONER].get_addr64())
        self.nodes[LEADER].enable_whitelist()
        self.nodes[LEADER].set_router_selection_jitter(1)
        # The router starts with newer dataset timestamps and an initially
        # empty whitelist, so it first forms a separate partition.
        self.nodes[ROUTER].set_active_dataset(ROUTER_ACTIVE_TIMESTAMP)
        self.nodes[ROUTER].set_pending_dataset(ROUTER_PENDING_TIMESTAMP, ROUTER_PENDING_ACTIVE_TIMESTAMP)
        self.nodes[ROUTER].set_mode('rsdn')
        self.nodes[ROUTER].set_panid(PANID_INIT)
        self.nodes[ROUTER].set_partition_id(0x1)
        self.nodes[ROUTER].enable_whitelist()
        self.nodes[ROUTER].set_router_selection_jitter(1)
    def tearDown(self):
        for node in list(self.nodes.values()):
            node.stop()
        del self.nodes
        del self.simulator
    def test(self):
        self.nodes[LEADER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[COMMISSIONER].start()
        self.simulator.go(5)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        self.nodes[COMMISSIONER].commissioner_start()
        self.simulator.go(3)
        # Isolated at first, the router becomes leader of its own partition.
        self.nodes[ROUTER].start()
        self.simulator.go(10)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'leader')
        # Bridge the partitions; the router should merge in as a router.
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
        self.simulator.go(30)
        self.assertEqual(self.nodes[COMMISSIONER].get_state(), 'router')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
        # Pick the first non-link-local address of the router and ping it.
        ipaddrs = self.nodes[ROUTER].get_addrs()
        for ipaddr in ipaddrs:
            if ipaddr[0:4] != 'fe80':
                break
        self.assertTrue(self.nodes[LEADER].ping(ipaddr))
        # Push the pending dataset; delay_timer is in milliseconds.
        self.nodes[COMMISSIONER].send_mgmt_pending_set(pending_timestamp=40,
                                                       active_timestamp=80,
                                                       delay_timer=10000,
                                                       channel=COMMISSIONER_PENDING_CHANNEL,
                                                       panid=COMMISSIONER_PENDING_PANID)
        self.simulator.go(40)
        # After the delay expires every node must be on the new PAN/channel.
        self.assertEqual(self.nodes[LEADER].get_panid(), COMMISSIONER_PENDING_PANID)
        self.assertEqual(self.nodes[COMMISSIONER].get_panid(), COMMISSIONER_PENDING_PANID)
        self.assertEqual(self.nodes[ROUTER].get_panid(), COMMISSIONER_PENDING_PANID)
        self.assertEqual(self.nodes[LEADER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
        self.assertEqual(self.nodes[COMMISSIONER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
        self.assertEqual(self.nodes[ROUTER].get_channel(), COMMISSIONER_PENDING_CHANNEL)
        # Connectivity must survive the channel/PAN switch.
        ipaddrs = self.nodes[ROUTER].get_addrs()
        for ipaddr in ipaddrs:
            if ipaddr[0:4] != 'fe80':
                break
        self.assertTrue(self.nodes[LEADER].ping(ipaddr))
# Allow running this certification script directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "54071af2ead476cb315d77677e217acc",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 105,
"avg_line_length": 38.91588785046729,
"alnum_prop": 0.6332853025936599,
"repo_name": "georgecpr/openthread",
"id": "b15c63c35425910471b1abd74f0f9a6d63d54608",
"size": "5768",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/scripts/thread-cert/Cert_9_2_07_DelayTimer.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "15855"
},
{
"name": "C",
"bytes": "711259"
},
{
"name": "C#",
"bytes": "18077"
},
{
"name": "C++",
"bytes": "3784279"
},
{
"name": "M4",
"bytes": "51449"
},
{
"name": "Makefile",
"bytes": "94951"
},
{
"name": "Python",
"bytes": "1707571"
},
{
"name": "Ruby",
"bytes": "3397"
},
{
"name": "Shell",
"bytes": "39351"
}
],
"symlink_target": ""
} |
import logging
import StringIO
from webkitpy.common.system import outputcapture
from webkitpy.layout_tests.views.metered_stream import MeteredStream
_log = logging.getLogger(__name__)
class Printer(object):
    """Progress/result printer for the unit-test runner.

    Writes throttled one-line progress updates through a MeteredStream and
    keeps pass/fail/error counters for the final summary.
    """
    def __init__(self, stream, options=None):
        self.stream = stream
        self.meter = None
        self.options = options
        self.num_tests = 0
        self.num_completed = 0
        self.num_errors = 0
        self.num_failures = 0
        self.running_tests = []
        self.completed_tests = []
        if options:
            self.configure(options)
    def configure(self, options):
        # Set up the meter and install a logging handler whose level is
        # derived from --quiet/--verbose; also filter webkitpy log noise.
        self.options = options
        if options.timing:
            # --timing implies --verbose
            options.verbose = max(options.verbose, 1)
        log_level = logging.INFO
        if options.quiet:
            log_level = logging.WARNING
        elif options.verbose == 2:
            log_level = logging.DEBUG
        self.meter = MeteredStream(self.stream, (options.verbose == 2))
        handler = logging.StreamHandler(self.stream)
        # We constrain the level on the handler rather than on the root
        # logger itself.  This is probably better because the handler is
        # configured and known only to this module, whereas the root logger
        # is an object shared (and potentially modified) by many modules.
        # Modifying the handler, then, is less intrusive and less likely to
        # interfere with modifications made by other modules (e.g. in unit
        # tests).
        handler.name = __name__
        handler.setLevel(log_level)
        formatter = logging.Formatter("%(message)s")
        handler.setFormatter(formatter)
        logger = logging.getLogger()
        logger.addHandler(handler)
        logger.setLevel(logging.NOTSET)
        # Filter out most webkitpy messages.
        #
        # Messages can be selectively re-enabled for this script by updating
        # this method accordingly.
        def filter_records(record):
            """Filter out autoinstall and non-third-party webkitpy messages."""
            # FIXME: Figure out a way not to use strings here, for example by
            # using syntax like webkitpy.test.__name__. We want to be
            # sure not to import any non-Python 2.4 code, though, until
            # after the version-checking code has executed.
            if (record.name.startswith("webkitpy.common.system.autoinstall") or
                record.name.startswith("webkitpy.test")):
                return True
            if record.name.startswith("webkitpy"):
                return False
            return True
        testing_filter = logging.Filter()
        testing_filter.filter = filter_records
        # Display a message so developers are not mystified as to why
        # logging does not work in the unit tests.
        _log.info("Suppressing most webkitpy logging while running unit tests.")
        handler.addFilter(testing_filter)
        if self.options.pass_through:
            outputcapture.OutputCapture.stream_wrapper = _CaptureAndPassThroughStream
    def write_update(self, msg):
        self.meter.write_update(msg)
    def print_started_test(self, source, test_name):
        # Show the first running test; "(+N)" indicates parallel tests.
        self.running_tests.append(test_name)
        if len(self.running_tests) > 1:
            suffix = ' (+%d)' % (len(self.running_tests) - 1)
        else:
            suffix = ''
        if self.options.verbose:
            write = self.meter.write_update
        else:
            write = self.meter.write_throttled_update
        write(self._test_line(self.running_tests[0], suffix))
    def print_finished_test(self, source, test_name, test_time, failures, errors):
        # Record the outcome, then flush the list of completed tests --
        # failures/errors always get a full (unthrottled) write with the
        # failure text indented below the test line.
        write = self.meter.writeln
        if failures:
            lines = failures[0].splitlines() + ['']
            suffix = ' failed:'
            self.num_failures += 1
        elif errors:
            lines = errors[0].splitlines() + ['']
            suffix = ' erred:'
            self.num_errors += 1
        else:
            suffix = ' passed'
            lines = []
        if self.options.verbose:
            write = self.meter.writeln
        else:
            write = self.meter.write_throttled_update
        if self.options.timing:
            suffix += ' %.4fs' % test_time
        self.num_completed += 1
        # The test whose line is currently displayed goes first.
        if test_name == self.running_tests[0]:
            self.completed_tests.insert(0, [test_name, suffix, lines])
        else:
            self.completed_tests.append([test_name, suffix, lines])
        self.running_tests.remove(test_name)
        for test_name, msg, lines in self.completed_tests:
            if lines:
                self.meter.writeln(self._test_line(test_name, msg))
                for line in lines:
                    self.meter.writeln('  ' + line)
            else:
                write(self._test_line(test_name, msg))
        self.completed_tests = []
    def _test_line(self, test_name, suffix):
        # "[completed/total] test_name suffix"
        return '[%d/%d] %s%s' % (self.num_completed, self.num_tests, test_name, suffix)
    def print_result(self, run_time):
        # unittest-style summary line plus OK/FAILED verdict.
        write = self.meter.writeln
        write('Ran %d test%s in %.3fs' % (self.num_completed, self.num_completed != 1 and "s" or "", run_time))
        if self.num_failures or self.num_errors:
            write('FAILED (failures=%d, errors=%d)\n' % (self.num_failures, self.num_errors))
        else:
            write('\nOK\n')
class _CaptureAndPassThroughStream(object):
    """Stream wrapper that forwards writes to the real stream while also
    capturing them into an in-memory buffer for later inspection."""
    def __init__(self, stream):
        self._stream = stream
        self._buffer = StringIO.StringIO()
    def write(self, msg):
        self._stream.write(msg)
        # Note that we don't want to capture any output generated by the
        # debugger because that could cause the results of capture_output()
        # to be invalid.
        if not self._message_is_from_pdb():
            self._buffer.write(msg)
    def _message_is_from_pdb(self):
        # We will assume that if the pdb module is in the stack then the
        # output is being generated by the python debugger (or the user
        # calling something from inside the debugger).
        import inspect
        import pdb
        pdb_source = pdb.__file__.replace('.pyc', '.py')
        for frame in inspect.stack():
            if frame[1] == pdb_source:
                return True
        return False
    def flush(self):
        self._stream.flush()
    def getvalue(self):
        return self._buffer.getvalue()
| {
"content_hash": "0c1ffeb466c51eb5776a067dd2d3a3a7",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 111,
"avg_line_length": 36.54545454545455,
"alnum_prop": 0.5931281094527363,
"repo_name": "leighpauls/k2cro4",
"id": "0ec3035b364eaa56afe1c2692d86a70bb42dd171",
"size": "7802",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/WebKit/Tools/Scripts/webkitpy/test/printer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
} |
"""Basic examples for building pandas objects using the Data Commons Pandas API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datacommons_pandas as dcpd
def build_time_series_example():
    """Print demo calls of dcpd.build_time_series() together with their output."""
    demos = [
        ("Build a pd.Series of time series for one variable and one place.",
         "dcpd.build_time_series('country/CAN', 'Count_WildlandFireEvent')",
         ('country/CAN', 'Count_WildlandFireEvent')),
        ("Build a pd.Series of time series for one variable and one place and optional args.",
         "dcpd.build_time_series('country/USA', 'Count_Person', 'CensusPEPSurvey')",
         ('country/USA', 'Count_Person', 'CensusPEPSurvey')),
    ]
    for intro, shown_call, args in demos:
        # Reproduce the original banner exactly: blank line, '#' comment,
        # shell-style command, then the returned series.
        print("\n# {}\n$ {}\n{}".format(intro, shown_call,
                                        dcpd.build_time_series(*args)))
def build_time_series_dataframe_example():
    """Print demo calls of dcpd.build_time_series_dataframe() with their output."""

    def _show(intro_str, places, stat_var, desc_col=False):
        # Render the argument list the way a user would type it at a prompt.
        shown_args = "{}, '{}'".format(places, stat_var)
        if desc_col:
            shown_args += ", desc_col=True"
        frame = dcpd.build_time_series_dataframe(places, stat_var, desc_col)
        print("\n# {}\n$ dcpd.build_time_series_dataframe({})\n{}".format(
            intro_str, shown_args, frame))

    _show('Build a DataFrame of time series for one variable in multiple places.',
          ['geoId/33', 'geoId/29', 'country/USA'],
          'Median_Income_Person')
    _show('Build a DataFrame of time series with columns sorted in descending order.',
          ['country/USA'],
          'Median_Income_Person',
          desc_col=True)
def build_multivariate_dataframe_example():
    """Print a demo call of dcpd.build_multivariate_dataframe() with its output."""

    def _show(intro_str, places, stat_vars):
        result = dcpd.build_multivariate_dataframe(places, stat_vars)
        print("\n# {}\n$ dcpd.build_multivariate_dataframe({}, {})\n{}".format(
            intro_str, places, stat_vars, result))

    _show(
        'Build a DataFrame of latest observations for multiple variables in multiple places.',
        ['geoId/06', 'country/FRA'],
        ['Median_Age_Person', 'Count_Person', 'Count_Household'])
def expect_err_examples():
    """Demonstrate input validation: every call below must raise ValueError.

    The printed output is identical to the original hand-unrolled version,
    but the calls are data-driven so the advertised error count can never
    drift from the number of calls actually made.

    If the last two calls stop failing because Median_Income_Person
    statistics were added for NUTS geos, replace either the places or the
    StatVar.
    """
    bad_calls = [
        # (function, positional args) — each combination is invalid.
        (dcpd.build_time_series_dataframe,
         (['geoId/33'], ['Median_Income_Person', 'Count_Person'])),
        (dcpd.build_time_series_dataframe,
         (24, ['Median_Income_Person'])),
        (dcpd.build_multivariate_dataframe,
         ([3], ['Median_Income_Person', 'Count_Person'])),
        (dcpd.build_multivariate_dataframe,
         ('country/USA', True)),
        (dcpd.build_time_series_dataframe,
         (['nuts/HU2', 'nuts/HU22'], 'Median_Income_Person')),
        (dcpd.build_multivariate_dataframe,
         (['nuts/HU2', 'nuts/HU22'], ['Median_Income_Person'])),
    ]
    print("\n\nExpect {} errors, starting HERE:".format(len(bad_calls)))
    for func, args in bad_calls:
        try:
            func(*args)
        except ValueError as e:
            print("Successfully errored on: ", e)
    print("until HERE.")
def main():
    """Run every example in order, finishing with the expected-error demos."""
    for example in (
            build_time_series_example,
            build_time_series_dataframe_example,
            build_multivariate_dataframe_example,
            expect_err_examples,
    ):
        example()


if __name__ == '__main__':
    main()
| {
"content_hash": "3c6936f814a77d4e20c718eeb6f9f5ff",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 96,
"avg_line_length": 33.693548387096776,
"alnum_prop": 0.6213499281953088,
"repo_name": "datacommonsorg/api-python",
"id": "ea09ea1e68ec38839fe6de14dfae267fb256806f",
"size": "4751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "datacommons_pandas/examples/df_builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "55576119"
},
{
"name": "Python",
"bytes": "148544"
},
{
"name": "Shell",
"bytes": "1726"
}
],
"symlink_target": ""
} |
import datetime
from django.contrib.auth.models import User
from django.db import models
from tastypie.utils import now, aware_datetime
class Note(models.Model):
    """A simple note with an optional author, used by the profiling tests."""
    # Author is optional: anonymous notes are allowed.
    author = models.ForeignKey(User, blank=True, null=True)
    title = models.CharField(max_length=100)
    slug = models.SlugField()
    content = models.TextField(blank=True)
    is_active = models.BooleanField(default=True)
    # Both timestamps default to creation time; ``updated`` is refreshed
    # on every save() below.
    created = models.DateTimeField(default=now)
    updated = models.DateTimeField(default=now)

    def __unicode__(self):
        # Python 2-style string representation (this codebase predates __str__).
        return self.title

    def save(self, *args, **kwargs):
        # Touch the modification timestamp on every save.
        self.updated = now()
        return super(Note, self).save(*args, **kwargs)

    def what_time_is_it(self):
        # Fixed timezone-aware datetime — handy for deterministic tests.
        return aware_datetime(2010, 4, 1, 0, 48)

    def get_absolute_url(self):
        # NOTE(review): deliberately fake path — presumably only exercised by
        # tests; confirm no real routing depends on it.
        return '/some/fake/path/%s/' % self.pk

    @property
    def my_property(self):
        # Constant read-only property used to test attribute handling.
        return 'my_property'
| {
"content_hash": "5528b95f1a54291379099d21e89ad028",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 59,
"avg_line_length": 28.03125,
"alnum_prop": 0.6677814938684504,
"repo_name": "strets123/django-tastypie-tweaks",
"id": "1fe679e4e09ae0c41f052b51f57077604a883e03",
"size": "897",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/profilingtests/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "988"
},
{
"name": "Python",
"bytes": "783443"
},
{
"name": "Shell",
"bytes": "1480"
}
],
"symlink_target": ""
} |
import sys
from os.path import join, dirname
from absl import app
import tensorflow as tf
tf.compat.v1.enable_eager_execution()
sys.path.append('../')
import datasets
from util import io as ioutil
def main(_):
    """Build the dataset described by dragon_specular.ini and drop into an
    IPython shell on every pipeline batch for interactive debugging.

    All intermediate locals (``config``, ``dataset``, ``ret``, ``batch`` …)
    are deliberately kept in scope so they are inspectable from the
    embedded shell.
    """
    config_ini = join(dirname(__file__), '..', 'config', 'dragon_specular.ini')
    config = ioutil.read_config(config_ini)

    # Make training dataset: the dataset class is named in the config file.
    dataset_name = config.get('DEFAULT', 'dataset')
    Dataset = datasets.get_dataset_class(dataset_name)
    dataset = Dataset(config, 'train')

    # Smoke-test the private loader on a single file; ``ret`` stays around
    # for inspection in the embedded shell.
    path = dataset.files[1]
    ret = dataset._load_data(path)

    # Iterate the input pipeline, pausing in an interactive shell each batch.
    no_batch = config.getboolean('DEFAULT', 'no_batch')
    datapipe = dataset.build_pipeline(no_batch=no_batch)
    for batch_i, batch in enumerate(datapipe):
        from IPython import embed; embed()


if __name__ == '__main__':
    app.run(main)
| {
"content_hash": "c68d8d06312b503c8a2d1732ac96898c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 79,
"avg_line_length": 25.515151515151516,
"alnum_prop": 0.668646080760095,
"repo_name": "google/neural-light-transport",
"id": "7eab2e9233141229915ea54e3a9ad3980ec8ae2f",
"size": "1418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nlt/debug/dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "126786"
}
],
"symlink_target": ""
} |
"""
URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.views.generic import RedirectView
from . import views
# URL namespace for this app: reverse with 'squads:<name>'.
app_name = 'squads'

urlpatterns = [
    url(r'^$', views.main, name='main'),
    url(r'^registration/$', views.registration, name='registration'),
    url(r'^profile/$', views.profile, name='profile'),
    url(r'^leave/$', views.leave, name='leave'),
    url(r'^remove/$', views.remove, name='remove'),
    # Captures a numeric squad id and an alphanumeric invite code; both are
    # passed to views.join as keyword arguments.
    url(r'^join/(?P<squad_id>\d+)/(?P<code>\w+)/$', views.join, name='join'),
]
| {
"content_hash": "1a03b04bcda251ef35d6b917ebedd6a9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 35.58064516129032,
"alnum_prop": 0.6672710788757933,
"repo_name": "Flyingfox646/flyingfox",
"id": "393867aad96c8901a95d14916e1d05eafacef1d1",
"size": "1103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/squads/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1515"
},
{
"name": "CSS",
"bytes": "103959"
},
{
"name": "HTML",
"bytes": "317380"
},
{
"name": "JavaScript",
"bytes": "17458"
},
{
"name": "Python",
"bytes": "415174"
},
{
"name": "Shell",
"bytes": "1759"
}
],
"symlink_target": ""
} |
"""Base classes for our unit tests.
Allows overriding of flags for use of fakes, and some black magic for
inline callbacks.
"""
import functools
import unittest
import uuid
import mox
import nose.plugins.skip
from oslo.config import cfg
import stubout
from cinder import flags
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
from cinder import service
from cinder import tests
from cinder.tests import fake_flags
# Extra configuration options that exist only for the test suite.
test_opts = [
    cfg.StrOpt('sqlite_clean_db',
               default='clean.sqlite',
               help='File name of clean sqlite db'),
    cfg.BoolOpt('fake_tests',
                default=True,
                help='should we use everything for testing'), ]

# Register the test-only options on the global flag object.
FLAGS = flags.FLAGS
FLAGS.register_opts(test_opts)

LOG = logging.getLogger(__name__)
class skip_test(object):
    """Decorator that unconditionally skips the decorated test."""
    # TODO(tr3buchet): remember forever what comstud did here

    def __init__(self, msg):
        self.message = msg

    def __call__(self, func):
        @functools.wraps(func)
        def _skipper(*args, **kw):
            """Replacement body that always raises SkipTest."""
            raise nose.SkipTest(self.message)

        return _skipper
class skip_if(object):
    """Decorator that skips a test if ``condition`` is true.

    The condition is evaluated once, at decoration time.
    """

    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        @functools.wraps(func)
        def _skipper(*args, **kw):
            """Skip the test when the condition holds, otherwise run it."""
            if self.condition:
                raise nose.SkipTest(self.message)
            # Propagate the wrapped function's return value; the original
            # wrapper silently dropped it, unlike skip_if_fake.
            return func(*args, **kw)
        return _skipper
class skip_unless(object):
    """Decorator that skips a test if ``condition`` is not true.

    The condition is evaluated once, at decoration time.
    """

    def __init__(self, condition, msg):
        self.condition = condition
        self.message = msg

    def __call__(self, func):
        @functools.wraps(func)
        def _skipper(*args, **kw):
            """Run the test only when the condition holds."""
            if not self.condition:
                raise nose.SkipTest(self.message)
            # Propagate the wrapped function's return value; the original
            # wrapper silently dropped it, unlike skip_if_fake.
            return func(*args, **kw)
        return _skipper
def skip_if_fake(func):
    """Decorator that skips a test if running in fake mode."""
    # functools.wraps added for consistency with the class-based skippers
    # above, so the wrapped test keeps its own name/docstring.
    @functools.wraps(func)
    def _skipper(*args, **kw):
        """Wrapped skipper function."""
        if FLAGS.fake_tests:
            raise unittest.SkipTest('Test cannot be run in fake mode')
        else:
            return func(*args, **kw)
    return _skipper
class TestingException(Exception):
    """Generic exception raised on purpose from within tests."""
class TestCase(unittest.TestCase):
    """Test case base class for all unit tests."""

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()
        # Re-apply test defaults and re-parse an empty arg list so every
        # test starts from a known flag configuration.
        fake_flags.set_defaults(FLAGS)
        flags.parse_args([], default_config_files=[])

        # NOTE(vish): We need a better method for creating fixtures for tests
        # now that we have some required db setup for the system
        # to work properly.
        self.start = timeutils.utcnow()
        tests.reset_db()

        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = stubout.StubOutForTesting()
        # Timers injected by tests (stopped in tearDown) and services started
        # via start_service() (killed in tearDown).
        self.injected = []
        self._services = []
        FLAGS.set_override('fatal_exception_format_errors', True)

    def tearDown(self):
        """Runs after each test method to tear down test environment."""
        try:
            # Verify mox/stub expectations first; the cleanup in ``finally``
            # runs even when verification raises.
            self.mox.UnsetStubs()
            self.stubs.UnsetAll()
            self.stubs.SmartUnsetAll()
            self.mox.VerifyAll()
            super(TestCase, self).tearDown()
        finally:
            # Reset any overridden flags
            FLAGS.reset()

            # Stop any timers
            for x in self.injected:
                try:
                    x.stop()
                except AssertionError:
                    pass

            # Kill any services
            for x in self._services:
                try:
                    x.kill()
                except Exception:
                    pass

            # Delete attributes that don't start with _ so they don't pin
            # memory around unnecessarily for the duration of the test
            # suite
            for key in [k for k in self.__dict__.keys() if k[0] != '_']:
                del self.__dict__[key]

    def flags(self, **kw):
        """Override flag variables for a test."""
        # Python 2 codebase: dict.iteritems().
        for k, v in kw.iteritems():
            FLAGS.set_override(k, v)

    def start_service(self, name, host=None, **kwargs):
        """Create and start a cinder service; it is killed in tearDown()."""
        # A random host keeps concurrently started services distinct.
        host = host and host or uuid.uuid4().hex
        kwargs.setdefault('host', host)
        kwargs.setdefault('binary', 'cinder-%s' % name)
        svc = service.Service.create(**kwargs)
        svc.start()
        self._services.append(svc)
        return svc

    # Useful assertions
    def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
        """Assert two dicts are equivalent.

        This is a 'deep' match in the sense that it handles nested
        dictionaries appropriately.

        NOTE:

            If you don't care (or don't know) a given value, you can specify
            the string DONTCARE as the value. This will cause that dict-item
            to be skipped.
        """
        def raise_assertion(msg):
            # Include both full dicts in the failure message for context.
            d1str = str(d1)
            d2str = str(d2)
            base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s '
                        'd2: %(d2str)s' % locals())
            raise AssertionError(base_msg)

        d1keys = set(d1.keys())
        d2keys = set(d2.keys())
        if d1keys != d2keys:
            d1only = d1keys - d2keys
            d2only = d2keys - d1keys
            raise_assertion('Keys in d1 and not d2: %(d1only)s. '
                            'Keys in d2 and not d1: %(d2only)s' % locals())

        for key in d1keys:
            d1value = d1[key]
            d2value = d2[key]
            try:
                # Attempt a numeric comparison within ``tolerance``.
                error = abs(float(d1value) - float(d2value))
                within_tolerance = error <= tolerance
            except (ValueError, TypeError):
                # If both values aren't convertable to float, just ignore
                # ValueError if arg is a str, TypeError if it's something else
                # (like None)
                within_tolerance = False

            if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
                self.assertDictMatch(d1value, d2value)
            elif 'DONTCARE' in (d1value, d2value):
                continue
            elif approx_equal and within_tolerance:
                continue
            elif d1value != d2value:
                raise_assertion("d1['%(key)s']=%(d1value)s != "
                                "d2['%(key)s']=%(d2value)s" % locals())

    def assertDictListMatch(self, L1, L2, approx_equal=False, tolerance=0.001):
        """Assert a list of dicts are equivalent."""
        def raise_assertion(msg):
            L1str = str(L1)
            L2str = str(L2)
            base_msg = ('List of dictionaries do not match: %(msg)s '
                        'L1: %(L1str)s L2: %(L2str)s' % locals())
            raise AssertionError(base_msg)

        L1count = len(L1)
        L2count = len(L2)
        if L1count != L2count:
            raise_assertion('Length mismatch: len(L1)=%(L1count)d != '
                            'len(L2)=%(L2count)d' % locals())

        # Pairwise deep-compare; lists must be in the same order.
        for d1, d2 in zip(L1, L2):
            self.assertDictMatch(d1, d2, approx_equal=approx_equal,
                                 tolerance=tolerance)

    def assertSubDictMatch(self, sub_dict, super_dict):
        """Assert a sub_dict is subset of super_dict."""
        self.assertTrue(set(sub_dict.keys()).issubset(set(super_dict.keys())))
        for k, sub_value in sub_dict.items():
            super_value = super_dict[k]
            if isinstance(sub_value, dict):
                self.assertSubDictMatch(sub_value, super_value)
            elif 'DONTCARE' in (sub_value, super_value):
                continue
            else:
                self.assertEqual(sub_value, super_value)

    def assertIn(self, a, b, *args, **kwargs):
        """Python < v2.7 compatibility.  Assert 'a' in 'b'"""
        try:
            # Prefer the stdlib implementation when it exists (2.7+).
            f = super(TestCase, self).assertIn
        except AttributeError:
            self.assertTrue(a in b, *args, **kwargs)
        else:
            f(a, b, *args, **kwargs)

    def assertNotIn(self, a, b, *args, **kwargs):
        """Python < v2.7 compatibility.  Assert 'a' NOT in 'b'"""
        try:
            # Prefer the stdlib implementation when it exists (2.7+).
            f = super(TestCase, self).assertNotIn
        except AttributeError:
            self.assertFalse(a in b, *args, **kwargs)
        else:
            f(a, b, *args, **kwargs)
| {
"content_hash": "8df4b38515dd0450e1baefc1b2af3a2b",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 79,
"avg_line_length": 33.059925093632955,
"alnum_prop": 0.5521694800045316,
"repo_name": "tomasdubec/openstack-cinder",
"id": "967eadca37d7e3af03bcfd6ac0827d3e81d9b9e8",
"size": "9604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from unittest import TestCase
import msal
from office365.graph_client import GraphClient
from tests import load_settings
def acquire_token_by_username_password():
    """Acquire a Microsoft Graph token via the username/password flow."""
    settings = load_settings()
    tenant = settings.get('default', 'tenant')
    app = msal.PublicClientApplication(
        authority='https://login.microsoftonline.com/{0}'.format(tenant),
        client_id=settings.get('client_credentials', 'client_id')
    )
    return app.acquire_token_by_username_password(
        username=settings.get('user_credentials', "username"),
        password=settings.get('user_credentials', "password"),
        scopes=["https://graph.microsoft.com/.default"])
def acquire_token_by_client_credentials():
    """Acquire a Microsoft Graph token via the client-credentials flow."""
    settings = load_settings()
    tenant = settings.get('default', 'tenant')
    confidential_app = msal.ConfidentialClientApplication(
        authority='https://login.microsoftonline.com/{0}'.format(tenant),
        client_id=settings.get('client_credentials', 'client_id'),
        client_credential=settings.get('client_credentials', 'client_secret')
    )
    return confidential_app.acquire_token_for_client(
        scopes=["https://graph.microsoft.com/.default"])
class GraphTestCase(TestCase):
    """Microsoft Graph specific test case base class"""
    client = None  # type: GraphClient

    @classmethod
    def setUpClass(cls):
        # Authenticate once per test class using delegated user credentials;
        # all tests in the class share this client.
        cls.client = GraphClient(acquire_token_by_username_password)
| {
"content_hash": "7571baab845d6bcca958faeff84931a0",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 106,
"avg_line_length": 38.84615384615385,
"alnum_prop": 0.6646864686468646,
"repo_name": "vgrem/Office365-REST-Python-Client",
"id": "c8da5513d71f78f1b0cbe1692172ffb21a62b788",
"size": "1515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/graph_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1659292"
}
],
"symlink_target": ""
} |
import datetime
import glob
import logging
import math
import os
import re
import time
import warnings
from typing import Optional, Union, List, Dict, Tuple, Any, Type
import torch
from torch.cuda import amp
from torch.nn.utils import clip_grad_norm_
import torch.distributed as dist
from torch.cuda.amp.grad_scaler import OptState
from allennlp.common.checks import ConfigurationError, check_for_gpu
from allennlp.common import util as common_util, Tqdm, Lazy
from allennlp.common.file_utils import hardlink_or_copy
from allennlp.data.data_loaders.data_loader import DataLoader, TensorDict
from allennlp.models.model import Model
from allennlp.nn.parallel import DdpAccelerator, DdpWrappedModel, TorchDdpAccelerator
from allennlp.nn.util import dist_reduce_sum
from allennlp.training.callbacks import ConsoleLoggerCallback
from allennlp.training.callbacks.confidence_checks import ConfidenceChecksCallback
from allennlp.training.callbacks.backward import MixedPrecisionBackwardCallback
from allennlp.training.checkpointer import Checkpointer
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
from allennlp.training.metric_tracker import MetricTracker
from allennlp.training.momentum_schedulers.momentum_scheduler import MomentumScheduler
from allennlp.training.moving_average import MovingAverage
from allennlp.training.optimizers import Optimizer
from allennlp.training.trainer import Trainer, TrainerCheckpoint
from allennlp.training.callbacks import TrainerCallback
from allennlp.training import util as training_util
logger = logging.getLogger(__name__)
@Trainer.register("gradient_descent", constructor="from_partial_objects")
class GradientDescentTrainer(Trainer):
"""
A trainer for doing supervised learning with gradient descent. It just takes a labeled dataset
and a `DataLoader`, and uses the supplied `Optimizer` to learn the weights for your model over
some fixed number of epochs. You can also pass in a validation data_loader and enable early
stopping. There are many other bells and whistles as well.
Registered as a `Trainer` with the name "gradient_descent" (and is also the default `Trainer`).
The constructor that is registered is [`from_partial_objects`](#from_partial_objects) -
see the arguments to that function for the exact keys that should be used, if you are using
a configuration file. They largely match the arguments to `__init__`, and we don't repeat their
docstrings in `from_partial_objects`.
[0]: https://tinyurl.com/y5mv44fw
# Parameters
model : `Model`, required.
An AllenNLP model to be optimized. Pytorch Modules can also be optimized if
their `forward` method returns a dictionary with a "loss" key, containing a
scalar tensor representing the loss function to be optimized.
If you are training your model using GPUs, your model should already be
on the correct device. (If you are using our `train` command this will be
handled for you.)
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
optimizer : `torch.nn.Optimizer`, required.
An instance of a Pytorch Optimizer, instantiated with the parameters of the
model to be optimized.
data_loader : `DataLoader`, required.
A `DataLoader` containing your `Dataset`, yielding padded indexed batches.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
patience : `Optional[int] > 0`, optional (default=`None`)
Number of epochs to be patient before early stopping: the training is stopped
after `patience` epochs with no improvement. If given, it must be `> 0`.
If None, early stopping is disabled.
validation_metric : `Union[str, List[str]]`, optional (default=`"-loss"`)
Validation metric to measure for whether to stop training using patience
and whether to serialize an `is_best` model each epoch. The metric name
must be prepended with either "+" or "-", which specifies whether the metric
is an increasing or decreasing function. If you specify more than one metric,
the metrics will be summed to make the `is_best` decision.
validation_data_loader : `DataLoader`, optional (default=`None`)
A `DataLoader` to use for the validation set. If `None`, then
use the training `DataLoader` with the validation data.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
num_epochs : `int`, optional (default = `20`)
Number of training epochs.
serialization_dir : `str`, optional (default=`None`)
Path to directory for saving and loading model files. Models will not be saved if
this parameter is not passed.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
checkpointer : `Checkpointer`, optional (default=`None`)
A `Checkpointer` is responsible for periodically saving model weights. If none is given
here, we will construct one with default parameters.
cuda_device : `Optional[Union[int, torch.device]]`, optional (default = `None`)
An integer or `torch.device` specifying the CUDA device to use for this process.
If -1, the CPU is used. If `None` and you have a GPU available, that GPU will be used.
!!! Note
If you *don't* intend to use a GPU, but you have one available, you'll need
to explicitly set `cuda_device=-1`.
!!! Note
If you intend to use a GPU, your model already needs to be on the correct device,
which you can do with `model = model.cuda()`.
!!! Note
Data parallelism is controlled at the allennlp train level, so each trainer will have a single GPU.
grad_norm : `Union[float, bool]`, optional (default = `False`)
If a float, gradient norms will be rescaled to have a maximum of this value.
If `True`, the gradient norms will be calculated and passed through to any `TrainerCallbacks`,
but won't be rescaled.
If `False`, gradient norms will not be calculated or rescaled.
grad_clipping : `float`, optional (default = `None`)
If provided, gradients will be clipped `during the backward pass` to have an (absolute)
maximum of this value. If you are getting `NaNs` in your gradients during training
that are not solved by using `grad_norm`, you may need this.
learning_rate_scheduler : `LearningRateScheduler`, optional (default = `None`)
If specified, the learning rate will be decayed with respect to
this schedule at the end of each epoch (or batch, if the scheduler implements
the `step_batch` method). If you use `torch.optim.lr_scheduler.ReduceLROnPlateau`,
this will use the `validation_metric` provided to determine if learning has plateaued.
To support updating the learning rate on every batch, this can optionally implement
`step_batch(batch_num_total)` which updates the learning rate given the batch number.
momentum_scheduler : `MomentumScheduler`, optional (default = `None`)
If specified, the momentum will be updated at the end of each batch or epoch
according to the schedule.
moving_average : `MovingAverage`, optional, (default = `None`)
If provided, we will maintain moving averages for all parameters. During training, we
employ a shadow variable for each parameter, which maintains the moving average. During
evaluation, we backup the original parameters and assign the moving averages to corresponding
parameters. Be careful that when saving the checkpoint, we will save the moving averages of
parameters. This is necessary because we want the saved model to perform as well as the validated
model if we load it later. But this may cause problems if you restart the training from checkpoint.
callbacks : `List[TrainerCallback]`, optional (default = `None`)
A list of callbacks that can be called at certain events: e.g. each batch, epoch, and at the start
and end of training, etc.
distributed : `bool`, optional, (default = `False`)
If set, PyTorch's `DistributedDataParallel` is used to train the model in multiple GPUs. This also
requires `world_size` to be greater than 1.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately (you need a top-level "distributed" key, next to
the "trainer" entry, that specifies a list of "cuda_devices").
local_rank : `int`, optional, (default = `0`)
This is the unique identifier of the `Trainer` in a distributed process group. The GPU device id is
used as the rank.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
world_size : `int`, (default = `1`)
The number of `Trainer` workers participating in the distributed training.
In a typical AllenNLP configuration file, this parameter does not get an entry under the
"trainer", it gets constructed separately.
num_gradient_accumulation_steps : `int`, optional, (default = `1`)
Gradients are accumulated for the given number of steps before doing an optimizer step. This can
be useful to accommodate batches that are larger than the RAM size. Refer [Thomas Wolf's
post][0] for details on Gradient Accumulation.
use_amp : `bool`, optional, (default = `False`)
If `True`, we'll train using [Automatic Mixed Precision](https://pytorch.org/docs/stable/amp.html).
enable_default_callbacks : `bool`, optional (default = `True`)
When `True`, the [`DEFAULT_CALLBACKS`](#default_callbacks) will be used in
addition to any other callbacks listed in the `callbacks` parameter.
When set to `False`, `DEFAULT_CALLBACKS` are not used.
run_confidence_checks : `bool`, optional (default = `True`)
Determines whether model confidence checks, such as
[`NormalizationBiasVerification`](../../confidence_checks/normalization_bias_verification/),
are run.
run_sanity_checks : `bool`, optional (default = `True`)
This parameter is deprecated. Please use `run_confidence_checks` instead.
grad_scaling : `bool`, optional (default = `True`)
When `use_amp` is `True`, this determines whether or not to use a [`GradScaler`]
(https://pytorch.org/docs/stable/amp.html?highlight=gradscaler#torch.cuda.amp.GradScaler).
!!! Note
This parameter is ignored when `use_amp` is `False`.
ddp_wrapped_model : `Optional[DdpWrappedModel]`, optional (default = `None`)
The `model` wrapped with a `DdpAccelerator` for distributed training.
!!! Note
This is required for distributed training.
"""
    def __init__(
        self,
        model: Model,
        optimizer: torch.optim.Optimizer,
        data_loader: DataLoader,
        patience: Optional[int] = None,
        validation_metric: Union[str, List[str]] = "-loss",
        validation_data_loader: DataLoader = None,
        num_epochs: int = 20,
        serialization_dir: Optional[Union[str, os.PathLike]] = None,
        checkpointer: Optional[Checkpointer] = None,
        cuda_device: Optional[Union[int, torch.device]] = None,
        grad_norm: Union[float, bool] = False,
        grad_clipping: Optional[float] = None,
        learning_rate_scheduler: Optional[LearningRateScheduler] = None,
        momentum_scheduler: Optional[MomentumScheduler] = None,
        moving_average: Optional[MovingAverage] = None,
        callbacks: List[TrainerCallback] = None,
        distributed: bool = False,
        local_rank: int = 0,
        world_size: int = 1,
        num_gradient_accumulation_steps: int = 1,
        use_amp: bool = False,
        enable_default_callbacks: bool = True,
        run_confidence_checks: bool = True,
        grad_scaling: bool = True,
        ddp_wrapped_model: Optional[DdpWrappedModel] = None,
        **kwargs,
    ) -> None:
        """See the class docstring for parameter documentation."""
        super().__init__(
            serialization_dir=serialization_dir,
            cuda_device=cuda_device,
            distributed=distributed,
            local_rank=local_rank,
            world_size=world_size,
        )

        # Backwards compatibility for the old 'run_sanity_checks' name.
        if "run_sanity_checks" in kwargs:
            warnings.warn(
                "'run_sanity_checks' is deprecated, please use 'run_confidence_checks' instead.",
                DeprecationWarning,
            )
            run_confidence_checks = kwargs["run_sanity_checks"]

        # I am not calling move_to_gpu here, because if the model is
        # not already on the GPU then the optimizer is going to be wrong.
        self.model = model

        self.data_loader = data_loader
        self.data_loader.set_target_device(self.cuda_device)
        self._validation_data_loader = validation_data_loader
        if self._validation_data_loader is not None:
            self._validation_data_loader.set_target_device(self.cuda_device)
        self.optimizer = optimizer

        if patience is None:  # no early stopping
            if validation_data_loader is not None:
                logger.warning(
                    "You provided a validation dataset but patience was set to None, "
                    "meaning that early stopping is disabled"
                )
        elif (not isinstance(patience, int)) or patience <= 0:
            raise ConfigurationError(
                '{} is an invalid value for "patience": it must be a positive integer '
                "or None (if you want to disable early stopping)".format(patience)
            )

        # For tracking is_best_so_far and should_stop_early
        self._metric_tracker = MetricTracker(validation_metric, patience)

        self._num_epochs = num_epochs

        self._checkpointer: Optional[Checkpointer] = checkpointer

        self._grad_norm = grad_norm
        self._grad_clipping = grad_clipping

        self._learning_rate_scheduler = learning_rate_scheduler
        self._momentum_scheduler = momentum_scheduler
        self._moving_average = moving_average

        self._callbacks = callbacks or []
        default_callbacks = list(DEFAULT_CALLBACKS) if enable_default_callbacks else []

        if run_confidence_checks:
            default_callbacks.append(ConfidenceChecksCallback)

        # Only instantiate a default callback when no callback of the exact
        # same class was already supplied by the caller.
        for callback_cls in default_callbacks:
            for callback in self._callbacks:
                if callback.__class__ == callback_cls:
                    break
            else:
                self._callbacks.append(callback_cls(self._serialization_dir))

        self._num_gradient_accumulation_steps = num_gradient_accumulation_steps

        self._ddp_wrapped_model = ddp_wrapped_model
        if distributed:
            # The model needs to be wrapped before initializing the optimizer,
            # so at this point it's too late to wrap the model.
            if ddp_wrapped_model is None:
                raise ValueError("trainer requires 'ddp_wrapped_model' for distributed training")
            # Make sure checkpointer knows if we're working with a sharded model.
            if self._checkpointer is not None:
                self._checkpointer.state_is_sharded = ddp_wrapped_model.is_sharded

        # Enable automatic mixed precision training.
        self._scaler: Optional[amp.GradScaler] = None
        self._use_amp = use_amp
        if self._use_amp:
            if self.cuda_device == torch.device("cpu"):
                raise ValueError("Using AMP requires a cuda device")
            if grad_scaling:
                if self._ddp_wrapped_model is None:
                    self._scaler = amp.GradScaler()
                else:
                    # Wrapped (possibly sharded) models may supply a
                    # specialized grad scaler.
                    self._scaler = self._ddp_wrapped_model.init_grad_scaler()

        # training state management
        self._epochs_completed: int = 0
        self._start_after_epochs_completed: int = 0
        self._batches_in_epoch_completed: int = 0
        self._start_after_batches_in_epoch_completed: int = 0
        self._best_model_filename: Optional[str] = None
        self._should_validate_this_epoch: bool = True

        # This is a kind of training state, but it is not serialized with the trainer state, because we can
        # re-create it with `epochs_completed` and `batches_in_epoch_completed`.
        self._total_batches_completed: int = 0
@property
def _pytorch_model(self):
    """The raw module to invoke: the DDP-wrapped module when one exists, else `self.model`."""
    wrapped = self._ddp_wrapped_model
    return self.model if wrapped is None else wrapped.model
def clip_gradient(self):
    """
    Performs gradient clipping.
    If the model is in mixed precision training, we would first unscale the gradient.
    """
    if self._grad_clipping is not None:
        # 1. We have to unscale the gradient before clipping
        if self._scaler is not None:
            # NOTE(review): peeks at a private torch AMP attribute
            # (`_per_optimizer_states`) to learn whether `unscale_` already ran
            # this step (e.g. in `rescale_gradients()`); may break across torch
            # versions — confirm on upgrade.
            optimizer_state = self._scaler._per_optimizer_states[id(self.optimizer)]
            # 2. The `unscale_` shouldn't be performed more than once per optimizer per step call,
            # so we only perform `unscale_` if it has not already been called.
            if optimizer_state["stage"] is not OptState.UNSCALED:
                self._scaler.unscale_(self.optimizer)
        # Clamp every gradient element into [-grad_clipping, +grad_clipping];
        # parameters without gradients are skipped.
        torch.nn.utils.clip_grad_value_(
            [p for p in self.model.parameters() if p.grad is not None], self._grad_clipping
        )
def rescale_gradients(self) -> Optional[float]:
    """
    Performs gradient rescaling. Is a no-op if gradient rescaling is not enabled.
    Returns the norm of the gradients if `grad_norm` is `True` or a `float`,
    otherwise returns `None`.
    """
    if not isinstance(self._grad_norm, bool):
        # `_grad_norm` is a float threshold: clip the total gradient norm to it.
        if self._scaler is not None:
            # Need to first unscale gradients in order to clip as usual.
            self._scaler.unscale_(self.optimizer)
        # Sometimes logic for clipping has to implemented within the model, like
        # with FairScale's FullyShardedDataParallel.
        if self._ddp_wrapped_model is not None:
            return self._ddp_wrapped_model.clip_grad_norm_(self._grad_norm).item()
        else:
            parameters_to_clip = [p for p in self.model.parameters() if p.grad is not None]
            return clip_grad_norm_(parameters_to_clip, self._grad_norm).item()
    elif self._grad_norm:
        # `_grad_norm is True`: report the total gradient norm without clipping.
        parameters_to_clip = [p for p in self.model.parameters() if p.grad is not None]
        return torch.norm(
            torch.stack([torch.norm(p.grad.detach()) for p in parameters_to_clip])
        ).item()
    else:
        # `_grad_norm is False`: gradient rescaling disabled entirely.
        return None
def batch_outputs(self, batch: TensorDict, for_training: bool) -> Dict[str, torch.Tensor]:
    """
    Does a forward pass on the given batch and returns the output dictionary that the model
    returns, after adding any specified regularization penalty to the loss (if training).

    # Parameters

    batch : `TensorDict`
        The batch of tensors passed as keyword arguments to `model.forward()`.
    for_training : `bool`
        If `True`, a `"loss"` key is required in the model's output and the model's
        regularization penalty (if any) is added to it and also exposed as `"reg_loss"`.

    # Raises

    `RuntimeError`
        If `for_training` is `True` and the model's output has no `"loss"` key.
    """
    output_dict = self._pytorch_model(**batch)
    if for_training:
        # Explicit check instead of `assert`: asserts are stripped when Python
        # runs with -O, which would silently skip this validation; catching
        # AssertionError could also mask unrelated assertion failures raised
        # inside `get_regularization_penalty()`.
        if "loss" not in output_dict:
            raise RuntimeError(
                "The model you are trying to optimize does not contain a"
                " 'loss' key in the output of model.forward(inputs)."
            )
        regularization_penalty = self.model.get_regularization_penalty()
        if regularization_penalty is not None:
            output_dict["reg_loss"] = regularization_penalty
            output_dict["loss"] += regularization_penalty
    return output_dict
def _train_epoch(self, epoch: int) -> Dict[str, float]:
    """
    Trains one epoch and returns the metrics for that epoch.

    When resuming from a mid-epoch checkpoint, batches that were already
    completed are still read from the data loader (to keep any data-loading
    randomness in sync) but are otherwise skipped.
    """
    logger.info("Epoch %d/%d", epoch, self._num_epochs - 1)
    cpu_memory_usage = []
    for worker, memory in common_util.peak_cpu_memory().items():
        cpu_memory_usage.append((worker, memory))
        logger.info(f"Worker {worker} memory usage: {common_util.format_size(memory)}")
    gpu_memory_usage = []
    for gpu, memory in common_util.peak_gpu_memory().items():
        gpu_memory_usage.append((gpu, memory))
        logger.info(f"GPU {gpu} memory usage: {common_util.format_size(memory)}")
    regularization_penalty = self.model.get_regularization_penalty()
    train_loss = 0.0
    # Regularization losses stay `None` when the model defines no penalty.
    train_reg_loss = None if regularization_penalty is None else 0.0
    batch_reg_loss = None if regularization_penalty is None else 0.0
    # Set the model to "train" mode.
    self._pytorch_model.train()
    # Get tqdm for the training batches
    batch_generator = iter(self.data_loader)
    # Group batches for gradient accumulation: one optimizer step per group.
    batch_group_generator = common_util.lazy_groups_of(
        batch_generator, self._num_gradient_accumulation_steps
    )
    logger.info("Training")
    num_training_batches: Union[int, float]
    try:
        len_data_loader = len(self.data_loader)
        num_training_batches = math.ceil(
            len_data_loader / self._num_gradient_accumulation_steps
        )
    except TypeError:
        # The data loader has no __len__ (streaming); tqdm gets no total.
        num_training_batches = float("inf")
    # Having multiple tqdm bars in case of distributed training will be a mess. Hence only the primary's
    # progress is shown
    if self._primary:
        batch_group_generator_tqdm = Tqdm.tqdm(
            batch_group_generator, total=num_training_batches
        )
    else:
        batch_group_generator_tqdm = batch_group_generator
    done_early = False
    for batch_group in batch_group_generator_tqdm:
        if done_early:
            break
        # Fast-forward through batches already completed before the checkpoint.
        if self._epochs_completed < self._start_after_epochs_completed or (
            self._epochs_completed == self._start_after_epochs_completed
            and self._batches_in_epoch_completed < self._start_after_batches_in_epoch_completed
        ):
            self._batches_in_epoch_completed += 1
            self._total_batches_completed += 1
            continue
        self.optimizer.zero_grad()
        batch_loss = 0.0
        batch_group_outputs = []
        for batch in batch_group:
            if self._distributed:
                # Check whether the other workers have stopped already (due to differing amounts of
                # data in each). If so, we can't proceed because we would hang when we hit the
                # barrier implicit in Model.forward. We use a IntTensor instead a BoolTensor
                # here because NCCL process groups apparently don't support BoolTensor.
                done = torch.tensor(0, device=self.cuda_device)
                torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
                if done.item() > 0:
                    done_early = True
                    logger.warning(
                        f"Worker {torch.distributed.get_rank()} finishing training early! "
                        "This implies that there is an imbalance in your training "
                        "data across the workers and that some amount of it will be "
                        "ignored. A small amount of this is fine, but a major imbalance "
                        "should be avoided. Note: This warning will appear unless your "
                        "data is perfectly balanced."
                    )
                    break
            with amp.autocast(self._use_amp):
                batch_outputs = self.batch_outputs(batch, for_training=True)
                batch_group_outputs.append(batch_outputs)
                loss = batch_outputs["loss"]
                reg_loss = batch_outputs.get("reg_loss")
                if torch.isnan(loss):
                    raise ValueError("nan loss encountered")
                # Average the loss over the accumulation group so the effective
                # gradient matches a single large batch.
                loss = loss / len(batch_group)
                batch_loss += loss.item()
                if reg_loss is not None:
                    reg_loss = reg_loss / len(batch_group)
                    batch_reg_loss = reg_loss.item()
                    train_reg_loss += batch_reg_loss  # type: ignore
            # A callback may take over the backward pass; only run the default
            # backward when none of them did.
            backward_called = False
            for callback in self._callbacks:
                backward_called |= callback.on_backward(self, batch_outputs, backward_called)
            if not backward_called:
                if self._scaler is not None:
                    MixedPrecisionBackwardCallback(self._serialization_dir).on_backward(
                        self, batch_outputs, backward_called
                    )
                else:
                    loss.backward()
        if len(batch_group_outputs) <= 0:
            # The whole group was cut short (distributed early stop): no step.
            continue
        train_loss += batch_loss
        batch_grad_norm = self.rescale_gradients()
        self.clip_gradient()
        if self._learning_rate_scheduler:
            self._learning_rate_scheduler.step_batch(self._total_batches_completed + 1)
        if self._momentum_scheduler:
            self._momentum_scheduler.step_batch(self._total_batches_completed + 1)
        if self._scaler is not None:
            self._scaler.step(self.optimizer)
            self._scaler.update()
        else:
            self.optimizer.step()
        # Update moving averages
        if self._moving_average is not None:
            self._moving_average.apply(self._total_batches_completed + 1)
        self._batches_in_epoch_completed += 1
        self._total_batches_completed += 1
        # Update the description with the latest metrics
        metrics = training_util.get_metrics(
            self.model,
            train_loss,
            train_reg_loss,
            batch_loss,
            batch_reg_loss,
            self._batches_in_epoch_completed,
        )
        for callback in self._callbacks:
            callback.on_batch(
                self,
                batch_group,
                batch_group_outputs,
                metrics,
                epoch,
                self._batches_in_epoch_completed,
                is_training=True,
                is_primary=self._primary,
                batch_grad_norm=batch_grad_norm,
            )
        if self._primary:
            # Updating tqdm only for the primary as the trainers wouldn't have one
            description = training_util.description_from_metrics(metrics)
            batch_group_generator_tqdm.set_description(description, refresh=False)
        if self._checkpointer is not None:
            self._checkpointer.maybe_save_checkpoint(
                self, self._epochs_completed, self._batches_in_epoch_completed
            )
    if self._distributed and not done_early:
        logger.info(
            f"Worker {torch.distributed.get_rank()} completed its entire epoch (training)."
        )
        # Indicate that we're done so that any workers that have remaining data stop the epoch early.
        done = torch.tensor(1, device=self.cuda_device)
        torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
        assert done.item()
    # Let all workers finish their epoch before computing
    # the final statistics for the epoch.
    if self._distributed:
        dist.barrier()
    # If this whole epoch was skipped while catching up with a checkpoint,
    # report no metrics for it.
    if self._epochs_completed < self._start_after_epochs_completed or (
        self._epochs_completed == self._start_after_epochs_completed
        and self._batches_in_epoch_completed - 1 < self._start_after_batches_in_epoch_completed
    ):
        metrics = {}
    else:
        train_loss = dist_reduce_sum(train_loss)
        num_batches = dist_reduce_sum(self._batches_in_epoch_completed)
        if train_reg_loss is not None:
            train_reg_loss = dist_reduce_sum(train_reg_loss)
        metrics = training_util.get_metrics(
            self.model,
            train_loss,
            train_reg_loss,
            batch_loss=None,
            batch_reg_loss=None,
            num_batches=num_batches,
            reset=True,
        )
    for (worker, memory) in cpu_memory_usage:
        metrics["worker_" + str(worker) + "_memory_MB"] = memory / (1024 * 1024)
    for (gpu_num, memory) in gpu_memory_usage:
        metrics["gpu_" + str(gpu_num) + "_memory_MB"] = memory / (1024 * 1024)
    return metrics
def _validation_loss(self, epoch: int) -> Tuple[float, Optional[float], int]:
    """
    Computes the validation loss. Returns it and the number of batches.

    Returns `(val_loss, val_reg_loss, batches_this_epoch)` where `val_reg_loss`
    is `None` when the model defines no regularization penalty.
    """
    logger.info("Validating")
    self._pytorch_model.eval()
    # Replace parameter values with the shadow values from the moving averages.
    if self._moving_average is not None:
        self._moving_average.assign_average_value()
    try:
        if self._validation_data_loader is not None:
            validation_data_loader = self._validation_data_loader
        else:
            raise ConfigurationError(
                "Validation results cannot be calculated without a validation_data_loader"
            )
        regularization_penalty = self.model.get_regularization_penalty()
        # Having multiple tqdm bars in case of distributed training will be a mess. Hence only the primary's
        # progress is shown
        if self._primary:
            val_generator_tqdm = Tqdm.tqdm(validation_data_loader)
        else:
            val_generator_tqdm = validation_data_loader
        batches_this_epoch = 0
        val_loss = 0.0
        val_batch_loss = 0.0
        val_reg_loss = None if regularization_penalty is None else 0.0
        val_batch_reg_loss = None if regularization_penalty is None else 0.0
        done_early = False
        for batch in val_generator_tqdm:
            if self._distributed:
                # Check whether the other workers have stopped already (due to differing amounts of
                # data in each). If so, we can't proceed because we would hang when we hit the
                # barrier implicit in Model.forward. We use a IntTensor instead a BoolTensor
                # here because NCCL process groups apparently don't support BoolTensor.
                done = torch.tensor(0, device=self.cuda_device)
                torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
                if done.item() > 0:
                    done_early = True
                    logger.warning(
                        f"Worker {torch.distributed.get_rank()} finishing validation early! "
                        "This implies that there is an imbalance in your validation "
                        "data across the workers and that some amount of it will be "
                        "ignored. A small amount of this is fine, but a major imbalance "
                        "should be avoided. Note: This warning will appear unless your "
                        "data is perfectly balanced."
                    )
                    break
            with amp.autocast(self._use_amp):
                batch_outputs = self.batch_outputs(batch, for_training=False)
                loss = batch_outputs.get("loss")
                reg_loss = batch_outputs.get("reg_loss")
                if loss is not None:
                    # You shouldn't necessarily have to compute a loss for validation, so we allow for
                    # `loss` to be None. We need to be careful, though - `batches_this_epoch` is
                    # currently only used as the divisor for the loss function, so we can safely only
                    # count those batches for which we actually have a loss. If this variable ever
                    # gets used for something else, we might need to change things around a bit.
                    batches_this_epoch += 1
                    val_batch_loss = loss.item()
                    val_loss += val_batch_loss
                    if reg_loss is not None:
                        val_batch_reg_loss = reg_loss.item()
                        val_reg_loss += val_batch_reg_loss  # type: ignore
            # Update the description with the latest metrics
            val_metrics = training_util.get_metrics(
                self.model,
                val_loss,
                val_reg_loss,
                val_batch_loss,
                val_batch_reg_loss,
                batches_this_epoch,
            )
            description = training_util.description_from_metrics(val_metrics)
            if self._primary:
                val_generator_tqdm.set_description(description, refresh=False)
            for callback in self._callbacks:
                callback.on_batch(
                    self,
                    [batch],
                    [batch_outputs],
                    val_metrics,
                    epoch,
                    batches_this_epoch,
                    is_training=False,
                    is_primary=self._primary,
                )
        if self._distributed and not done_early:
            logger.warning(
                f"Worker {torch.distributed.get_rank()} completed its entire epoch (validation)."
            )
            # Indicate that we're done so that any workers that have remaining data stop validation early.
            done = torch.tensor(1, device=self.cuda_device)
            torch.distributed.all_reduce(done, torch.distributed.ReduceOp.SUM)
            assert done.item()
        return val_loss, val_reg_loss, batches_this_epoch
    finally:
        # Now restore the original parameter values.
        if self._moving_average is not None:
            self._moving_average.restore()
def train(self) -> Dict[str, Any]:
    """
    Runs the full training loop and returns the final metrics.

    Restores any existing checkpoint first, fires the `on_start`/`on_end`
    callback hooks around the actual training, and finalizes the best model
    state even when training raises.
    """
    try:
        self._maybe_restore_checkpoint()
    except RuntimeError as e:
        # `raise ... from e` sets __cause__ (and suppresses the implicit
        # context), matching the previous manual attribute assignment.
        raise ConfigurationError(
            f"Could not recover training from the checkpoint in {self._serialization_dir}. "
            "Did you mean to output to a different serialization directory or delete the "
            "existing serialization directory?"
        ) from e
    # Callbacks get their `on_start` call even when we're starting from a checkpoint.
    for callback in self._callbacks:
        callback.on_start(self, is_primary=self._primary)
    # Defaults reported to `on_end` if `_try_train` raises before finishing.
    metrics = None
    epoch = None
    try:
        metrics, epoch = self._try_train()
        return metrics
    finally:
        if self._primary:
            self._finalize_best_model_state()
        for callback in self._callbacks:
            callback.on_end(self, metrics=metrics, epoch=epoch, is_primary=self._primary)
def _try_train(self) -> Tuple[Dict[str, Any], int]:
    """
    The inner training loop: trains every epoch, runs validation, saves
    checkpoints, and tracks/saves the best model. Returns
    `(final metrics, last epoch index)`.
    """
    logger.info("Beginning training.")
    val_metrics: Dict[str, float] = {}
    metrics: Dict[str, Any] = {}
    training_start_time = None
    # Seed the metrics dict with the best values seen so far (relevant when
    # resuming from a checkpoint).
    metrics["best_epoch"] = self._metric_tracker.best_epoch
    for key, value in self._metric_tracker.best_epoch_metrics.items():
        metrics["best_validation_" + key] = value
    for epoch in range(self._num_epochs):
        epoch_start_time = time.time()
        train_metrics = self._train_epoch(epoch)
        if self._epochs_completed < self._start_after_epochs_completed:
            # We're still catching up with the checkpoint, so we do nothing.
            # Note that we have to call _train_epoch() even when we know the epoch is skipped. We have to
            # read from the data loader, because the data loader and dataset readers might use randomness,
            # and we have to make sure we consume exactly the same instances in exactly the same way every
            # time we train, even when starting from a checkpoint, so that we update the randomness
            # generators in the same way each time.
            self._epochs_completed += 1
            self._batches_in_epoch_completed = 0
            continue
        if training_start_time is None:
            # Time from the first non-skipped epoch; used for ETA below.
            training_start_time = epoch_start_time
        # get peak of memory usage
        for key, value in train_metrics.items():
            if key.startswith("gpu_") and key.endswith("_memory_MB"):
                metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
            elif key.startswith("worker_") and key.endswith("_memory_MB"):
                metrics["peak_" + key] = max(metrics.get("peak_" + key, 0), value)
        this_epoch_val_metric: Optional[float] = None
        if self._should_validate_this_epoch and self._validation_data_loader is not None:
            with torch.no_grad():
                # We have a validation set, so compute all the metrics on it.
                val_loss, val_reg_loss, num_batches = self._validation_loss(epoch)
                # It is safe again to wait till the validation is done. This is
                # important to get the metrics right.
                if self._distributed:
                    dist.barrier()
                val_loss = dist_reduce_sum(val_loss)
                num_batches = dist_reduce_sum(num_batches)
                if val_reg_loss is not None:
                    val_reg_loss = dist_reduce_sum(val_reg_loss)
                val_metrics = training_util.get_metrics(
                    self.model,
                    val_loss,
                    val_reg_loss,
                    batch_loss=None,
                    batch_reg_loss=None,
                    num_batches=num_batches,
                    reset=True,
                )
                # Check validation metric for early stopping
                this_epoch_val_metric = self._metric_tracker.combined_score(val_metrics)
                self._metric_tracker.add_metrics(val_metrics)
        # Create overall metrics dict
        training_elapsed_time = time.time() - training_start_time
        metrics["training_duration"] = str(datetime.timedelta(seconds=training_elapsed_time))
        metrics["epoch"] = epoch
        for key, value in train_metrics.items():
            metrics["training_" + key] = value
        for key, value in val_metrics.items():
            metrics["validation_" + key] = value
        if self._should_validate_this_epoch and self._metric_tracker.is_best_so_far():
            # Update all the best_ metrics.
            # (Otherwise they just stay the same as they were.)
            metrics["best_epoch"] = epoch
            for key, value in val_metrics.items():
                metrics["best_validation_" + key] = value
            self._metric_tracker.best_epoch_metrics = val_metrics
        if self._serialization_dir and self._primary:
            common_util.dump_metrics(
                os.path.join(self._serialization_dir, f"metrics_epoch_{epoch}.json"),
                metrics,
            )
        # The Scheduler API is agnostic to whether your schedule requires a validation metric -
        # if it doesn't, the validation metric passed here is ignored.
        if self._learning_rate_scheduler:
            self._learning_rate_scheduler.step(this_epoch_val_metric)
        if self._momentum_scheduler:
            self._momentum_scheduler.step(this_epoch_val_metric)
        for callback in self._callbacks:
            callback.on_epoch(self, metrics=metrics, epoch=epoch, is_primary=self._primary)
        self._epochs_completed += 1
        self._batches_in_epoch_completed = 0
        checkpoint_saved = False
        if self._checkpointer is not None:
            # The checkpointer saves state from the learning rate scheduler, momentum scheduler, moving
            # average, and callbacks, so we have to make sure those are updated before we save the
            # checkpoint here.
            checkpoint_saved = self._checkpointer.maybe_save_checkpoint(
                self, self._epochs_completed, self._batches_in_epoch_completed
            )
            # Wait for each primary process to finish saving the checkpoint
            if self._distributed:
                dist.barrier()
        if (
            self._should_validate_this_epoch
            and self._serialization_dir
            and self._metric_tracker.is_best_so_far()
        ):
            should_save_model_state: bool
            if self._ddp_wrapped_model is not None and self._ddp_wrapped_model.is_sharded:
                # Each worker saves its own shard for now (we combine the shards later).
                self._best_model_filename = os.path.join(
                    self._serialization_dir, f"best_w{self._rank}.th"
                )
                should_save_model_state = True
            else:
                self._best_model_filename = os.path.join(self._serialization_dir, "best.th")
                should_save_model_state = self._primary
            if should_save_model_state:
                if self._moving_average is None:
                    # If we're not using a moving average and the checkpointer just saved a checkpoint,
                    # we can just copy over that model state checkpoint to the '_best_model_filename'.
                    # Otherwise we need to save the model state on our own.
                    if self._checkpointer is not None and checkpoint_saved:
                        last_checkpoint = self._checkpointer.find_latest_checkpoint()
                        assert last_checkpoint is not None
                        model_state_file, _ = last_checkpoint
                        if os.path.exists(self._best_model_filename):
                            os.remove(self._best_model_filename)
                        hardlink_or_copy(model_state_file, self._best_model_filename)
                    else:
                        self._save_model_state(self._best_model_filename)
                else:
                    # Temporarily swap in the averaged weights so the best model
                    # reflects the moving average, then restore the raw weights.
                    self._moving_average.assign_average_value()
                    try:
                        self._save_model_state(self._best_model_filename)
                    finally:
                        self._moving_average.restore()
        # Wait for the primary process to finish saving the best
        if self._distributed:
            dist.barrier()
        epoch_elapsed_time = time.time() - epoch_start_time
        logger.info("Epoch duration: %s", datetime.timedelta(seconds=epoch_elapsed_time))
        if self._metric_tracker.should_stop_early():
            logger.info("Ran out of patience. Stopping training.")
            break
        if epoch < self._num_epochs - 1:
            time_per_epoch = training_elapsed_time / (
                (epoch + 1) - self._start_after_epochs_completed
            )
            # Note: If the first non-skipped epoch is half skipped (because it was checkpointed half-way
            # through), then this estimate is going to be optimistic.
            estimated_time_remaining = (
                time_per_epoch * self._num_epochs
            ) - training_elapsed_time
            formatted_time = str(datetime.timedelta(seconds=int(estimated_time_remaining)))
            logger.info("Estimated training time remaining: %s", formatted_time)
    else:
        # for/else: runs only when the loop wasn't broken. Ensures `epoch`
        # is defined even when `_num_epochs` is 0 (the loop body never ran).
        epoch = self._num_epochs - 1
    # Load the best model state before returning
    if self._best_model_filename is None or self._metric_tracker.is_best_so_far():
        self._finalize_model()
    else:
        # The model we're loading here has already been finalized.
        self._load_model_state(self._best_model_filename)
    return metrics, epoch
def _save_model_state(self, path: str) -> None:
    """Serialize the (possibly DDP-wrapped) model's state dict to `path`."""
    target = self.model if self._ddp_wrapped_model is None else self._ddp_wrapped_model
    torch.save(target.state_dict(), path)
def _load_model_state(self, path: str) -> None:
    """Load a model state dict from `path` into the model."""
    # This function is only called after training. So load model on the CPU.
    state = torch.load(path, map_location=torch.device("cpu"))
    if self._ddp_wrapped_model is not None:
        self._ddp_wrapped_model.load_state_dict(state)
    else:
        self._pytorch_model.load_state_dict(state)
def _finalize_model(self) -> None:
    """If we have a moving average, we have to finalize the model at the end of training."""
    if self._moving_average is None:
        return
    self._moving_average.assign_average_value()
def _finalize_best_model_state(self) -> None:
    """
    The best model weights might be saved in sharded files, in which case we gather them
    up and save them to a single 'best.th' file.
    """
    needs_consolidation = (
        self._serialization_dir
        and self._ddp_wrapped_model is not None
        and self._ddp_wrapped_model.is_sharded
    )
    if not needs_consolidation:
        return
    logger.info("Consolidating sharded model states")
    shard_files = list(glob.iglob(os.path.join(self._serialization_dir, "best_w*.th")))
    full_model_state = self._ddp_wrapped_model.consolidate_sharded_state(shard_files)
    self._best_model_filename = os.path.join(self._serialization_dir, "best.th")
    torch.save(full_model_state, self._best_model_filename)
def get_checkpoint_state(self) -> Optional[TrainerCheckpoint]:
    """
    Assembles the model weights and the serializable training state for a checkpoint.

    Returns `None` on non-primary workers of an unsharded distributed run,
    since only the primary needs to save the (replicated) model weights.
    """
    if not self._distributed:
        model_state = self.model.state_dict()
    else:
        assert self._ddp_wrapped_model is not None
        if not (self._ddp_wrapped_model.is_sharded or self._primary):
            return None
        model_state = self._ddp_wrapped_model.state_dict()
    # These are the training states we need to persist.
    training_states = {
        "version": 1,
        "metric_tracker": self._metric_tracker.state_dict(),
        "optimizer": self.optimizer.state_dict(),
        "callbacks": [cb.state_dict() for cb in self._callbacks],
        "epochs_completed": self._epochs_completed,
        "batches_in_epoch_completed": self._batches_in_epoch_completed,
        "best_model_filename": self._best_model_filename,
    }
    # If we have any of these optional objects, we should persist them too.
    optional_components = (
        ("learning_rate_scheduler", self._learning_rate_scheduler),
        ("momentum_scheduler", self._momentum_scheduler),
        ("moving_average", self._moving_average),
    )
    for state_key, component in optional_components:
        if component is not None:
            training_states[state_key] = component.state_dict()
    return TrainerCheckpoint(model_state, training_states)
def _maybe_restore_checkpoint(self) -> None:
    """
    Restores the model and training state from the last saved checkpoint.
    This includes an epoch count and optimizer state, which is serialized separately
    from model parameters. This function should only be used to continue training -
    if you wish to load a model for inference/load parts of a model into a new
    computation graph, you should use the native Pytorch functions:
    `model.load_state_dict(torch.load("/path/to/model/weights.th"))`
    If `self._serialization_dir` does not exist or does not contain any checkpointed weights,
    this function will do nothing.
    """
    if self._checkpointer is None:
        return
    state = self._checkpointer.load_checkpoint()
    if state is None:
        # No checkpoint found: start training from scratch.
        self._start_after_epochs_completed = 0
        self._start_after_batches_in_epoch_completed = 0
        self._best_model_filename = None
        return
    model_state, training_state = state
    if training_state["version"] != 1:
        raise ValueError(
            f"This version of {self.__class__.__name__} only supports checkpoints of version 1. "
            f"Found version {training_state['version']}"
        )
    # Restore the model weights (into the DDP wrapper when distributed).
    if self._distributed:
        assert self._ddp_wrapped_model is not None
        self._ddp_wrapped_model.load_state_dict(model_state)
    else:
        self._pytorch_model.load_state_dict(model_state)
    self._metric_tracker.load_state_dict(training_state["metric_tracker"])
    self.optimizer.load_state_dict(training_state["optimizer"])
    # NOTE(review): callback state is restored positionally via zip, which
    # silently truncates if the callback list changed since the checkpoint was
    # written — confirm the configured callbacks match across runs.
    for cb, state_dict in zip(self._callbacks, training_state["callbacks"]):
        cb.load_state_dict(state_dict)
    if self._learning_rate_scheduler is not None:
        self._learning_rate_scheduler.load_state_dict(training_state["learning_rate_scheduler"])
    if self._momentum_scheduler is not None:
        self._momentum_scheduler.load_state_dict(training_state["momentum_scheduler"])
    if self._moving_average is not None:
        self._moving_average.load_state_dict(training_state["moving_average"])
    # Remember where training left off so _train_epoch()/_try_train() can
    # fast-forward through already-completed data.
    self._start_after_epochs_completed = training_state["epochs_completed"]
    self._start_after_batches_in_epoch_completed = training_state["batches_in_epoch_completed"]
    self._best_model_filename = training_state["best_model_filename"]
@classmethod
def from_partial_objects(
    cls,
    model: Model,
    serialization_dir: str,
    data_loader: DataLoader,
    validation_data_loader: DataLoader = None,
    local_rank: int = 0,
    patience: int = None,
    validation_metric: Union[str, List[str]] = "-loss",
    num_epochs: int = 20,
    cuda_device: Optional[Union[int, torch.device]] = None,
    grad_norm: Union[float, bool] = False,
    grad_clipping: float = None,
    distributed: bool = False,
    world_size: int = 1,
    num_gradient_accumulation_steps: int = 1,
    use_amp: bool = False,
    no_grad: List[str] = None,
    optimizer: Lazy[Optimizer] = Lazy(Optimizer.default),
    learning_rate_scheduler: Lazy[LearningRateScheduler] = None,
    momentum_scheduler: Lazy[MomentumScheduler] = None,
    moving_average: Lazy[MovingAverage] = None,
    checkpointer: Optional[Lazy[Checkpointer]] = Lazy(Checkpointer),
    callbacks: List[Lazy[TrainerCallback]] = None,
    enable_default_callbacks: bool = True,
    run_confidence_checks: bool = True,
    grad_scaling: bool = True,
    ddp_accelerator: Optional[DdpAccelerator] = None,
    **kwargs,
) -> Trainer:
    """
    This method exists so that we can have a documented method to construct this class using
    `FromParams`. If you are not using `FromParams` or config files, you can safely ignore this
    method.
    The reason we can't just use `__init__` with `FromParams` here is because there are
    sequential dependencies to this class's arguments. Anything that has a `Lazy[]` type
    annotation needs something from one of the non-`Lazy` arguments. The `Optimizer` needs to
    have the parameters from the `Model` before it's constructed, and the `Schedulers` need to
    have the `Optimizer`. Because of this, the typical way we construct things `FromParams`
    doesn't work, so we use `Lazy` to allow for constructing the objects sequentially.
    If you're not using `FromParams`, you can just construct these arguments in the right order
    yourself in your code and call the constructor directly.
    """
    # Default device: first GPU if any are visible, otherwise CPU (-1).
    if cuda_device is None:
        from torch import cuda

        if cuda.device_count() > 0:
            cuda_device = 0
        else:
            cuda_device = -1
    check_for_gpu(cuda_device)
    # Need to wrap model with a DdpAccelerator ("Distributed data-parallel wrapper")
    # or move model to right device before initializing the optimizer.
    # Using DDP brings in a quirk wrt AllenNLP's `Model` interface and its
    # usage. A `Model` object is wrapped by `DdpAccelerator`, but assigning the wrapped model to `self.model`
    # will break the usages such as `Model.get_regularization_penalty`, `Model.get_metrics`, etc.
    # Hence a reference to Pytorch's object is maintained in the case of distributed training and in the
    # normal case, reference to `Model` is retained. This reference is only used in
    # these places: `model.__call__`, `model.train` and `model.eval`.
    ddp_wrapped_model: Optional[DdpWrappedModel] = None
    if distributed:
        if ddp_accelerator is None:
            ddp_accelerator = TorchDdpAccelerator(cuda_device=cuda_device)
        # DdpAccelerator will move the model to the right device(s).
        model, ddp_wrapped_model = ddp_accelerator.wrap_model(model)
    else:
        # NOTE(review): assumes `cuda_device` is an `int` here; a
        # `torch.device` passed with `distributed=False` would make this
        # comparison raise — confirm callers only pass ints in that case.
        if cuda_device >= 0:
            model = model.cuda(cuda_device)
    pytorch_model = model if ddp_wrapped_model is None else ddp_wrapped_model.model
    # Freeze any parameters matching the `no_grad` regexes before the
    # optimizer collects trainable parameters.
    if no_grad:
        for name, parameter in pytorch_model.named_parameters():
            if any(re.search(regex, name) for regex in no_grad):
                parameter.requires_grad_(False)
    parameters = [[n, p] for n, p in pytorch_model.named_parameters() if p.requires_grad]
    optimizer_ = optimizer.construct(model_parameters=parameters)
    common_util.log_frozen_and_tunable_parameter_names(pytorch_model)
    batches_per_epoch: Optional[int]
    try:
        batches_per_epoch = len(data_loader)
        batches_per_epoch = math.ceil(batches_per_epoch / num_gradient_accumulation_steps)
    except TypeError:
        # Streaming data loaders have no length.
        batches_per_epoch = None
    # Construct the Lazy components in dependency order: moving average and
    # schedulers need the parameters/optimizer built above.
    moving_average_ = (
        None if moving_average is None else moving_average.construct(parameters=parameters)
    )
    learning_rate_scheduler_ = (
        None
        if learning_rate_scheduler is None
        else learning_rate_scheduler.construct(
            optimizer=optimizer_, num_epochs=num_epochs, num_steps_per_epoch=batches_per_epoch
        )
    )
    momentum_scheduler_ = (
        None
        if momentum_scheduler is None
        else momentum_scheduler.construct(optimizer=optimizer_)
    )
    checkpointer_ = (
        None
        if checkpointer is None
        else checkpointer.construct(serialization_dir=serialization_dir)
    )
    callbacks_: List[TrainerCallback] = []
    for callback_ in callbacks or []:
        callbacks_.append(callback_.construct(serialization_dir=serialization_dir))
    return cls(
        model,
        optimizer_,
        data_loader,
        patience=patience,
        validation_metric=validation_metric,
        validation_data_loader=validation_data_loader,
        num_epochs=num_epochs,
        serialization_dir=serialization_dir,
        cuda_device=cuda_device,
        grad_norm=grad_norm,
        grad_clipping=grad_clipping,
        learning_rate_scheduler=learning_rate_scheduler_,
        momentum_scheduler=momentum_scheduler_,
        checkpointer=checkpointer_,
        moving_average=moving_average_,
        callbacks=callbacks_,
        distributed=distributed,
        local_rank=local_rank,
        world_size=world_size,
        num_gradient_accumulation_steps=num_gradient_accumulation_steps,
        use_amp=use_amp,
        enable_default_callbacks=enable_default_callbacks,
        run_confidence_checks=run_confidence_checks,
        grad_scaling=grad_scaling,
        ddp_wrapped_model=ddp_wrapped_model,
        **kwargs,
    )
def get_best_weights_path(self) -> Optional[str]:
    """Absolute path to the best model weights, or `None` if none were saved yet."""
    best = self._best_model_filename
    return None if best is None else os.path.abspath(best)
# Annotated as a variadic tuple (`Tuple[X, ...]`) of callback *classes* (not
# instances): the trainer instantiates each with the serialization directory.
DEFAULT_CALLBACKS: Tuple[Type[TrainerCallback], ...] = (ConsoleLoggerCallback,)
"""
The default callbacks used by `GradientDescentTrainer`.
"""
| {
"content_hash": "c14451f40dbed0980b064fd32372a7f6",
"timestamp": "",
"source": "github",
"line_count": 1244,
"max_line_length": 113,
"avg_line_length": 46.72106109324759,
"alnum_prop": 0.599679977976979,
"repo_name": "allenai/allennlp",
"id": "cfabbc3237f6097afb8fa1f1cb25f906a47fa30c",
"size": "58121",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "allennlp/training/gradient_descent_trainer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39870"
},
{
"name": "Dockerfile",
"bytes": "1190"
},
{
"name": "Jsonnet",
"bytes": "4469"
},
{
"name": "Makefile",
"bytes": "5306"
},
{
"name": "Perl",
"bytes": "101"
},
{
"name": "Python",
"bytes": "3575059"
},
{
"name": "Scilab",
"bytes": "4085"
},
{
"name": "Shell",
"bytes": "2092"
}
],
"symlink_target": ""
} |
from flask_restful import Resource, reqparse, fields, marshal_with
from dateutil import parser
from sqlalchemy import desc
from flask_jwt_extended import jwt_required
from .revisions import revision_fields
from ... import db
from ...models import Revision, Futuremark3DMarkResult
# Marshalling schema for serializing Futuremark 3DMark benchmark results.
# Integer sub-test scores default to None so that runs missing a sub-test
# serialize as null rather than 0.
futuremark3dmarkresult_fields = {
    'id': fields.Integer,
    'result_date': fields.DateTime(dt_format='iso8601'),
    'icestorm_score': fields.Integer(default=None),
    'icestorm_result_url': fields.String,
    'cloudgate_score': fields.Integer(default=None),
    'cloudgate_result_url': fields.String,
    'firestrike_score': fields.Integer(default=None),
    'firestrike_result_url': fields.String,
    'skydiver_score': fields.Integer(default=None),
    'skydiver_result_url': fields.String,
    'overall_result_url': fields.String,
    'revision': fields.Nested(revision_fields),
    'uri': fields.Url('.futuremark3dmarkresult', absolute=True)
}
class Futuremark3DMarkResultListAPI(Resource):
    """Read-only collection endpoint listing all 3DMark results."""

    @marshal_with(futuremark3dmarkresult_fields,
                  envelope='futuremark3dmarkresults')
    def get(self):
        # Rank by the sum of all sub-benchmark scores, which serves as a
        # rough aggregate score (highest first).
        aggregate_score = (Futuremark3DMarkResult.icestorm_score +
                           Futuremark3DMarkResult.cloudgate_score +
                           Futuremark3DMarkResult.firestrike_score +
                           Futuremark3DMarkResult.skydiver_score)
        return Futuremark3DMarkResult.query.order_by(
            desc(aggregate_score)).all()
class Futuremark3DMarkResultAPI(Resource):
    """Single-resource endpoint: fetch, update or delete one 3DMark result."""

    def __init__(self):
        # Register the writable JSON fields in bulk instead of one
        # add_argument call per line.
        self.reqparse = reqparse.RequestParser()
        string_args = (
            'result_date', 'icestorm_result_url', 'cloudgate_result_url',
            'firestrike_result_url', 'skydiver_result_url',
            'overall_result_url')
        int_args = (
            'icestorm_score', 'cloudgate_score', 'firestrike_score',
            'skydiver_score')
        for name in string_args:
            self.reqparse.add_argument(name, type=str, location='json')
        for name in int_args:
            self.reqparse.add_argument(name, type=int, location='json')
        super(Futuremark3DMarkResultAPI, self).__init__()

    @marshal_with(futuremark3dmarkresult_fields,
                  envelope='futuremark3dmarkresult')
    def get(self, id):
        """Return a single result or 404."""
        return Futuremark3DMarkResult.query.get_or_404(id)

    @jwt_required
    @marshal_with(futuremark3dmarkresult_fields,
                  envelope='futuremark3dmarkresult')
    def put(self, id):
        """Partially update a result; only non-null request fields change."""
        futuremark3dmarkresult = Futuremark3DMarkResult.query.get_or_404(id)
        for field, value in self.reqparse.parse_args().items():
            if value is None:
                continue
            # result_date arrives as an ISO string and must be parsed to a
            # datetime before assignment.
            if field == 'result_date':
                value = parser.parse(value)
            setattr(futuremark3dmarkresult, field, value)
        db.session.commit()
        return futuremark3dmarkresult

    @jwt_required
    def delete(self, id):
        """Delete the result row (no-op if it does not exist)."""
        Futuremark3DMarkResult.query.filter(
            Futuremark3DMarkResult.id == id).delete()
        db.session.commit()
        return {'result': True}
class RevisionFuturemark3DMarkResultListAPI(Resource):
    """Collection endpoint for the 3DMark results of a single revision."""

    def __init__(self):
        # Register the writable JSON fields in bulk instead of one
        # add_argument call per line.
        self.reqparse = reqparse.RequestParser()
        string_args = (
            'result_date', 'icestorm_result_url', 'cloudgate_result_url',
            'firestrike_result_url', 'skydiver_result_url',
            'overall_result_url')
        int_args = (
            'icestorm_score', 'cloudgate_score', 'firestrike_score',
            'skydiver_score')
        for name in string_args:
            self.reqparse.add_argument(name, type=str, location='json')
        for name in int_args:
            self.reqparse.add_argument(name, type=int, location='json')
        super(RevisionFuturemark3DMarkResultListAPI, self).__init__()

    @marshal_with(futuremark3dmarkresult_fields,
                  envelope='futuremark3dmarkresults')
    def get(self, id):
        """List all 3DMark results attached to revision *id* (404 if none)."""
        return Revision.query.get_or_404(id).futuremark3dmarkresults.all()

    @jwt_required
    @marshal_with(futuremark3dmarkresult_fields,
                  envelope='futuremark3dmarkresult')
    def post(self, id):
        """Create a new 3DMark result under revision *id*; returns 201."""
        args = self.reqparse.parse_args()
        # The result date arrives as an ISO string; convert if present.
        result_date = None
        if args['result_date'] is not None:
            result_date = parser.parse(args['result_date'])
        revision = Revision.query.get_or_404(id)
        new_result = Futuremark3DMarkResult(
            result_date=result_date,
            icestorm_score=args['icestorm_score'],
            icestorm_result_url=args['icestorm_result_url'],
            cloudgate_score=args['cloudgate_score'],
            cloudgate_result_url=args['cloudgate_result_url'],
            firestrike_score=args['firestrike_score'],
            firestrike_result_url=args['firestrike_result_url'],
            skydiver_score=args['skydiver_score'],
            skydiver_result_url=args['skydiver_result_url'],
            overall_result_url=args['overall_result_url'])
        new_result.revision_id = revision.id
        db.session.add(new_result)
        db.session.commit()
        return new_result, 201
| {
"content_hash": "005c1772c00ac5cc82d2a2210037e10d",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 77,
"avg_line_length": 43.20261437908497,
"alnum_prop": 0.594553706505295,
"repo_name": "rivalrockets/rivalrockets-api",
"id": "84787a92b1daa847561e2825cb204a8dc1d4387c",
"size": "6610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/api_1_0/resources/futuremark3dmarkresults.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "65375"
}
],
"symlink_target": ""
} |
'''
Parsing
'''
from __future__ import absolute_import
##
# Import Modules
#
import os.path
import re
from Library.StringUtils import RaiseParserError
from Library.StringUtils import GetSplitValueList
from Library.StringUtils import CheckFileType
from Library.StringUtils import CheckFileExist
from Library.StringUtils import CleanString
from Library.StringUtils import NormPath
from Logger.ToolError import FILE_NOT_FOUND
from Logger.ToolError import FatalError
from Logger.ToolError import FORMAT_INVALID
from Library import DataType
from Library.Misc import GuidStructureStringToGuidString
from Library.Misc import CheckGuidRegFormat
from Logger import StringTable as ST
import Logger.Log as Logger
from Parser.DecParser import Dec
from . import GlobalData
gPKG_INFO_DICT = {}
## GetBuildOption
#
# Split a "[<Family>:]<ToolFlag>=Flag" build-option statement into its parts.
#
# @param String:  Build-option statement to parse
# @param File:    File that defines the option, used for error reporting
# @param LineNo:  Line number used for error reporting (-1 if unknown)
#
# @retval Tuple (Family, ToolChain, Flag); Family is '' when absent
#
def GetBuildOption(String, File, LineNo= -1):
    Family = ToolChain = Flag = ''
    if DataType.TAB_EQUAL_SPLIT not in String:
        RaiseParserError(String, 'BuildOptions', File, \
                         '[<Family>:]<ToolFlag>=Flag', LineNo)
    else:
        Key, Value = GetSplitValueList(String, DataType.TAB_EQUAL_SPLIT,
                                       MaxSplit=1)
        if ':' in Key:
            # Optional tool-chain family precedes the first colon
            FamilyPart, _, ToolPart = Key.partition(':')
            Family = FamilyPart.strip()
            ToolChain = ToolPart.strip()
        else:
            ToolChain = Key.strip()
        Flag = Value.strip()
    return (Family, ToolChain, Flag)
## GetLibraryClass
#
# Parse a DSC library class statement of the form
# <LibraryClassKeyWord>|<LibraryInstance>.
#
# @param Item:          Tuple: (statement string, optional module type list)
# @param ContainerFile: File that contains the statement, for error reports
# @param WorkspaceDir:  Workspace root used to verify the instance file
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetLibraryClass(Item, ContainerFile, WorkspaceDir, LineNo= -1):
    Fields = GetSplitValueList(Item[0])
    SupMod = DataType.SUP_MODULE_LIST_STRING
    if len(Fields) != 2:
        RaiseParserError(Item[0], 'LibraryClasses', ContainerFile, \
                         '<LibraryClassKeyWord>|<LibraryInstance>')
    else:
        Instance = Fields[1]
        CheckFileType(Instance, '.Inf', ContainerFile,
                      'library class instance', Item[0], LineNo)
        CheckFileExist(WorkspaceDir, Instance, ContainerFile,
                       'LibraryClasses', Item[0], LineNo)
        if Item[1] != '':
            SupMod = Item[1]
    return (Fields[0], Fields[1], SupMod)
## GetLibraryClassOfInf
#
# Parse an INF library class statement of the form
# <LibraryClassKeyWord>[|<LibraryInstance>][|<TokenSpaceGuidCName>.<PcdCName>]
#
# @param Item:          Tuple: (statement string, optional module type list)
# @param ContainerFile: File that contains the statement, for error reports
# @param WorkspaceDir:  Workspace root used to verify the instance file
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetLibraryClassOfInf(Item, ContainerFile, WorkspaceDir, LineNo= -1):
    # Pad with value separators so the optional fields always exist
    Fields = GetSplitValueList((Item[0] + DataType.TAB_VALUE_SPLIT * 2))
    SupMod = DataType.SUP_MODULE_LIST_STRING
    if len(Fields) > 5:
        RaiseParserError\
        (Item[0], 'LibraryClasses', ContainerFile, \
         '<LibraryClassKeyWord>[|<LibraryInstance>]\
         [|<TokenSpaceGuidCName>.<PcdCName>]')
    else:
        CheckFileType(Fields[1], '.Inf', ContainerFile, 'LibraryClasses',
                      Item[0], LineNo)
        CheckFileExist(WorkspaceDir, Fields[1], ContainerFile,
                       'LibraryClasses', Item[0], LineNo)
        if Fields[2] != '':
            CheckPcdTokenInfo(Fields[2], 'LibraryClasses',
                              ContainerFile, LineNo)
        if Item[1] != '':
            SupMod = Item[1]
    return (Fields[0], Fields[1], Fields[2], SupMod)
## CheckPcdTokenInfo
#
# Verify that a PCD token string follows <TokenSpaceGuidCName>.<PcdCName>;
# raises a parser error otherwise.
#
# @param TokenInfoString: String to be checked
# @param Section:         Section name, used for error report
# @param File:            File name, used for error report
# @param LineNo:          Line number for error report (-1 if unknown)
#
# @retval True when the string has exactly two dot-separated parts
#
def CheckPcdTokenInfo(TokenInfoString, Section, File, LineNo= -1):
    ExpectedFormat = '<TokenSpaceGuidCName>.<PcdCName>'
    if TokenInfoString is not None and TokenInfoString != '':
        if len(GetSplitValueList(TokenInfoString, DataType.TAB_SPLIT)) == 2:
            return True
    RaiseParserError(TokenInfoString, Section, File, ExpectedFormat, LineNo)
## GetPcd
#
# Parse a DSC PCD statement of the form
# <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<Type>|<MaximumDatumSize>]
#
# @param Item:          The PCD statement string
# @param Type:          PCD section type suffix (e.g. 'FixedAtBuild')
# @param ContainerFile: File that contains the statement, for error reports
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetPcd(Item, Type, ContainerFile, LineNo= -1):
    TokenGuid = TokenName = Value = MaximumDatumSize = Token = ''
    # Pad with separators so the optional trailing fields always exist
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(Fields) < 4 or len(Fields) > 6:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
                         [|<Type>|<MaximumDatumSize>]', LineNo)
    else:
        Value, MaximumDatumSize, Token = Fields[1], Fields[2], Fields[3]
        if CheckPcdTokenInfo(Fields[0], 'Pcds' + Type, ContainerFile, LineNo):
            TokenGuid, TokenName = GetSplitValueList(Fields[0],
                                                     DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, Value, MaximumDatumSize, Token, Type)
## GetFeatureFlagPcd
#
# Parse a feature-flag PCD statement of the form
# <PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE
#
# @param Item:          The PCD statement string
# @param Type:          PCD section type suffix
# @param ContainerFile: File that contains the statement, for error reports
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetFeatureFlagPcd(Item, Type, ContainerFile, LineNo= -1):
    TokenGuid = TokenName = Value = ''
    Fields = GetSplitValueList(Item)
    if len(Fields) != 2:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|TRUE/FALSE', \
                         LineNo)
    else:
        Value = Fields[1]
        if CheckPcdTokenInfo(Fields[0], 'Pcds' + Type, ContainerFile, LineNo):
            TokenGuid, TokenName = GetSplitValueList(Fields[0],
                                                     DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, Value, Type)
## GetDynamicDefaultPcd
#
# Parse a dynamic-default PCD statement of the form
# <PcdTokenSpaceGuidCName>.<TokenCName>|<Value>[|<DatumTyp>[|<MaxDatumSize>]]
#
# @param Item:          The PCD statement string
# @param Type:          PCD section type suffix
# @param ContainerFile: File that contains the statement, for error reports
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetDynamicDefaultPcd(Item, Type, ContainerFile, LineNo= -1):
    TokenGuid = TokenName = Value = DatumTyp = MaxDatumSize = ''
    # Pad with separators so the optional trailing fields always exist
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(Fields) < 4 or len(Fields) > 8:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<Value>\
                         [|<DatumTyp>[|<MaxDatumSize>]]', LineNo)
    else:
        Value, DatumTyp, MaxDatumSize = Fields[1], Fields[2], Fields[3]
        if CheckPcdTokenInfo(Fields[0], 'Pcds' + Type, ContainerFile, LineNo):
            TokenGuid, TokenName = GetSplitValueList(Fields[0],
                                                     DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, Value, DatumTyp, MaxDatumSize, Type)
## GetDynamicHiiPcd
#
# Parse a dynamic-HII PCD statement of the form
# <PcdTokenSpaceGuidCName>.<TokenCName>|<String>|<VariableGuidCName>|
# <VariableOffset>[|<DefaultValue>[|<MaximumDatumSize>]]
#
# @param Item:          The PCD statement string
# @param Type:          PCD section type suffix
# @param ContainerFile: File that contains the statement, for error reports
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetDynamicHiiPcd(Item, Type, ContainerFile, LineNo= -1):
    TokenGuid = TokenName = ''
    List1 = List2 = List3 = List4 = List5 = ''
    # Pad with separators so the optional trailing fields always exist
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 2)
    if len(Fields) < 6 or len(Fields) > 8:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<String>|\
                         <VariableGuidCName>|<VariableOffset>[|<DefaultValue>\
                         [|<MaximumDatumSize>]]', LineNo)
    else:
        List1, List2, List3, List4, List5 = Fields[1:6]
        if CheckPcdTokenInfo(Fields[0], 'Pcds' + Type, ContainerFile, LineNo):
            TokenGuid, TokenName = GetSplitValueList(Fields[0],
                                                     DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, List1, List2, List3, List4, List5, Type)
## GetDynamicVpdPcd
#
# Parse a dynamic-VPD PCD statement of the form
# <PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>[|<MaximumDatumSize>]
#
# @param Item:          The PCD statement string
# @param Type:          PCD section type suffix
# @param ContainerFile: File that contains the statement, for error reports
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetDynamicVpdPcd(Item, Type, ContainerFile, LineNo= -1):
    TokenGuid = TokenName = List1 = List2 = ''
    # Pad with one separator so the optional field always exists
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    if len(Fields) < 3 or len(Fields) > 4:
        RaiseParserError(Item, 'Pcds' + Type, ContainerFile, \
                         '<PcdTokenSpaceGuidCName>.<TokenCName>|<VpdOffset>\
                         [|<MaximumDatumSize>]', LineNo)
    else:
        List1, List2 = Fields[1], Fields[2]
        if CheckPcdTokenInfo(Fields[0], 'Pcds' + Type, ContainerFile, LineNo):
            TokenGuid, TokenName = GetSplitValueList(Fields[0],
                                                     DataType.TAB_SPLIT)
    return (TokenName, TokenGuid, List1, List2, Type)
## GetComponent
#
# Parse the [Components] block of a DSC file.  Each component may carry an
# optional '{ ... }' sub-block holding <LibraryClasses>, <BuildOptions> and
# the various <Pcds*> override sections.
# Appends to KeyValues entries of the form
# ['component name', [lib1, ...], [bo1, ...], [pcd1, ...]].
#
# @param Lines:             Iterable of (line string, ...) tuples to parse
# @param KeyValues:         Output list that receives one entry per component
#
def GetComponent(Lines, KeyValues):
    # State flags: whether we are inside a '{...}' block, and which
    # sub-section of that block is currently active.
    (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
    FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
    FindPcdsDynamicEx) = (False, False, False, False, False, False, False, \
    False)
    ListItem = None
    LibraryClassItem = []
    BuildOption = []
    Pcd = []
    for Line in Lines:
        Line = Line[0]
        #
        # Ignore !include statement
        #
        if Line.upper().find(DataType.TAB_INCLUDE.upper() + ' ') > -1 or \
            Line.upper().find(DataType.TAB_DEFINE + ' ') > -1:
            continue
        if FindBlock == False:
            ListItem = Line
            #
            # find '{' at line tail
            #
            if Line.endswith('{'):
                FindBlock = True
                ListItem = CleanString(Line.rsplit('{', 1)[0], \
                                       DataType.TAB_COMMENT_SPLIT)
        #
        # Parse a block content
        #
        if FindBlock:
            # A sub-section tag switches the active section; exactly one
            # section flag is True at any time.
            if Line.find('<LibraryClasses>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (True, False, False, False, False, False, False)
                continue
            if Line.find('<BuildOptions>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, True, False, False, False, False, False)
                continue
            if Line.find('<PcdsFeatureFlag>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, True, False, False, False, False)
                continue
            if Line.find('<PcdsPatchableInModule>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, True, False, False, False)
                continue
            if Line.find('<PcdsFixedAtBuild>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, True, False, False)
                continue
            if Line.find('<PcdsDynamic>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, True, False)
                continue
            if Line.find('<PcdsDynamicEx>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, True)
                continue
            if Line.endswith('}'):
                #
                # find '}' at line tail: the component block is complete,
                # emit it and reset all state for the next component.
                #
                KeyValues.append([ListItem, LibraryClassItem, \
                                  BuildOption, Pcd])
                (FindBlock, FindLibraryClass, FindBuildOption, \
                 FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
                 FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, False, False)
                LibraryClassItem, BuildOption, Pcd = [], [], []
                continue
        if FindBlock:
            # Ordinary content line: file it under the active sub-section.
            # PCD lines are tagged with their (NULL-suffixed) section type.
            if FindLibraryClass:
                LibraryClassItem.append(Line)
            elif FindBuildOption:
                BuildOption.append(Line)
            elif FindPcdsFeatureFlag:
                Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG_NULL, Line))
            elif FindPcdsPatchableInModule:
                Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE_NULL, Line))
            elif FindPcdsFixedAtBuild:
                Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD_NULL, Line))
            elif FindPcdsDynamic:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_DEFAULT_NULL, Line))
            elif FindPcdsDynamicEx:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX_DEFAULT_NULL, Line))
        else:
            # Component without a '{...}' block: emit it with empty lists.
            KeyValues.append([ListItem, [], [], []])
    return True
## GetExec
#
# Parse a string with format "InfFilename [EXEC = ExecFilename]".
#
# @param String: Statement possibly containing an EXEC clause
#
# @retval Tuple (InfFilename, ExecFilename); ExecFilename is '' when the
#         EXEC keyword is absent
#
def GetExec(String):
    # partition() splits at the first occurrence of 'EXEC', matching the
    # original find()-based slicing.
    Before, Keyword, After = String.partition('EXEC')
    if Keyword:
        return (Before.strip(), After.strip())
    return (String.strip(), '')
## GetComponents
#
# Parse the [Components] section given as one newline-joined string.
# Appends to KeyValues entries of the form
# ['component name', [lib1, ...], [bo1, ...], [pcd1, ...]].
#
# @param Lines:             The section content as a single string
# @param KeyValues:         Output list that receives one entry per component
# @param CommentCharacter:  Comment char, used to strip comment content
#
# @retval True              Always returns True
#
def GetComponents(Lines, KeyValues, CommentCharacter):
    # Drop everything up to and including the section header terminator
    if Lines.find(DataType.TAB_SECTION_END) > -1:
        Lines = Lines.split(DataType.TAB_SECTION_END, 1)[1]
    # State flags: inside a '{...}' block, and which sub-section is active.
    (FindBlock, FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
    FindPcdsPatchableInModule, FindPcdsFixedAtBuild, FindPcdsDynamic, \
    FindPcdsDynamicEx) = \
    (False, False, False, False, False, False, False, False)
    ListItem = None
    LibraryClassItem = []
    BuildOption = []
    Pcd = []
    LineList = Lines.split('\n')
    for Line in LineList:
        Line = CleanString(Line, CommentCharacter)
        if Line is None or Line == '':
            continue
        if FindBlock == False:
            ListItem = Line
            #
            # find '{' at line tail
            #
            if Line.endswith('{'):
                FindBlock = True
                ListItem = CleanString(Line.rsplit('{', 1)[0], CommentCharacter)
        #
        # Parse a block content
        #
        if FindBlock:
            # A sub-section tag switches the active section; exactly one
            # section flag is True at any time.
            if Line.find('<LibraryClasses>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (True, False, False, False, False, False, False)
                continue
            if Line.find('<BuildOptions>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, True, False, False, False, False, False)
                continue
            if Line.find('<PcdsFeatureFlag>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, True, False, False, False, False)
                continue
            if Line.find('<PcdsPatchableInModule>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, True, False, False, False)
                continue
            if Line.find('<PcdsFixedAtBuild>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, True, False, False)
                continue
            if Line.find('<PcdsDynamic>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, True, False)
                continue
            if Line.find('<PcdsDynamicEx>') != -1:
                (FindLibraryClass, FindBuildOption, FindPcdsFeatureFlag, \
                FindPcdsPatchableInModule, FindPcdsFixedAtBuild, \
                FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, True)
                continue
            if Line.endswith('}'):
                #
                # find '}' at line tail: the component block is complete,
                # emit it and reset all state for the next component.
                #
                KeyValues.append([ListItem, LibraryClassItem, BuildOption, \
                                  Pcd])
                (FindBlock, FindLibraryClass, FindBuildOption, \
                FindPcdsFeatureFlag, FindPcdsPatchableInModule, \
                FindPcdsFixedAtBuild, FindPcdsDynamic, FindPcdsDynamicEx) = \
                (False, False, False, False, False, False, False, False)
                LibraryClassItem, BuildOption, Pcd = [], [], []
                continue
        if FindBlock:
            # Ordinary content line: file it under the active sub-section.
            # PCD lines are tagged with their section type constant.
            if FindLibraryClass:
                LibraryClassItem.append(Line)
            elif FindBuildOption:
                BuildOption.append(Line)
            elif FindPcdsFeatureFlag:
                Pcd.append((DataType.TAB_PCDS_FEATURE_FLAG, Line))
            elif FindPcdsPatchableInModule:
                Pcd.append((DataType.TAB_PCDS_PATCHABLE_IN_MODULE, Line))
            elif FindPcdsFixedAtBuild:
                Pcd.append((DataType.TAB_PCDS_FIXED_AT_BUILD, Line))
            elif FindPcdsDynamic:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC, Line))
            elif FindPcdsDynamicEx:
                Pcd.append((DataType.TAB_PCDS_DYNAMIC_EX, Line))
        else:
            # Component without a '{...}' block: emit it with empty lists.
            KeyValues.append([ListItem, [], [], []])
    return True
## GetSource
#
# Parse an INF source statement of the form
# <Filename>[|<Family>[|<TagName>[|<ToolCode>[|<PcdFeatureFlag>]]]]
#
# @param Item:             The source statement string
# @param ContainerFile:    File that contains the statement, for error reports
# @param FileRelativePath: Base path used to verify the source file exists
# @param LineNo:           Line number for error reports (-1 if unknown)
#
def GetSource(Item, ContainerFile, FileRelativePath, LineNo= -1):
    # Pad with separators so the four optional fields always exist
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT * 4)
    if len(Fields) < 5 or len(Fields) > 9:
        RaiseParserError(Item, 'Sources', ContainerFile, \
                         '<Filename>[|<Family>[|<TagName>[|<ToolCode>\
                         [|<PcdFeatureFlag>]]]]', LineNo)
    Fields[0] = NormPath(Fields[0])
    CheckFileExist(FileRelativePath, Fields[0], ContainerFile, 'Sources',
                   Item, LineNo)
    if Fields[4] != '':
        CheckPcdTokenInfo(Fields[4], 'Sources', ContainerFile, LineNo)
    return tuple(Fields[:5])
## GetBinary
#
# Parse an INF binary statement of the form
# <FileType>|<Filename>[|<Target>[|<TokenSpaceGuidCName>.<PcdCName>]]
#
# @param Item:          The binary statement string
# @param ContainerFile: File that contains the statement, for error reports
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetBinary(Item, ContainerFile, LineNo= -1):
    # Pad with one separator so at least two fields exist after splitting
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    if len(Fields) < 3 or len(Fields) > 5:
        RaiseParserError(Item, 'Binaries', ContainerFile, \
                         "<FileType>|<Filename>[|<Target>\
                         [|<TokenSpaceGuidCName>.<PcdCName>]]", LineNo)
    if len(Fields) == 3:
        return (Fields[0], Fields[1], Fields[2], '')
    # Four or more fields: validate the optional PCD feature flag
    if Fields[3] != '':
        CheckPcdTokenInfo(Fields[3], 'Binaries', ContainerFile, LineNo)
    return (Fields[0], Fields[1], Fields[2], Fields[3])
## GetGuidsProtocolsPpisOfInf
#
# Parse an INF Guid/Protocol/Ppi statement of the form
# <GuidCName>[|<PcdFeatureFlag>]
#
# @param Item: String as <GuidCName>[|<PcdFeatureFlag>]
#
# @retval Tuple (GuidCName, PcdFeatureFlag); the flag is '' when absent
#
def GetGuidsProtocolsPpisOfInf(Item):
    # Pad with one separator so the optional field always exists
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    return (Fields[0], Fields[1])
## GetGuidsProtocolsPpisOfDec
#
# Parse a DEC Guid/Protocol/Ppi statement of the form <GuidCName>=<GuidValue>.
# The value may be in C-structure format ({...}) or registry format; the C
# format is normalized to registry format.
#
# @param Item:          String as <GuidCName>=<GuidValue>
# @param Type:          Section type, used for error reports
# @param ContainerFile: File that contains the statement, for error reports
# @param LineNo:        Line number for error reports (-1 if unknown)
#
def GetGuidsProtocolsPpisOfDec(Item, Type, ContainerFile, LineNo= -1):
    Fields = GetSplitValueList(Item, DataType.TAB_EQUAL_SPLIT)
    if len(Fields) != 2:
        RaiseParserError(Item, Type, ContainerFile, '<CName>=<GuidValue>', \
                         LineNo)
    GuidValue = Fields[1]
    #
    # Convert C-format GUID to registry format; otherwise the value must
    # already be in registry format.
    #
    if GuidValue[0] == '{' and GuidValue[-1] == '}':
        RegisterFormatGuid = GuidStructureStringToGuidString(GuidValue)
        if RegisterFormatGuid == '':
            RaiseParserError(Item, Type, ContainerFile, \
                             'CFormat or RegisterFormat', LineNo)
    else:
        if not CheckGuidRegFormat(GuidValue):
            RaiseParserError(Item, Type, ContainerFile, \
                             'CFormat or RegisterFormat', LineNo)
        RegisterFormatGuid = GuidValue
    return (Fields[0], RegisterFormatGuid)
## GetPackage
#
# Parse an INF package statement of the form
# <PackagePath>[|<PcdFeatureFlag>]
#
# @param Item:             String as <PackagePath>[|<PcdFeatureFlag>]
# @param ContainerFile:    File that contains the statement, for error reports
# @param FileRelativePath: Base path used to verify the DEC file exists
# @param LineNo:           Line number for error reports (-1 if unknown)
#
def GetPackage(Item, ContainerFile, FileRelativePath, LineNo= -1):
    # Pad with one separator so the optional field always exists
    Fields = GetSplitValueList(Item + DataType.TAB_VALUE_SPLIT)
    PackagePath, FeatureFlag = Fields[0], Fields[1]
    CheckFileType(PackagePath, '.Dec', ContainerFile, 'package',
                  PackagePath, LineNo)
    CheckFileExist(FileRelativePath, PackagePath, ContainerFile, 'Packages',
                   PackagePath, LineNo)
    if FeatureFlag != '':
        CheckPcdTokenInfo(FeatureFlag, 'Packages', ContainerFile, LineNo)
    return (PackagePath, FeatureFlag)
## GetPcdOfInf
#
# Parse an INF PCD statement of the form
# <TokenSpaceGuidCName>.<PcdCName>[|<Value>]
#
# @param Item:   The string that describes the PCD
# @param Type:   The PCD section type (TAB_PCDS_* constant)
# @param File:   The file that contains the statement, for error reports
# @param LineNo: Line number for error reports
#
def GetPcdOfInf(Item, Type, File, LineNo):
    Format = '<TokenSpaceGuidCName>.<PcdCName>[|<Value>]'
    # Map the section type to the INF-specific PCD type keyword; unknown
    # types map to '' exactly as the original if/elif chain did.
    TypeToInfType = {
        DataType.TAB_PCDS_FIXED_AT_BUILD: DataType.TAB_INF_FIXED_PCD,
        DataType.TAB_PCDS_PATCHABLE_IN_MODULE: DataType.TAB_INF_PATCH_PCD,
        DataType.TAB_PCDS_FEATURE_FLAG: DataType.TAB_INF_FEATURE_PCD,
        DataType.TAB_PCDS_DYNAMIC_EX: DataType.TAB_INF_PCD_EX,
        DataType.TAB_PCDS_DYNAMIC: DataType.TAB_INF_PCD,
    }
    InfType = TypeToInfType.get(Type, '')
    TokenGuid = TokenName = ''
    Fields = GetSplitValueList(Item, DataType.TAB_VALUE_SPLIT, 1)
    TokenInfo = GetSplitValueList(Fields[0], DataType.TAB_SPLIT)
    if len(TokenInfo) != 2:
        RaiseParserError(Item, InfType, File, Format, LineNo)
    else:
        TokenGuid, TokenName = TokenInfo
    # The value after '|' is optional; None when not given
    Value = Fields[1] if len(Fields) > 1 else None
    return (TokenGuid, TokenName, Value, InfType)
## GetPcdOfDec
#
# Parse a DEC PCD statement of the form
# <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
#
# @param Item:   The PCD statement string
# @param Type:   The PCD section type suffix
# @param File:   The DEC file, used for error reports
# @param LineNo: Line number for error reports (-1 if unknown)
#
def GetPcdOfDec(Item, Type, File, LineNo= -1):
    Format = '<TokenSpaceGuidCName>.<PcdCName>|<Value>|<DatumType>|<Token>'
    TokenGuid = TokenName = Value = DatumType = Token = ''
    Fields = GetSplitValueList(Item)
    if len(Fields) != 4:
        RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
    else:
        Value, DatumType, Token = Fields[1], Fields[2], Fields[3]
    TokenInfo = GetSplitValueList(Fields[0], DataType.TAB_SPLIT)
    if len(TokenInfo) != 2:
        RaiseParserError(Item, 'Pcds' + Type, File, Format, LineNo)
    else:
        TokenGuid, TokenName = TokenInfo
    return (TokenGuid, TokenName, Value, DatumType, Token, Type)
## ParseDefine
#
# Extract the macro name and value from a DEFINE statement and insert a
# MODEL_META_DATA_DEFINE record into the table.
#
# @param LineValue:    The full DEFINE line text
# @param StartLine:    Line number where the statement starts
# @param Table:        Table object that receives the record via Insert()
# @param FileID:       File ID stored with the record
# @param SectionName:  Section name, used for the debug message
# @param SectionModel: Section model stored with the record
# @param Arch:         Architecture stored with the record
#
def ParseDefine(LineValue, StartLine, Table, FileID, SectionName, \
                SectionModel, Arch):
    Logger.Debug(Logger.DEBUG_2, ST.MSG_DEFINE_STATEMENT_FOUND % (LineValue, \
                 SectionName))
    # Everything after the (case-insensitive) 'DEFINE ' keyword is the
    # 'NAME = VALUE' payload.
    Keyword = DataType.TAB_DEFINE.upper() + ' '
    PayloadStart = LineValue.upper().find(Keyword) + len(Keyword)
    Define = GetSplitValueList(CleanString(LineValue[PayloadStart:]),
                               DataType.TAB_EQUAL_SPLIT, 1)
    Table.Insert(DataType.MODEL_META_DATA_DEFINE, Define[0], Define[1], '',
                 '', '', Arch, SectionModel, FileID, StartLine, -1,
                 StartLine, -1, 0)
## InsertSectionItems
#
# Append each item of a section, once per architecture, to the record list
# stored under Model in RecordSet.
#
# @param Model:           Key into RecordSet selecting the record list
# @param SectionItemList: List of (LineValue, StartLine, Comment, ...) items
# @param ArchList:        Architectures the section applies to
# @param ThirdList:       Per-arch third values, parallel to ArchList
# @param RecordSet:       Dict of record lists keyed by model
#
def InsertSectionItems(Model, SectionItemList, ArchList, \
                       ThirdList, RecordSet):
    # One pass per architecture; ThirdList is indexed in lockstep.
    for Index, Arch in enumerate(ArchList):
        Third = ThirdList[Index]
        if Arch == '':
            Arch = DataType.TAB_ARCH_COMMON
        Records = RecordSet[Model]
        for SectionItem in SectionItemList:
            LineValue, StartLine, Comment = SectionItem[:3]
            Logger.Debug(4, ST.MSG_PARSING % LineValue)
            #
            # DEFINE statements are parsed elsewhere; skip them here
            #
            if LineValue.upper().find(DataType.TAB_DEFINE.upper() + ' ') > -1:
                continue
            #
            # Ordinary section line: record it (ID is assigned later, -1)
            #
            Records.append([LineValue, Arch, StartLine, -1, Third, Comment])
        if RecordSet != {}:
            RecordSet[Model] = Records
## GenMetaDatSectionItem
#
# Append Value to the list stored under Key, creating the list on first use.
#
# @param Key:   Dict key
# @param Value: Value appended under that key
# @param List:  Dict mapping each key to a list of values
#
def GenMetaDatSectionItem(Key, Value, List):
    List.setdefault(Key, []).append(Value)
## GetPkgInfoFromDec
#
# Read package name, guid and version from a DEC file, caching both the
# parsed Dec object (GlobalData.gPackageDict) and the resulting info tuple
# (gPKG_INFO_DICT).
#
# @param Path: DEC file path
#
# @retval Tuple (PkgName, PkgGuid, PkgVersion), or (None, None, None) when
#         parsing fails with a FatalError
#
def GetPkgInfoFromDec(Path):
    Path = Path.replace('\\', '/')
    if not os.path.exists(Path):
        Logger.Error("\nUPT", FILE_NOT_FOUND, File=Path)
    # Fast path: info tuple already cached for this path
    if Path in gPKG_INFO_DICT:
        return gPKG_INFO_DICT[Path]
    try:
        if Path in GlobalData.gPackageDict:
            DecParser = GlobalData.gPackageDict[Path]
        else:
            DecParser = Dec(Path)
            GlobalData.gPackageDict[Path] = DecParser
        PkgInfo = (DecParser.GetPackageName(),
                   DecParser.GetPackageGuid(),
                   DecParser.GetPackageVersion())
        gPKG_INFO_DICT[Path] = PkgInfo
        return PkgInfo
    except FatalError:
        return None, None, None
## GetWorkspacePackage
#
# Scan the workspace and all extra package paths for DEC files and return a
# list of (PkgName, PkgGuid, PkgVersion, DecFilePath) tuples.
#
def GetWorkspacePackage():
    DecFileList = []
    WorkspaceDir = GlobalData.gWORKSPACE
    PackageDir = GlobalData.gPACKAGE_PATH
    for PkgRoot in [WorkspaceDir] + PackageDir:
        for Root, Dirs, Files in os.walk(PkgRoot):
            #
            # Prune version-control and hidden directories in place so that
            # os.walk does not descend into them.  The previous code removed
            # entries from Dirs while iterating over Dirs, which skips the
            # element following each removal (e.g. one of two adjacent
            # dot-directories survived pruning).  '.svn' is covered by the
            # leading-dot test.
            #
            Dirs[:] = [Dir for Dir in Dirs
                       if Dir != 'CVS' and not Dir.startswith('.')]
            for FileSp in Files:
                if FileSp.startswith('.'):
                    continue
                Ext = os.path.splitext(FileSp)[1]
                if Ext.lower() == '.dec':
                    DecFileList.append(
                        os.path.normpath(os.path.join(Root, FileSp)))
    #
    # Extract package name, guid and version from each DEC file found;
    # files whose info cannot be read completely are skipped.
    #
    PkgList = []
    for DecFile in DecFileList:
        (PkgName, PkgGuid, PkgVersion) = GetPkgInfoFromDec(DecFile)
        if PkgName and PkgGuid and PkgVersion:
            PkgList.append((PkgName, PkgGuid, PkgVersion, DecFile))
    return PkgList
## GetWorkspaceModule
#
# Scan the workspace for INF files and return their normalized paths.
#
def GetWorkspaceModule():
    InfFileList = []
    WorkspaceDir = GlobalData.gWORKSPACE
    for Root, Dirs, Files in os.walk(WorkspaceDir):
        #
        # Prune version-control, build-output and hidden directories in
        # place so os.walk does not descend into them.  The previous code
        # removed entries from Dirs while iterating over Dirs, which skips
        # the element following each removal.  '.svn' is covered by the
        # leading-dot test.
        #
        Dirs[:] = [Dir for Dir in Dirs
                   if Dir not in ('CVS', 'Build')
                   and not Dir.startswith('.')]
        for FileSp in Files:
            if FileSp.startswith('.'):
                continue
            Ext = os.path.splitext(FileSp)[1]
            if Ext.lower() == '.inf':
                InfFileList.append(
                    os.path.normpath(os.path.join(Root, FileSp)))
    return InfFileList
## MacroParser used to parse macro definition
#
# @param Line: The content contain linestring and line number
# @param FileName: The meta-file file name
# @param SectionType: Section for the Line belong to
# @param FileLocalMacros: A list contain Macro defined in [Defines] section.
#
def MacroParser(Line, FileName, SectionType, FileLocalMacros):
    # Line is a (LineContent, LineNo) pair.  Returns (Name, Value) of the
    # macro defined on the line, or (None, None) when the line is not a
    # DEFINE statement.
    MacroDefPattern = re.compile("^(DEFINE)[ \t]+")
    LineContent = Line[0]
    LineNo = Line[1]
    Match = MacroDefPattern.match(LineContent)
    if not Match:
        #
        # Not 'DEFINE/EDK_GLOBAL' statement, call decorated method
        #
        return None, None
    TokenList = GetSplitValueList(LineContent[Match.end(1):], \
                                  DataType.TAB_EQUAL_SPLIT, 1)
    #
    # Syntax check
    #
    if not TokenList[0]:
        Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACRONAME_NOGIVEN,
                    ExtraData=LineContent, File=FileName, Line=LineNo)
    if len(TokenList) < 2:
        Logger.Error('Parser', FORMAT_INVALID, ST.ERR_MACROVALUE_NOGIVEN,
                    ExtraData=LineContent, File=FileName, Line=LineNo)
    # NOTE(review): the unpack below assumes Logger.Error aborts or raises
    # on the syntax errors above -- confirm Logger.Error semantics.
    Name, Value = TokenList
    #
    # DEFINE defined macros
    #
    # Macros defined in the [Defines] header are recorded file-wide.
    if SectionType == DataType.MODEL_META_DATA_HEADER:
        FileLocalMacros[Name] = Value
    # Macro names must be upper case: a leading A-Z, then A-Z/0-9/_ only.
    ReIsValidMacroName = re.compile(r"^[A-Z][A-Z0-9_]*$", re.DOTALL)
    if ReIsValidMacroName.match(Name) is None:
        Logger.Error('Parser',
                     FORMAT_INVALID,
                     ST.ERR_MACRONAME_INVALID % (Name),
                     ExtraData=LineContent,
                     File=FileName,
                     Line=LineNo)
    # Validate MACRO Value
    #
    # <MacroDefinition> ::= [<Comments>]{0,}
    #                       "DEFINE" <MACRO> "=" [{<PATH>} {<VALUE>}] <EOL>
    # <Value>           ::= {<NumVal>} {<Boolean>} {<AsciiString>} {<GUID>}
    #                       {<CString>} {<UnicodeString>} {<CArray>}
    #
    # The definition of <NumVal>, <PATH>, <Boolean>, <GUID>, <CString>,
    # <UnicodeString>, <CArray> are subset of <AsciiString>.
    #
    # The value must consist of printable ASCII characters only.
    ReIsValidMacroValue = re.compile(r"^[\x20-\x7e]*$", re.DOTALL)
    if ReIsValidMacroValue.match(Value) is None:
        Logger.Error('Parser',
                     FORMAT_INVALID,
                     ST.ERR_MACROVALUE_INVALID % (Value),
                     ExtraData=LineContent,
                     File=FileName,
                     Line=LineNo)
    return Name, Value
## GenSection
#
# generate section contents
#
# @param SectionName: indicate the name of the section, details refer to
# INF, DEC specs
# @param SectionDict: section statement dict, key is SectionAttrs(arch,
# moduletype or platform may exist as needed) list
#                       separated by space,
# value is statement
#
def GenSection(SectionName, SectionDict, SplitArch=True, NeedBlankLine=False):
    # Renders "[SectionName.ARCH1, SectionName.ARCH2]"-style headers followed
    # by their statements; see the comment block above for parameter details.
    Content = ''
    for SectionAttrs in SectionDict:
        StatementList = SectionDict[SectionAttrs]
        if SectionAttrs and SectionName != 'Defines' and SectionAttrs.strip().upper() != DataType.TAB_ARCH_COMMON:
            if SplitArch:
                ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_SPACE_SPLIT)
            else:
                if SectionName != 'UserExtensions':
                    ArchList = GetSplitValueList(SectionAttrs, DataType.TAB_COMMENT_SPLIT)
                else:
                    ArchList = [SectionAttrs]
            for Index in xrange(0, len(ArchList)):
                ArchList[Index] = ConvertArchForInstall(ArchList[Index])
            Section = '[' + SectionName + '.' + (', ' + SectionName + '.').join(ArchList) + ']'
        else:
            # arch-common sections (and Defines) get a bare [SectionName] header
            Section = '[' + SectionName + ']'
        Content += '\n' + Section + '\n'
        if StatementList is not None:
            for Statement in StatementList:
                LineList = Statement.split('\n')
                NewStatement = ""
                for Line in LineList:
                    # ignore blank comment
                    if not Line.replace("#", '').strip() and SectionName not in ('Defines', 'Hob', 'Event', 'BootMode'):
                        continue
                    # add two space before non-comments line except the comments in Defines section
                    if Line.strip().startswith('#') and SectionName == 'Defines':
                        NewStatement += "%s\n" % Line
                        continue
                    NewStatement += "  %s\n" % Line
                if NeedBlankLine:
                    Content += NewStatement + '\n'
                else:
                    Content += NewStatement
    if NeedBlankLine:
        Content = Content[:-1]
    # NOTE(review): this strips the literal two-character sequence backslash-n,
    # not newline characters -- confirm whether '\n' (newline) was intended.
    if not Content.replace('\\n', '').strip():
        return ''
    return Content
## ConvertArchForInstall
# if Arch.upper() is in "IA32", "X64", "IPF", and "EBC", it must be upper case. "common" must be lower case.
# Anything else, the case must be preserved
#
# @param Arch: the arch string that need to be converted, it should be stripped before pass in
# @return: the arch string that get converted
#
def ConvertArchForInstall(Arch):
    """Normalize the case of an arch string for install.

    Known arch names (IA32/X64/IPF/EBC) are forced to upper case, COMMON is
    forced to lower case, and any other value is returned unchanged.
    """
    UpperArch = Arch.upper()
    if UpperArch in (DataType.TAB_ARCH_IA32, DataType.TAB_ARCH_X64,
                     DataType.TAB_ARCH_IPF, DataType.TAB_ARCH_EBC):
        return UpperArch
    if UpperArch == DataType.TAB_ARCH_COMMON:
        return Arch.lower()
    return Arch
| {
"content_hash": "463424d248e7e0db168308f23284215a",
"timestamp": "",
"source": "github",
"line_count": 1007,
"max_line_length": 120,
"avg_line_length": 39.15690168818272,
"alnum_prop": 0.5742182546727195,
"repo_name": "MattDevo/edk2",
"id": "81729d6cdbf7b3a2c43e6bfaf6072117e128296d",
"size": "40051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "BaseTools/Source/Python/UPT/Library/Parsing.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "4545237"
},
{
"name": "Batchfile",
"bytes": "93042"
},
{
"name": "C",
"bytes": "94289702"
},
{
"name": "C++",
"bytes": "20170310"
},
{
"name": "CSS",
"bytes": "1905"
},
{
"name": "DIGITAL Command Language",
"bytes": "13695"
},
{
"name": "GAP",
"bytes": "698245"
},
{
"name": "GDB",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "472114"
},
{
"name": "Lua",
"bytes": "249"
},
{
"name": "Makefile",
"bytes": "231845"
},
{
"name": "NSIS",
"bytes": "2229"
},
{
"name": "Objective-C",
"bytes": "4147834"
},
{
"name": "PHP",
"bytes": "674"
},
{
"name": "PLSQL",
"bytes": "24782"
},
{
"name": "Perl",
"bytes": "6218"
},
{
"name": "Python",
"bytes": "27130096"
},
{
"name": "R",
"bytes": "21094"
},
{
"name": "Roff",
"bytes": "28192"
},
{
"name": "Shell",
"bytes": "104362"
},
{
"name": "SourcePawn",
"bytes": "29427"
},
{
"name": "Visual Basic",
"bytes": "494"
}
],
"symlink_target": ""
} |
from msrest.paging import Paged
class LoadBalancerPaged(Paged):
    """
    A paging container for iterating over a list of :class:`LoadBalancer <azure.mgmt.network.v2016_12_01.models.LoadBalancer>` objects
    """
    # msrest deserialization map: the serialized keys 'nextLink' and 'value'
    # feed the Paged container's next_link and current_page attributes.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'current_page': {'key': 'value', 'type': '[LoadBalancer]'}
    }
    def __init__(self, *args, **kwargs):
        super(LoadBalancerPaged, self).__init__(*args, **kwargs)
| {
"content_hash": "b7e93fad753d489616f2705a17e3f066",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 133,
"avg_line_length": 29.875,
"alnum_prop": 0.6129707112970711,
"repo_name": "AutorestCI/azure-sdk-for-python",
"id": "df9453cdfabf53f931d6c1209068050b4d714e96",
"size": "952",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/load_balancer_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34619070"
}
],
"symlink_target": ""
} |
"""Support for the worldtides.info API."""
from __future__ import annotations
from datetime import timedelta
import logging
import time
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by WorldTides"
DEFAULT_NAME = "WorldTidesInfo"
# Poll the API at most once per hour.
SCAN_INTERVAL = timedelta(seconds=3600)
# Platform config: API key required; lat/lon fall back to the HA location
# in setup_platform() below.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_API_KEY): cv.string,
        vol.Optional(CONF_LATITUDE): cv.latitude,
        vol.Optional(CONF_LONGITUDE): cv.longitude,
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    }
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the WorldTidesInfo sensor."""
    name = config.get(CONF_NAME)
    lat = config.get(CONF_LATITUDE, hass.config.latitude)
    lon = config.get(CONF_LONGITUDE, hass.config.longitude)
    key = config.get(CONF_API_KEY)

    if None in (lat, lon):
        _LOGGER.error("Latitude or longitude not set in Home Assistant config")
        # Bail out: without coordinates the API request cannot succeed.
        # (Previously execution fell through and queried the API anyway.)
        return

    tides = WorldTidesInfoSensor(name, lat, lon, key)
    tides.update()
    # update() leaves data as None when the request/JSON decoding failed.
    if tides.data is None:
        _LOGGER.error("Location not available")
        return
    if tides.data.get("error") == "No location found":
        _LOGGER.error("Location not available")
        return

    add_entities([tides])
class WorldTidesInfoSensor(SensorEntity):
    """Representation of a WorldTidesInfo sensor."""

    def __init__(self, name, lat, lon, key):
        """Initialize the sensor."""
        self._name = name
        self._lat = lat
        self._lon = lon
        self._key = key
        # Raw JSON payload of the last successful API call (None until then).
        self.data = None

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def extra_state_attributes(self):
        """Return the state attributes of this device."""
        attr = {ATTR_ATTRIBUTION: ATTRIBUTION}
        # Guard: before the first successful update (or after a failed one)
        # there is no tide data; the original code raised TypeError here.
        if not self.data or len(self.data.get("extremes", [])) < 2:
            return attr
        if "High" in str(self.data["extremes"][0]["type"]):
            attr["high_tide_time_utc"] = self.data["extremes"][0]["date"]
            attr["high_tide_height"] = self.data["extremes"][0]["height"]
            attr["low_tide_time_utc"] = self.data["extremes"][1]["date"]
            attr["low_tide_height"] = self.data["extremes"][1]["height"]
        elif "Low" in str(self.data["extremes"][0]["type"]):
            attr["high_tide_time_utc"] = self.data["extremes"][1]["date"]
            attr["high_tide_height"] = self.data["extremes"][1]["height"]
            attr["low_tide_time_utc"] = self.data["extremes"][0]["date"]
            attr["low_tide_height"] = self.data["extremes"][0]["height"]
        return attr

    @property
    def native_value(self):
        """Return the state of the device."""
        if self.data:
            if "High" in str(self.data["extremes"][0]["type"]):
                tidetime = time.strftime(
                    "%I:%M %p", time.localtime(self.data["extremes"][0]["dt"])
                )
                return f"High tide at {tidetime}"
            if "Low" in str(self.data["extremes"][0]["type"]):
                tidetime = time.strftime(
                    "%I:%M %p", time.localtime(self.data["extremes"][0]["dt"])
                )
                return f"Low tide at {tidetime}"
            return None
        return None

    def update(self):
        """Get the latest data from WorldTidesInfo API."""
        start = int(time.time())
        resource = (
            "https://www.worldtides.info/api?extremes&length=86400"
            "&key={}&lat={}&lon={}&start={}"
        ).format(self._key, self._lat, self._lon, start)
        try:
            self.data = requests.get(resource, timeout=10).json()
            _LOGGER.debug("Data: %s", self.data)
            _LOGGER.debug("Tide data queried with start time set to: %s", start)
        # ValueError: malformed JSON body.  RequestException: network-level
        # failures (timeout, DNS, connection), which previously propagated
        # out of update() uncaught.
        except (ValueError, requests.exceptions.RequestException) as err:
            _LOGGER.error("Error retrieving data from WorldTidesInfo: %s", err.args)
            self.data = None
| {
"content_hash": "06d62725c8dfc2291fdbe2a85874f07b",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 84,
"avg_line_length": 33.62595419847328,
"alnum_prop": 0.6086265607264473,
"repo_name": "GenericStudent/home-assistant",
"id": "533328490c84fff786b414fab5259bf1e1d57caf",
"size": "4405",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/worldtidesinfo/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
import logging
from ask_amy.core.object_dictionary import ObjectDictionary
from ask_amy.core.directive import AudioPlayer
logger = logging.getLogger()  # module-wide logger (the root logger)
class Reply(ObjectDictionary):
    # Top-level Alexa reply envelope: version, sessionAttributes and response.
    def __init__(self, card_dict=None):
        super().__init__(card_dict)
    @classmethod
    def constr(cls, response, session_attributes=None):
        """Wrap a Response (plus optional session attributes) in a v1.0 envelope."""
        logger.debug("**************** entering Reply.constr")
        reply = {'version': '1.0'}
        if session_attributes is not None:
            reply['sessionAttributes'] = session_attributes
        reply['response'] = response.json()
        return cls(reply)
    @staticmethod
    def build(dialog_dict, event=None):
        """Build a reply dict from a dialog definition.

        dialog_dict may provide speech_out_ssml / re_prompt_ssml,
        speech_out_text / re_prompt_text (text takes precedence over ssml),
        should_end_session (defaults to True), and either card_title or a
        nested 'card' dict.
        """
        logger.debug("**************** entering Reply.build")
        prompt = None
        reprompt = None
        card = None
        if 'speech_out_ssml' in dialog_dict:
            prompt = Prompt.ssml(dialog_dict['speech_out_ssml'], event)
        if 're_prompt_ssml' in dialog_dict:
            reprompt = Prompt.ssml(dialog_dict['re_prompt_ssml'], event)
        # Note speech_out_text will take precedence over speech_out_ssml
        if 'speech_out_text' in dialog_dict:
            prompt = Prompt.text(dialog_dict['speech_out_text'], event)
        if 're_prompt_text' in dialog_dict:
            reprompt = Prompt.text(dialog_dict['re_prompt_text'], event)
        if 'should_end_session' in dialog_dict:
            should_end_session = dialog_dict['should_end_session']
        else:
            should_end_session = True
        # card_title implies a Simple card built from the spoken text;
        # a 'card' dict (checked next) overrides it.
        if 'card_title' in dialog_dict:
            card = Card.simple(dialog_dict['card_title'], dialog_dict['speech_out_text'], event)
        if 'card' in dialog_dict:
            card_dict = dialog_dict['card']
            if 'small_image' in card_dict:
                card = Card.standard(card_dict['title'], card_dict['content'], card_dict['small_image'],
                                     card_dict['large_image'], event)
            elif 'type' in card_dict:
                card = Card.link_account(None, None)
            else:
                card = Card.simple(card_dict['title'], card_dict['content'], event)
        response = Response.constr(prompt, reprompt, card, should_end_session)
        attributes = {}
        if event is not None:
            if event.session is not None:
                attributes = event.session.attributes
        reply = Reply.constr(response, attributes)
        return reply.json()
    @staticmethod
    def build_audio(dialog_dict, event=None):
        """Build a reply carrying an AudioPlayer directive.

        Reads the 'command' request attribute: 'play' streams
        active_url/offset from the session; any other value stops playback.
        """
        logger.debug("**************** entering Reply.build")
        prompt = None
        card = None
        # NOTE(review): should_end_session is assigned but never used here;
        # Response.audio_play always ends the session.
        should_end_session = True
        command = event.request.attributes['command']
        if command == 'play':
            url = event.session.attributes['active_url']
            offset = event.session.attributes['offset']
            audio_player = AudioPlayer.play(url, offset)
        else:  # command must be stop
            audio_player = AudioPlayer.stop()
        if 'speech_out_ssml' in dialog_dict:
            prompt = Prompt.ssml(dialog_dict['speech_out_ssml'], event)
        # Note speech_out_text will take precedence over speech_out_ssml
        if 'speech_out_text' in dialog_dict:
            prompt = Prompt.text(dialog_dict['speech_out_text'], event)
        if 'card_title' in dialog_dict:
            card = Card.simple(dialog_dict['card_title'], dialog_dict['speech_out_text'], event)
        if 'card' in dialog_dict:
            card_dict = dialog_dict['card']
            if 'small_image' in card_dict:
                card = Card.standard(card_dict['title'], card_dict['content'], card_dict['small_image'],
                                     card_dict['large_image'], event)
            else:
                card = Card.simple(card_dict['title'], card_dict['content'], event)
        response = Response.audio_play(audio_player, prompt, card)
        attributes = {}
        if event is not None:
            if event.session is not None:
                attributes = event.session.attributes
        reply = Reply.constr(response, attributes)
        return reply.json()
class Response(ObjectDictionary):
    """Builds the 'response' portion of an Alexa reply."""
    def __init__(self, card_dict=None):
        super().__init__(card_dict)
    @classmethod
    def constr(cls, out_speech, reprompt=None, card=None, should_end_session=True):
        """Construct a standard (non-audio) response.

        out_speech/reprompt/card expose json(); any that are None are
        omitted from the response dict.
        """
        logger.debug("**************** entering Response.constr")
        response = {}
        if out_speech is not None:
            response['outputSpeech'] = out_speech.json()
        if reprompt is not None:
            output_speech = {'outputSpeech': reprompt.json()}
            response['reprompt'] = output_speech
        if card is not None:
            response['card'] = card.json()
        if should_end_session is not None:
            response['shouldEndSession'] = should_end_session
        return cls(response)
    @classmethod
    def _audio_directive_response(cls, directive, out_speech=None, card=None):
        """Shared builder for AudioPlayer directive responses.

        Audio directive responses always end the session.
        """
        logger.debug("**************** entering Response.audio_play")
        response = {}
        if out_speech is not None:
            response['outputSpeech'] = out_speech.json()
        if card is not None:
            response['card'] = card.json()
        response['directives'] = []
        response['directives'].append(directive.json())
        response['shouldEndSession'] = True
        return cls(response)
    @classmethod
    def audio_play(cls, audio_player, out_speech=None, card=None):
        """Response carrying an AudioPlayer Play (or Stop) directive."""
        return cls._audio_directive_response(audio_player, out_speech, card)
    @classmethod
    def audio_stop(cls, audio_stop, out_speech=None, card=None):
        """Response carrying an AudioPlayer Stop directive.

        Previously a verbatim duplicate of audio_play; both now share
        _audio_directive_response.
        """
        return cls._audio_directive_response(audio_stop, out_speech, card)
class CommunicationChannel(ObjectDictionary):
    """Shared helpers for spoken prompts and companion-app cards.

    Provides text normalization (list -> single string) and "{token}"
    substitution from an event's request/session attributes.
    """
    def __init__(self, card_dict=None):
        super().__init__(card_dict)
    @staticmethod
    def concat_text_if_list(text_obj):
        """Return text_obj as one string; a list of strings is joined.

        Anything that is neither an exact str nor an exact list yields ''.
        """
        logger.debug("**************** entering OutText.concat_text_if_list")
        # Exact type() checks preserved from the original: only str and
        # list values are accepted.
        if type(text_obj) is str:
            return text_obj
        if type(text_obj) is list:
            # join replaces the original quadratic '+=' accumulation
            return "".join(text_obj)
        return ''
    @staticmethod
    def inject_event_data(text, event):
        """Replace each "{token}" in text with its event attribute value.

        Tokens resolve via process_token(); unknown tokens become ''.
        None passes through unchanged.
        """
        logger.debug("**************** entering OutText.inject_session_data")
        if text is None:
            return text
        out_list = []
        done = False
        while not done:
            start_token_index = text.find("{")
            if start_token_index == -1:
                out_list.append(text)
                done = True
            else:
                end_token_index = text.find("}")
                fragment = text[:start_token_index]
                token = text[start_token_index + 1:end_token_index]
                text = text[end_token_index + 1:]
                if fragment != '':
                    out_list.append(fragment)
                out_list.append(CommunicationChannel.process_token(token, event))
                if text.find("{") == -1:
                    if text != '':
                        out_list.append(text)
                    done = True
        return "".join(out_list)
    @staticmethod
    def process_token(token, event):
        """Resolve one token name: request attributes first, then session."""
        logger.debug("**************** entering OutText.process_token")
        if event.request.attribute_exists(token):
            value = event.request.attributes[token]
        elif event.session.attribute_exists(token):
            value = event.session.attributes[token]
        else:
            value = ''
        return str(value)
class Prompt(CommunicationChannel):
    """Output speech, rendered either as SSML or as plain text."""
    def __init__(self, card_dict=None):
        super().__init__(card_dict)
    @classmethod
    def ssml(cls, ssml, event=None):
        """Build an SSML prompt, wrapping the speech in <speak> tags."""
        logger.debug("**************** entering Prompt.ssml")
        speech = Prompt.concat_text_if_list(ssml)
        if event is not None:
            speech = Prompt.inject_event_data(speech, event)
        return cls({'type': 'SSML', 'ssml': "<speak>{}</speak>".format(speech)})
    @classmethod
    def text(cls, text, event=None):
        """Build a plain-text prompt."""
        logger.debug("**************** entering Prompt.text")
        speech = Prompt.concat_text_if_list(text)
        if event is not None:
            speech = Prompt.inject_event_data(speech, event)
        return cls({'type': 'PlainText', 'text': speech})
class Card(CommunicationChannel):
    # Home card shown in the Alexa companion app alongside the spoken reply.
    def __init__(self, card_dict=None):
        super().__init__(card_dict)
    @classmethod
    def simple(cls, title, content, event=None):
        """Build a Simple card (title + plain text content)."""
        logger.debug("**************** entering Card.simple")
        content = Card.concat_text_if_list(content)
        if event is not None:
            content = Card.inject_event_data(content, event)
            title = Card.inject_event_data(title, event)
        card = {'type': 'Simple', 'title': title, 'content': content}
        return cls(card)
    @classmethod
    def standard(cls, title, content, small_image_url, large_image_url, event=None):
        """Build a Standard card (text plus small/large images)."""
        logger.debug("**************** entering Card.standard")
        content = Card.concat_text_if_list(content)
        if event is not None:
            content = Card.inject_event_data(content, event)
            title = Card.inject_event_data(title, event)
            small_image_url = Card.inject_event_data(small_image_url, event)
            large_image_url = Card.inject_event_data(large_image_url, event)
        card = {'type': 'Standard', 'title': title, 'text': content}
        image = {}
        card['image'] = image
        image['smallImageUrl'] = small_image_url  # 720w x 480h pixels
        image['largeImageUrl'] = large_image_url  # 1200w x 800h pixels
        return cls(card)
    @classmethod
    def link_account(cls, title, content, event=None):
        """Build a LinkAccount card.

        NOTE(review): title/content are accepted (and content is even
        processed) but only {'type': 'LinkAccount'} is emitted -- confirm
        whether the parameters are intentionally unused.
        """
        logger.debug("**************** entering Card.link_account")
        content = Card.concat_text_if_list(content)
        if event is not None:
            content = Card.inject_event_data(content, event)
        card = {'type': 'LinkAccount'}
        return cls(card)
| {
"content_hash": "d5bed67ad85d84de495a6e275a3df6ff",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 104,
"avg_line_length": 37.93014705882353,
"alnum_prop": 0.567994572065523,
"repo_name": "dphiggs01/ask_amy",
"id": "29c19c6a414691a1bda775f60e4f924148138d7e",
"size": "10317",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ask_amy/core/reply.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7300"
},
{
"name": "HTML",
"bytes": "2620"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "109419"
},
{
"name": "Shell",
"bytes": "152"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Set a default ordering (by name) on the Organisation model."""

    dependencies = [
        ('lizard_auth_client', '0006_auto_20181115_1313'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='organisation',
            options={'ordering': ['name']},
        ),
    ]
| {
"content_hash": "15680f62e3f1778356c45124a8e4aabe",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 58,
"avg_line_length": 21.294117647058822,
"alnum_prop": 0.6022099447513812,
"repo_name": "lizardsystem/lizard-auth-client",
"id": "53c9a4f9560bb222a902cf24d3be042b2c8149ef",
"size": "386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lizard_auth_client/migrations/0007_auto_20181116_1015.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "51"
},
{
"name": "Dockerfile",
"bytes": "335"
},
{
"name": "HTML",
"bytes": "8323"
},
{
"name": "JavaScript",
"bytes": "109"
},
{
"name": "Python",
"bytes": "207259"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class 檢查工具Config(AppConfig):
    # Django AppConfig for the 檢查工具 ("inspection tool") app.
    name = '檢查工具'
    def ready(self):
        # registering signals: importing the module connects its signal
        # handlers as a side effect; the bare name reference keeps the
        # import from looking unused.
        from 檢查工具 import signals
        signals
| {
"content_hash": "e781d597febfacb9f2ea6f7e0327b925",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 33,
"avg_line_length": 18.4,
"alnum_prop": 0.6467391304347826,
"repo_name": "i3thuan5/gi2_liau7_khoo3",
"id": "ffa57f8e50d55f5777d983d40443bd6980026f6c",
"size": "208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "檢查工具/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1420"
},
{
"name": "HTML",
"bytes": "6927"
},
{
"name": "Python",
"bytes": "120840"
}
],
"symlink_target": ""
} |
"""
Django settings for deckbuilder project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'o$xyt*wc+d5ld#ux8t6sv5&3qmb$r$rmvs6*5ca+ng11g98pys'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'card',
'deck',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'deckbuilder.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'deckbuilder.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'PAGE_SIZE': 100,
'TEST_REQUEST_DEFAULT_FORMAT': 'json',
}
| {
"content_hash": "6c224a78c10d499d5216aeaf0f1ca61d",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 91,
"avg_line_length": 25.689922480620154,
"alnum_prop": 0.6864815932407966,
"repo_name": "vokal/deckbuilder-api",
"id": "1b2640defffe25e1f3d7a9a50be92e71141bdf67",
"size": "3314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deckbuilder/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20023"
}
],
"symlink_target": ""
} |
import json
import struct
import time
from calendar import timegm
from binascii import hexlify, unhexlify
from .objecttypes import object_type
from .utils import unicodify
timeformat = "%Y-%m-%dT%H:%M:%S%Z"  # strptime format used by PointInTime ("UTC" is appended before parsing)
def varint(n):
    """Encode the non-negative integer *n* as a LEB128-style varint."""
    encoded = b""
    while n >= 0x80:
        # low 7 bits with continuation flag set
        encoded += bytes([(n & 0x7F) | 0x80])
        n >>= 7
    return encoded + bytes([n])
def varintdecode(data):  # pragma: no cover
    """Decode a LEB128-style varint from *data* and return the integer."""
    result = 0
    shift = 0
    for byte in bytes(data):
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:  # continuation flag clear: last byte
            break
        shift += 7
    return result
def variable_buffer(s):
    """ Encode variable length buffer

    Prefixes the byte string *s* with its varint-encoded length.
    """
    return varint(len(s)) + s
def JsonObj(data):
    """ Return json object from data
    If data has a __json__() method, use that, else assume it follows the
    convention that its string representation is interpretable as valid json.
    (The latter can be problematic if str(data) returns, e.g., "1234". Was
    this supposed to be the string "1234" or the number 1234? If this
    ambiguity exists, the data type must implement __json__().)
    """
    try:
        return data.__json__()
    except Exception:
        # No __json__ (or it raised): fall back to parsing str(data) as JSON.
        return json.loads(str(data))
class Uint8:
    """Unsigned 8-bit integer, serialized as a single byte."""
    def __init__(self, d):
        self.data = int(d)
    def __bytes__(self):
        return struct.pack("<B", self.data)
    def __str__(self):
        return str(self.data)
class Int16:
    """Signed 16-bit integer, packed little-endian."""
    def __init__(self, d):
        self.data = int(d)
    def __bytes__(self):
        # self.data is already an int (coerced in __init__)
        return struct.pack("<h", self.data)
    def __str__(self):
        return str(self.data)
class Uint16:
    """Unsigned 16-bit integer, packed little-endian."""
    def __init__(self, d):
        self.data = int(d)
    def __bytes__(self):
        return struct.pack("<H", self.data)
    def __str__(self):
        return str(self.data)
class Uint32:
    """Unsigned 32-bit integer, packed little-endian."""
    def __init__(self, d):
        self.data = int(d)
    def __bytes__(self):
        return struct.pack("<I", self.data)
    def __str__(self):
        return str(self.data)
class Uint64:
    """Unsigned 64-bit integer, packed little-endian."""
    def __init__(self, d):
        self.data = int(d)
    def __bytes__(self):
        return struct.pack("<Q", self.data)
    def __str__(self):
        return str(self.data)
class Varint32:
    """Integer serialized with variable-length (varint) encoding."""
    def __init__(self, d):
        self.data = int(d)
    def __bytes__(self):
        return varint(self.data)
    def __str__(self):
        return str(self.data)
class Int64:
    """Signed 64-bit integer, packed little-endian."""
    def __init__(self, d):
        self.data = int(d)
    def __bytes__(self):
        return struct.pack("<q", self.data)
    def __str__(self):
        return str(self.data)
class String:
    """Length-prefixed string; the payload is produced by unicodify()."""
    def __init__(self, d):
        self.data = d
    def __bytes__(self):
        # Empty/None data serializes as a zero-length payload.
        payload = unicodify(self.data) if self.data else b""
        return varint(len(payload)) + payload
    def __str__(self):
        return str(self.data)
class Bytes:
    """Hex-digit string, byte-serialized as length-prefixed raw bytes.

    Stored internally as a string of hex digits, so
    ``len(str(Bytes("deadbeef"))) == 8`` (eight hex chars) while
    ``len(bytes(Bytes("deadbeef"))) == 5`` (four data bytes plus the varint
    length).  ``__json__()`` keeps the value a string even when the hex
    digits happen to be all numeric.
    """
    def __init__(self, d):
        self.data = d
    def __bytes__(self):
        raw = unhexlify(bytes(self.data, "utf-8"))
        return varint(len(raw)) + raw
    def __json__(self):
        return str(self.data)
    def __str__(self):
        return str(self.data)
class Hash(Bytes):
    # Hex digest; unlike Bytes it byte-serializes raw, with no length prefix.
    def json(self):
        return str(self.data)
    def __bytes__(self):
        return unhexlify(bytes(self.data, "utf-8"))
class Ripemd160(Hash):
    # 20-byte RIPEMD-160 digest, supplied as 40 hex characters.
    def __init__(self, a):
        assert len(a) == 40, "Require 40 char long hex"
        super().__init__(a)
class Sha1(Hash):
    # 20-byte SHA-1 digest, supplied as 40 hex characters.
    def __init__(self, a):
        assert len(a) == 40, "Require 40 char long hex"
        super().__init__(a)
class Sha256(Hash):
    # 32-byte SHA-256 digest, supplied as 64 hex characters.
    def __init__(self, a):
        assert len(a) == 64, "Require 64 char long hex"
        super().__init__(a)
class Hash160(Hash):
    # 20-byte HASH160 digest, supplied as 40 hex characters.
    def __init__(self, a):
        assert len(a) == 40, "Require 40 char long hex"
        super().__init__(a)
class Void:
    # Placeholder type: serializes to zero bytes and an empty string.
    def __init__(self):
        pass
    def __bytes__(self):
        return b""
    def __str__(self):
        return ""
class Array:
    """Varint-length-prefixed sequence of serializable items."""
    def __init__(self, d):
        self.data = d or []
        self.length = Varint32(len(self.data))
    def __bytes__(self):
        return bytes(self.length) + b"".join(bytes(item) for item in self.data)
    def __str__(self):
        rendered = []
        for item in self.data:
            # Prefer the JSON representation; fall back to str() for items
            # that cannot be rendered as JSON.
            try:
                rendered.append(JsonObj(item))
            except Exception:
                rendered.append(str(item))
        return json.dumps(rendered)
class PointInTime:
    """Timestamp string, serialized as a 32-bit epoch-seconds value.

    The stored string is parsed as UTC ("UTC" is appended before strptime).
    """
    def __init__(self, d):
        self.data = d
    def __bytes__(self):
        epoch = timegm(time.strptime(self.data + "UTC", timeformat))
        return struct.pack("<I", epoch)
    def __str__(self):
        return self.data
class Signature:
    """Raw signature bytes; the string form is the JSON-quoted hex digest."""
    def __init__(self, d):
        self.data = d
    def __bytes__(self):
        return self.data
    def __str__(self):
        hex_form = hexlify(self.data).decode("ascii")
        return json.dumps(hex_form)
class Bool(Uint8):  # Bool = Uint8
    """Boolean stored as a Uint8; the string form is JSON true/false."""
    def __init__(self, d):
        super().__init__(d)
    def __str__(self):
        return json.dumps(bool(self.data))
class Set(Array):  # Set = Array
    # Same wire format as Array; separate name for protocol clarity.
    def __init__(self, d):
        super().__init__(d)
class Fixed_array:
    # Placeholder for the fixed-length array type; not implemented.
    pass
class Optional:
    """Wrapper serializing a presence byte followed by the payload.

    An absent or empty-serializing value becomes a single 0 byte; otherwise
    a 1 byte precedes the payload bytes.
    """
    def __init__(self, d):
        self.data = d
    def __bytes__(self):
        if not self.data:
            return bytes(Bool(0))
        payload = bytes(self.data)
        if not payload:
            return bytes(Bool(0))
        return bytes(Bool(1)) + payload
    def __str__(self):
        return str(self.data)
    def isempty(self):
        """True when no value is present or the value serializes to nothing."""
        if self.data is None:
            return True
        if not str(self.data):  # pragma: no cover
            return True
        return not bytes(self.data)
class Static_variant:
    # Tagged union: varint type id followed by the payload's serialization.
    def __init__(self, d, type_id):
        self.data = d
        self.type_id = type_id
    def __bytes__(self):
        return varint(self.type_id) + bytes(self.data)
    def __str__(self):
        # JSON form is a [type_id, payload] pair
        return json.dumps([self.type_id, self.data.json()])
class Map:
    """Varint-length-prefixed list of (key, value) pairs."""
    def __init__(self, data):
        self.data = data
    def __bytes__(self):
        out = varint(len(self.data))
        for entry in self.data:
            out += bytes(entry[0]) + bytes(entry[1])
        return out
    def __str__(self):
        rendered = [[str(entry[0]), str(entry[1])] for entry in self.data]
        return json.dumps(rendered)
class Id:
    # Thin wrapper serializing an object instance id as a varint.
    def __init__(self, d):
        self.data = Varint32(d)
    def __bytes__(self):
        return bytes(self.data)
    def __str__(self):
        return str(self.data)
class VoteId:
    """Vote identifier "type:instance", packed into a single uint32."""
    def __init__(self, vote):
        parts = vote.split(":")
        assert len(parts) == 2
        self.type = int(parts[0])
        self.instance = int(parts[1])
    def __bytes__(self):
        # low byte carries the type; the remaining 24 bits the instance
        packed = (self.type & 0xFF) | (self.instance << 8)
        return struct.pack("<I", packed)
    def __str__(self):
        return "{}:{}".format(self.type, self.instance)
class ObjectId:
    """ Encodes protocol ids - serializes to the *instance* only!

    An id string has the form "space.type.instance" (e.g. "1.2.100").  When
    *type_verify* names a known object type, the id's type field is asserted
    to match it.
    """
    object_types = object_type
    def __init__(self, object_str, type_verify=None):
        if len(object_str.split(".")) == 3:
            space, type, id = object_str.split(".")
            self.space = int(space)
            self.type = int(type)
            self.instance = Id(int(id))
            self.Id = object_str
            if type_verify:
                assert (
                    type_verify in self.object_types
                ), "Type {} is not defined!".format(type_verify)
                # Error-message typo fixed ("Excpected" -> "Expected")
                assert self.object_types[type_verify] == int(type), (
                    "Object id does not match object type! "
                    + "Expected %d, got %d"
                    % (self.object_types[type_verify], int(type))
                )
        else:
            raise Exception("Object id is invalid")
    def __bytes__(self):
        return bytes(self.instance)  # only yield instance
    def __str__(self):
        return self.Id
class FullObjectId:
    """ Encodes object ids - serializes to a full object id
    """
    def __init__(self, object_str):
        parts = object_str.split(".")
        if len(parts) != 3:
            raise ValueError("Object id is invalid")
        space, type, id = parts
        self.space = int(space)
        self.type = int(type)
        self.id = int(id)
        self.instance = Id(int(id))
        self.Id = object_str
    def __bytes__(self):
        # space in the top byte, type in the next, instance in the low bits
        packed = self.space << 56 | self.type << 48 | self.id
        return packed.to_bytes(8, byteorder="little", signed=False)
    def __str__(self):
        return self.Id
class Enum8(Uint8):
    """Uint8 that stores the index of ``selection`` within ``options``.

    Subclasses must provide the ``options`` list; ``str()`` yields the
    selected option's name.
    """

    # List needs to be provided by super class
    options = []

    def __init__(self, selection):
        invalid = selection not in self.options
        if not invalid and isinstance(selection, int):
            invalid = len(self.options) < selection
        if invalid:
            raise ValueError(
                "Options are {}. Given '{}'".format(str(self.options), selection)
            )
        super(Enum8, self).__init__(self.options.index(selection))

    def __str__(self):
        return str(self.options[self.data])
| {
"content_hash": "c94c9568a5315429baf25af97803d0ab",
"timestamp": "",
"source": "github",
"line_count": 431,
"max_line_length": 88,
"avg_line_length": 22.533642691415313,
"alnum_prop": 0.5270799011532126,
"repo_name": "xeroc/python-graphenelib",
"id": "d19d7264453db463ef0bc64bf6b2213ada8993d1",
"size": "9736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "graphenebase/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "872"
},
{
"name": "Python",
"bytes": "922435"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
from pandas.util.testing import assert_frame_equal
import itertools
import pytest
import pandas as pd
from freediscovery.ingestion import DocumentIndex
from .run_suite import check_cache
from freediscovery.exceptions import (NotFound)
# Location of the bundled test corpus (ds_001) and the scratch cache dir.
basename = os.path.dirname(__file__)
data_dir = os.path.join(basename, "..", "data", "ds_001", "raw")
cache_dir = check_cache()
# File names expected inside the raw corpus, plus their absolute paths.
fnames_in = ['0.7.47.101442.txt',
             '0.7.47.117435.txt',
             '0.7.6.28635.txt',
             '0.7.6.28636.txt',
             '0.7.6.28637.txt',
             '0.7.6.28638.txt']
fnames_in_abs = [os.path.join(data_dir, el) for el in fnames_in]
def test_ingestion_base_dir():
    """Indexing a folder exposes the normalized dir, filenames and db table."""
    index = DocumentIndex.from_folder(data_dir)
    dir_out = index.data_dir
    db = index.data
    assert dir_out == os.path.normpath(data_dir)
    assert_array_equal(db.columns.values, ['file_path', 'internal_id'])
    assert_array_equal(db.file_path.values, fnames_in)
    normalized = [os.path.normpath(el) for el in index.filenames]
    expected = [os.path.join(dir_out, el) for el in db.file_path.values]
    assert_array_equal(normalized, expected)
def test_search_2fields():
    """Search by internal_id (with or without extra columns) and by file_path."""
    index = DocumentIndex.from_folder(data_dir)

    query = pd.DataFrame([{'internal_id': 3},
                          {'internal_id': 1},
                          {'internal_id': 2}])
    res = index.search(query)
    assert_equal(res.internal_id.values, [3, 1, 2])
    assert_array_equal(sorted(res.columns), sorted(['internal_id', 'file_path']))

    # extra columns in the query must not prevent matching on internal_id
    query = pd.DataFrame([{'internal_id': 1, 'a': 2},
                          {'internal_id': 2, 'b': 4},
                          {'internal_id': 1, 'a': 3}])
    res = index.search(query)
    assert_equal(res.internal_id.values, [1, 2, 1])
    assert_array_equal(sorted(res.columns), sorted(['internal_id', 'file_path']))

    # with drop=False the extra query columns are kept in the result
    res = index.search(query, drop=False)
    assert_equal(res.internal_id.values, [1, 2, 1])
    assert_array_equal(sorted(res.columns),
                       sorted(['internal_id', 'file_path', 'a', 'b']))

    query = pd.DataFrame([{'file_path': "0.7.6.28637.txt"},
                          {'file_path': "0.7.47.117435.txt"}])
    res = index.search(query)
    expected_ids = [index.data.file_path.values.tolist().index(el)
                    for el in query.file_path.values]
    assert_array_equal(expected_ids, res.internal_id)
def test_search_not_found():
    """A query referencing an unknown file_path raises NotFound."""
    index = DocumentIndex.from_folder(data_dir)
    query = pd.DataFrame([{'file_path': "DOES_NOT_EXISTS"},
                          {'file_path': "0.7.6.28637.txt"}])
    with pytest.raises(NotFound):
        index.search(query)
@pytest.mark.parametrize('return_file_path', ['return_file_path', 'dont_return_file_path'])
def test_ingestion_render(return_file_path):
    """render_dict / render_list results, with and without file_path column."""
    def _without_file_path(rd):
        # Normalize to a DataFrame and strip the optional file_path column.
        rd = pd.DataFrame(rd)
        if return_file_path:
            assert 'file_path' in rd.columns
            del rd['file_path']
        return rd

    # make it a binary variable
    return_file_path = (return_file_path == 'return_file_path')
    md = [{'file_path': '/test', 'document_id': 2},
          {'file_path': '/test2', 'document_id': 1},
          {'file_path': '/test3', 'document_id': 7},
          {'file_path': '/test8', 'document_id': 9},
          {'file_path': '/test9', 'document_id': 4},
          ]
    for idx, row in enumerate(md):
        row['internal_id'] = idx
    index = DocumentIndex.from_list(md)

    query = pd.DataFrame([{'a': 2, 'internal_id': 3},
                          {'a': 4, 'internal_id': 1}])
    expected = pd.DataFrame([{'a': 2, 'internal_id': 3, 'document_id': 9},
                             {'a': 4, 'internal_id': 1, 'document_id': 1}])
    rd = index.render_dict(query, return_file_path=return_file_path)
    rd = _without_file_path(rd)
    assert_frame_equal(rd, expected)

    # Without a query, every document is rendered.
    rd = index.render_dict(return_file_path=return_file_path)
    rd = _without_file_path(rd)
    assert_frame_equal(rd.loc[[0]],
                       pd.DataFrame([{'internal_id': 0, 'document_id': 2}]))
    assert len(rd) == len(md)

    rd = index.render_list(expected, return_file_path=return_file_path)
    rd = _without_file_path(rd)
    assert sorted(rd.keys()) == sorted(['internal_id', 'document_id', 'a'])
    assert_frame_equal(pd.DataFrame(rd),
                       pd.DataFrame([{'a': 2, 'internal_id': 3, 'document_id': 9},
                                     {'a': 4, 'internal_id': 1, 'document_id': 1}]))

    rd = index.render_list()
    assert sorted(rd.keys()) == sorted(['internal_id', 'document_id'])
def test_search_document_id():
    """internal_id takes precedence over document_id; document_id alone works."""
    md = [{'file_path': '/test', 'document_id': 2},
          {'file_path': '/test2', 'document_id': 1},
          {'file_path': '/test3', 'document_id': 7},
          {'file_path': '/test8', 'document_id': 9},
          {'file_path': '/test9', 'document_id': 4},
          ]
    for internal_id, row in enumerate(md):
        row['internal_id'] = internal_id
    index = DocumentIndex.from_list(md)

    query = pd.DataFrame([{'internal_id': 1},
                          {'internal_id': 2},
                          {'internal_id': 1}])
    res = index.search(query)
    assert_equal(res.internal_id.values, [1, 2, 1])
    assert_array_equal(sorted(res.columns),
                       sorted(['internal_id', 'file_path', 'document_id']))

    # internal_id wins over a conflicting document_id
    query = pd.DataFrame([{'internal_id': 1, 'document_id': 2},
                          {'internal_id': 2, 'document_id': 2},
                          {'internal_id': 1, 'document_id': 2}])
    res = index.search(query)
    assert_equal(res.internal_id.values, [1, 2, 1])

    query = pd.DataFrame([{'document_id': 4},
                          {'document_id': 9},
                          {'document_id': 2}])
    res = index.search(query)
    assert_equal(res.internal_id.values, [4, 3, 0])
def test_search_document_rendition_id():
    """When document_id is ambiguous, rendition_id is required as well."""
    md = [{'file_path': '/test', 'document_id': 0, 'rendition_id': 0},
          {'file_path': '/test2', 'document_id': 0, 'rendition_id': 1},
          {'file_path': '/test3', 'document_id': 1, 'rendition_id': 0},
          {'file_path': '/test8', 'document_id': 2, 'rendition_id': 0},
          {'file_path': '/test9', 'document_id': 3, 'rendition_id': 0},
          ]
    for internal_id, row in enumerate(md):
        row['internal_id'] = internal_id
    index = DocumentIndex.from_list(md)

    # indexing with internal_id always works
    query = pd.DataFrame([{'internal_id': 1},
                          {'internal_id': 2},
                          {'internal_id': 1}])
    res = index.search(query)
    assert_equal(res.internal_id.values, [1, 2, 1])
    assert_array_equal(sorted(res.columns),
                       sorted(['internal_id', 'file_path',
                               'document_id', 'rendition_id']))

    # document_id alone is ambiguous here (document 0 has two renditions)
    query = pd.DataFrame([{'document_id': 0},
                          {'document_id': 1},
                          {'document_id': 2}])
    with pytest.raises(ValueError):
        index.search(query)

    query = pd.DataFrame([{'document_id': 0, 'rendition_id': 0},
                          {'document_id': 1, 'rendition_id': 0},
                          {'document_id': 2, 'rendition_id': 0}])
    res = index.search(query)
    assert_equal(res.internal_id.values, [0, 2, 3])
def test_bad_search_document_rendition_id():
    """Mixing internal_id and document_id rows in one query raises NotFound."""
    md = [{'file_path': '/test', 'document_id': 0, 'rendition_id': 0},
          {'file_path': '/test2', 'document_id': 0, 'rendition_id': 1},
          {'file_path': '/test3', 'document_id': 1, 'rendition_id': 0},
          {'file_path': '/test8', 'document_id': 2, 'rendition_id': 0},
          {'file_path': '/test9', 'document_id': 3, 'rendition_id': 0},
          ]
    for internal_id, row in enumerate(md):
        row['internal_id'] = internal_id
    index = DocumentIndex.from_list(md)
    query = pd.DataFrame([{'internal_id': 1},
                          {'internal_id': 2},
                          {'document_id': 1}])
    with pytest.raises(NotFound):
        index.search(query)
def test_ingestion_pickling():
    """DocumentIndex instances must survive a joblib dump/load round-trip."""
    from sklearn.externals import joblib
    index = DocumentIndex.from_folder(data_dir)
    fname = os.path.join(cache_dir, 'document_index')
    # check that the index is picklable
    joblib.dump(index, fname)
    joblib.load(fname)
    os.remove(fname)
@pytest.mark.parametrize('n_fields', [1, 2, 3])
def test_ingestion_metadata(n_fields):
    """Ingesting a metadata list yields the expected columns and paths."""
    metadata = []
    for idx, fname in enumerate(fnames_in_abs):
        row = {'file_path': fname}
        if n_fields >= 2:
            row['document_id'] = 'a' + str(idx + 100)
        if n_fields >= 3:
            row['rendition_id'] = 1
        metadata.append(row)
    index = DocumentIndex.from_list(metadata)
    dir_out = index.data_dir
    db = index.data
    assert dir_out == os.path.normpath(data_dir)
    assert index.filenames == fnames_in_abs
    # Columns grow with the number of metadata fields provided.
    columns_ref = ['file_path', 'internal_id']
    if n_fields >= 2:
        columns_ref.append('document_id')
    if n_fields >= 3:
        columns_ref.append('rendition_id')
    assert_array_equal(sorted(db.columns.values), sorted(columns_ref))
    assert_array_equal([os.path.normpath(el) for el in index.filenames],
                       [os.path.join(dir_out, el)
                        for el in db.file_path.values])
| {
"content_hash": "277aa92c60faef61709b86139d44c239",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 97,
"avg_line_length": 38.677290836653384,
"alnum_prop": 0.5636588380716935,
"repo_name": "kcompher/FreeDiscovUI",
"id": "187ea2ee280b9ff3928a8202d5619a18d72d9c56",
"size": "9733",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "freediscovery/tests/test_ingestion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "404"
},
{
"name": "Makefile",
"bytes": "598"
},
{
"name": "Nginx",
"bytes": "451"
},
{
"name": "Python",
"bytes": "333007"
},
{
"name": "Shell",
"bytes": "3721"
}
],
"symlink_target": ""
} |
import sys
import M2Crypto
def extract_DN(fname):
    """
    Extract a Distinguished Name from an X.509 proxy.

    Walks the PEM certificate chain in the file from the end backwards
    and returns the subject DN of the first certificate that is not a
    CA certificate (i.e. the proxy/user certificate itself).

    @type fname: string
    @param fname: Filename containing the X.509 proxy
    """
    fd = open(fname,"r")
    try:
        data = fd.read()
    finally:
        fd.close()

    while 1:
        try:
            # Locate the last "BEGIN CERTIFICATE" marker still in ``data``;
            # keep everything before it in case we must step further back.
            data_idx = data.rindex('-----BEGIN CERTIFICATE-----')
            old_data = data[:data_idx]
            data = data[data_idx:]
        except ValueError:
            # No certificate marker left: not a usable proxy file.
            print "%s not a valid certificate file" % fname
            sys.exit(3)

        m = M2Crypto.X509.load_cert_string(data)
        if m.check_ca():
            # oops, this is the CA part
            # get the previous in the chain
            data = old_data
        else:
            break # ok, found it, end the loop

    return str(m.get_subject())
| {
"content_hash": "0411190e64e94cdea3b7f07d82d55b70",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 65,
"avg_line_length": 24.314285714285713,
"alnum_prop": 0.5299647473560517,
"repo_name": "holzman/glideinwms-old",
"id": "243131bd4bd5c1dfc4db15f9b978b0ccdee06977",
"size": "851",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/x509Support.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "3557"
},
{
"name": "CSS",
"bytes": "7061"
},
{
"name": "JavaScript",
"bytes": "12599"
},
{
"name": "Python",
"bytes": "1821602"
},
{
"name": "Shell",
"bytes": "167704"
},
{
"name": "XSLT",
"bytes": "4667"
}
],
"symlink_target": ""
} |
def __bootstrap__():
    """setuptools-generated stub: replace this module with the compiled
    ``surface`` extension by resolving the bundled ``surface.so`` and
    importing it dynamically under the same module name."""
    global __bootstrap__, __loader__, __file__
    import sys, pkg_resources, imp
    # Resolve the real path of the shared object shipped next to this stub.
    __file__ = pkg_resources.resource_filename(__name__, 'surface.so')
    # Drop the stub machinery before loading so the extension takes over.
    __loader__ = None; del __bootstrap__, __loader__
    imp.load_dynamic(__name__,__file__)
__bootstrap__()
| {
"content_hash": "8cd5dd898a94ace68df36df81cdb64b5",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 70,
"avg_line_length": 40.42857142857143,
"alnum_prop": 0.5830388692579506,
"repo_name": "JRock007/boxxy",
"id": "51db70bfe47a20bc3de513bc91007108d775b724",
"size": "283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dist/Boxxy server.app/Contents/Resources/lib/python2.7/pygame/surface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "662738"
},
{
"name": "C++",
"bytes": "119316"
},
{
"name": "FORTRAN",
"bytes": "7414"
},
{
"name": "Java",
"bytes": "11586"
},
{
"name": "Python",
"bytes": "12647184"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""This code example gets all proposal line items.
To create proposal line items, run create_proposal_line_items.py.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Print every proposal line item in the network, one page at a time.

  Args:
    client: an initialized dfp.DfpClient instance.
  """
  # Initialize appropriate service.
  proposal_line_item_service = client.GetService(
      'ProposalLineItemService', version='v201411')

  # Create a filter statement.
  statement = dfp.FilterStatement('ORDER BY id ASC')

  # Get proposal line items by statement, paging until no results remain.
  while True:
    response = proposal_line_item_service.getProposalLineItemsByStatement(
        statement.ToStatement())
    if 'results' in response:
      # Display results.
      for idx, proposal_line_item in enumerate(response['results'],
                                               start=statement.offset):
        print ('%s) Proposal line item with id \'%s\', belonging to proposal id'
               ' \'%s\', and named \'%s\' was found.' %
               (idx, proposal_line_item['id'], proposal_line_item['proposalId'],
                proposal_line_item['name']))
      # Advance to the next page of results.
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
  # Initialize client object from the local googleads storage/config file.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| {
"content_hash": "3ac8faee2cfe95f515d0ec923f1bfcba",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 32.73170731707317,
"alnum_prop": 0.650521609538003,
"repo_name": "wubr2000/googleads-python-lib",
"id": "dbfe9db9da7382c1a43fd6f74eb02738086a6498",
"size": "1960",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201411/proposal_line_item_service/get_all_proposal_line_items.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from congress.api import api_utils
from congress.api import webservice
from congress.dse import deepsix
from congress.managers import datasource as datasource_manager
LOG = logging.getLogger(__name__)
def d6service(name, keys, inbox, datapath, args):
    """DSE service factory: build the SchemaModel for the message bus."""
    service = SchemaModel(name, keys, inbox=inbox, dataPath=datapath, **args)
    return service
class SchemaModel(deepsix.deepSix):
    """Model for handling API requests about Schemas."""

    def __init__(self, name, keys, inbox=None, dataPath=None,
                 policy_engine=None, datasource_mgr=None):
        super(SchemaModel, self).__init__(name, keys, inbox=inbox,
                                          dataPath=dataPath)
        self.datasource_mgr = datasource_mgr

    def rpc(self, caller, name, *args, **kwargs):
        """Invoke method ``name`` on ``caller``, forwarding all arguments."""
        method = getattr(caller, name)
        return method(*args, **kwargs)

    def get_item(self, id_, params, context=None):
        """Retrieve item with id id_ from model.

        Args:
            id_: The ID of the item to retrieve
            params: A dict-like object containing parameters
                    from the request query string and body.
            context: Key-values providing frame of reference of request

        Returns:
            The matching item or None if item with id_ does not exist.
        """
        datasource = context.get('ds_id')
        table = context.get('table_id')
        try:
            schema = self.rpc(self.datasource_mgr, 'get_datasource_schema',
                              datasource)
        except (datasource_manager.DatasourceNotFound,
                datasource_manager.DriverNotFound) as e:
            # Surface datasource lookup failures as API-level errors.
            raise webservice.DataModelException(e.code, e.message,
                                                http_status_code=e.code)
        if not table:
            # No table requested: return the schema of every table.
            tables = [api_utils.create_table_dict(table_, schema)
                      for table_ in schema]
            return {'tables': tables}
        # Request to see the schema for one table.
        if table not in schema:
            raise webservice.DataModelException(
                404, ("Table '{}' for datasource '{}' has no "
                      "schema ".format(id_, datasource)),
                http_status_code=404)
        return api_utils.create_table_dict(table, schema)
| {
"content_hash": "d7fa3097603b4d68a4e5188e5dd06a9a",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 38.14754098360656,
"alnum_prop": 0.587881392350666,
"repo_name": "ekcs/congress",
"id": "ddde96eec88abca6a289efae35bb040a95ea2f70",
"size": "2959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "congress/api/schema_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2744"
},
{
"name": "GAP",
"bytes": "7778"
},
{
"name": "HTML",
"bytes": "19644"
},
{
"name": "JavaScript",
"bytes": "9896"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "1874341"
},
{
"name": "Shell",
"bytes": "8824"
}
],
"symlink_target": ""
} |
"""
Pages in Django can be served up with custom HTTP headers containing useful
information about those pages -- namely, the content type and object ID.
This module contains utility functions for retrieving and doing interesting
things with these special "X-Headers" (so called because the HTTP spec demands
that custom headers are prefixed with "X-").
Next time you're at slashdot.org, watch out for X-Fry and X-Bender. :)
"""
def populate_xheaders(request, response, package, python_module_name, object_id):
    """
    Attaches the "X-Object-Type" and "X-Object-Id" headers to the given
    HttpResponse, derived from the given package, python_module_name and
    object_id. The headers are only added when the HttpRequest originates
    from an IP address listed in the INTERNAL_IPS setting.
    """
    from django.conf.settings import INTERNAL_IPS
    if request.META.get('REMOTE_ADDR') not in INTERNAL_IPS:
        return
    response['X-Object-Type'] = "%s.%s" % (package, python_module_name)
    response['X-Object-Id'] = str(object_id)
| {
"content_hash": "1dc1fdf30d5215b7f941157b882ce2dc",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 81,
"avg_line_length": 46.04545454545455,
"alnum_prop": 0.7255676209279368,
"repo_name": "tungvx/deploy",
"id": "98d2586b75c72bb13e30131f4e912f3490045bfc",
"size": "1013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Django-0.90/django/core/xheaders.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400492"
},
{
"name": "JavaScript",
"bytes": "477245"
},
{
"name": "Python",
"bytes": "16861113"
},
{
"name": "Shell",
"bytes": "8221"
}
],
"symlink_target": ""
} |
from operator import attrgetter
from mkt.search.indexers import BaseIndexer
from mkt.translations.models import attach_trans_dict
class WebsiteIndexer(BaseIndexer):
    """Elasticsearch indexer for Website objects.

    Declares the ES mapping and converts Website model instances into
    documents ready for indexing.
    """
    # Fields stored as translated values; the API reads '*_translations'.
    translated_fields = ('description', 'name', 'short_name', 'title')
    # Subset of translated fields that also get per-language analyzed copies.
    fields_with_language_analyzers = ('description', 'name', 'short_name')
    # Fields excluded from API responses (search-only / sorting-only data).
    hidden_fields = (
        '*.raw',
        '*_sort',
        'popularity_*',
        'trending_*',
        'boost',
        # 'name', 'short_name' and 'description', as well as their locale
        # variants ('name_l10n_<language>', etc.) are only used for the query
        # matches, and are never returned to the client through the API. The
        # fields that are returned to the API are '*_translations'.
        'description',
        'name',
        'short_name',
        'description_l10n_*',
        'name_l10n_*',
        'short_name_l10n_*',
        # Title is not analyzed with language-specific analyzers but again, we
        # need `title_translations` for the API, `title` is only used for
        # querying.
        'title',
    )
    @classmethod
    def get_mapping_type_name(cls):
        """Returns the ES doc type name used for this mapping."""
        return 'website'
    @classmethod
    def get_model(cls):
        """Returns the Django model this MappingType relates to"""
        from mkt.websites.models import Website
        return Website
    @classmethod
    def get_mapping(cls):
        """Returns an Elasticsearch mapping for this MappingType"""
        doc_type = cls.get_mapping_type_name()
        mapping = {
            doc_type: {
                '_all': {'enabled': False},
                'properties': {
                    'id': {'type': 'long'},
                    'category': cls.string_not_analyzed(),
                    'created': {'type': 'date', 'format': 'dateOptionalTime'},
                    'description': {'type': 'string',
                                    'analyzer': 'default_icu',
                                    'position_offset_gap': 100},
                    'default_locale': cls.string_not_indexed(),
                    'device': {'type': 'byte'},
                    'icon_hash': cls.string_not_indexed(),
                    'icon_type': cls.string_not_indexed(),
                    'is_disabled': {'type': 'boolean'},
                    'last_updated': {'format': 'dateOptionalTime',
                                     'type': 'date'},
                    'modified': {'type': 'date', 'format': 'dateOptionalTime'},
                    'name': {
                        'type': 'string',
                        'analyzer': 'default_icu',
                        'position_offset_gap': 100,
                        # For exact matches. Referenced as `name.raw`.
                        'fields': {
                            'raw': cls.string_not_analyzed(
                                position_offset_gap=100)
                        },
                    },
                    # Name for sorting.
                    'name_sort': cls.string_not_analyzed(doc_values=True),
                    'region_exclusions': {'type': 'short'},
                    'short_name': {'type': 'string',
                                   'analyzer': 'default_icu'},
                    'status': {'type': 'byte'},
                    'title': {
                        'type': 'string',
                        'analyzer': 'default_icu',
                        'position_offset_gap': 100,
                    },
                    # FIXME: Add custom analyzer for url, that strips http,
                    # https, maybe also www. and any .tld ?
                    'url': {'type': 'string', 'analyzer': 'simple'},
                }
            }
        }
        # Attach boost field, because we are going to need search by relevancy.
        cls.attach_boost_mapping(mapping)
        # Attach popularity and trending.
        cls.attach_trending_and_popularity_mappings(mapping)
        # Add extra mapping for translated fields, containing the "raw"
        # translations.
        cls.attach_translation_mappings(mapping, cls.translated_fields)
        # Add language-specific analyzers.
        cls.attach_language_specific_analyzers(
            mapping, cls.fields_with_language_analyzers)
        return mapping
    @classmethod
    def extract_document(cls, pk=None, obj=None):
        """Converts this instance into an Elasticsearch document"""
        if obj is None:
            obj = cls.get_model().objects.get(pk=pk)
        # Attach translations for searching and indexing.
        attach_trans_dict(cls.get_model(), [obj])
        # Simple scalar attributes copied over verbatim.
        attrs = ('created', 'default_locale', 'id', 'icon_hash', 'icon_type',
                 'is_disabled', 'last_updated', 'modified', 'status', 'url')
        doc = dict(zip(attrs, attrgetter(*attrs)(obj)))
        doc['category'] = obj.categories or []
        doc['device'] = obj.devices or []
        # Lower-cased name used only for sorting (see 'name_sort' mapping).
        doc['name_sort'] = unicode(obj.name).lower()
        doc['region_exclusions'] = obj.region_exclusions or []
        # Add boost, popularity, trending values.
        doc.update(cls.extract_popularity_trending_boost(obj))
        # Handle localized fields. This adds both the field used for search and
        # the one with all translations for the API.
        for field in cls.translated_fields:
            doc.update(cls.extract_field_translations(
                obj, field, include_field_for_search=True))
        # Handle language-specific analyzers.
        for field in cls.fields_with_language_analyzers:
            doc.update(cls.extract_field_analyzed_translations(obj, field))
        return doc
| {
"content_hash": "273f536b36a82a07141e79922671f953",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 79,
"avg_line_length": 40.07142857142857,
"alnum_prop": 0.5201426024955437,
"repo_name": "eviljeff/zamboni",
"id": "0a6d77d641c7d2e090a6581dc989a073b5d611d0",
"size": "5610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/websites/indexers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "356800"
},
{
"name": "HTML",
"bytes": "2208233"
},
{
"name": "JavaScript",
"bytes": "532502"
},
{
"name": "Makefile",
"bytes": "4172"
},
{
"name": "Python",
"bytes": "3935682"
},
{
"name": "Shell",
"bytes": "10972"
},
{
"name": "Smarty",
"bytes": "1369"
}
],
"symlink_target": ""
} |
import asyncio
import re
from unittest import mock
from kafka.protocol.group import (
JoinGroupRequest_v0 as JoinGroupRequest,
SyncGroupResponse_v0 as SyncGroupResponse,
LeaveGroupRequest_v0 as LeaveGroupRequest,
HeartbeatRequest_v0 as HeartbeatRequest,
)
from kafka.protocol.commit import (
OffsetCommitRequest, OffsetCommitResponse_v2,
OffsetFetchRequest_v1 as OffsetFetchRequest
)
import kafka.errors as Errors
from ._testutil import KafkaIntegrationTestCase, run_until_complete
from aiokafka import ConsumerRebalanceListener
from aiokafka.client import AIOKafkaClient
from aiokafka.structs import OffsetAndMetadata, TopicPartition
from aiokafka.consumer.group_coordinator import (
GroupCoordinator, CoordinatorGroupRebalance, NoGroupCoordinator)
from aiokafka.consumer.subscription_state import SubscriptionState
from aiokafka.util import create_future, create_task, get_running_loop
UNKNOWN_MEMBER_ID = JoinGroupRequest.UNKNOWN_MEMBER_ID
class RebalanceListenerForTest(ConsumerRebalanceListener):
    """Listener that records every rebalance callback it receives.

    Each callback deliberately raises, so tests can verify that the
    coordinator swallows exceptions thrown by user listeners.
    """

    def __init__(self):
        self.revoked, self.assigned = [], []

    def on_partitions_revoked(self, revoked):
        self.revoked.append(revoked)
        raise Exception("coordinator should ignore this exception")

    def on_partitions_assigned(self, assigned):
        self.assigned.append(assigned)
        raise Exception("coordinator should ignore this exception")
class TestKafkaCoordinatorIntegration(KafkaIntegrationTestCase):
    @run_until_complete
    async def test_coordinator_workflow(self):
        """End-to-end: join, rebalance on member add, rebalance on close."""
        # Check if 2 coordinators will coordinate rebalances correctly
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        await self.wait_topic(client, 'topic2')
        # Check if the initial group join is performed correctly with minimal
        # setup
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1', 'topic2'})
        coordinator = GroupCoordinator(
            client, subscription,
            session_timeout_ms=10000,
            heartbeat_interval_ms=500,
            retry_backoff_ms=100)
        self.assertEqual(coordinator.coordinator_id, None)
        self.assertTrue(coordinator.need_rejoin(subscription.subscription))
        await coordinator.ensure_coordinator_known()
        self.assertNotEqual(coordinator.coordinator_id, None)
        if subscription.subscription.assignment is None:
            await subscription.wait_for_assignment()
        self.assertNotEqual(coordinator.coordinator_id, None)
        self.assertFalse(coordinator.need_rejoin(subscription.subscription))
        # Single member owns all 4 partitions of both topics.
        tp_list = subscription.assigned_partitions()
        self.assertEqual(
            tp_list,
            {
                ('topic1', 0),
                ('topic1', 1),
                ('topic2', 0),
                ('topic2', 1)
            }
        )
        # Check if adding an additional coordinator will rebalance correctly
        client2 = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client2.bootstrap()
        subscription2 = SubscriptionState()
        subscription2.subscribe(topics={'topic1', 'topic2'})
        coordinator2 = GroupCoordinator(
            client2, subscription2,
            session_timeout_ms=10000,
            heartbeat_interval_ms=500,
            retry_backoff_ms=100)
        await asyncio.gather(
            subscription.wait_for_assignment(),
            subscription2.wait_for_assignment()
        )
        # After the rebalance each member owns 2 partitions and together
        # they cover all 4.
        tp_list = subscription.assigned_partitions()
        self.assertEqual(len(tp_list), 2)
        tp_list2 = subscription2.assigned_partitions()
        self.assertEqual(len(tp_list2), 2)
        tp_list |= tp_list2
        self.assertEqual(
            tp_list,
            {
                ('topic1', 0),
                ('topic1', 1),
                ('topic2', 0),
                ('topic2', 1)
            }
        )
        # Check if closing the first coordinator will rebalance the second
        await coordinator.close()
        await client.close()
        await subscription2.wait_for_assignment()
        tp_list = subscription2.assigned_partitions()
        self.assertEqual(
            tp_list,
            {
                ('topic1', 0),
                ('topic1', 1),
                ('topic2', 0),
                ('topic2', 1)
            }
        )
        await coordinator2.close()
        await client2.close()
    @run_until_complete
    async def test_failed_group_join(self):
        """JoinGroup error handling: each broker error code maps to the
        expected retry / reset / raise behaviour of perform_group_join."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        self.add_cleanup(client.close)
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            retry_backoff_ms=10)
        # Replace the background coordination task so the test drives
        # rebalances manually.
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1)
        )
        coordinator.coordinator_id = 15
        self.add_cleanup(coordinator.close)

        async def _on_join_leader(resp):
            return b"123"
        _on_join_leader_mock = mock.Mock()
        _on_join_leader_mock.side_effect = _on_join_leader

        async def do_rebalance():
            # Build a fresh rebalance attempt with the leader callback mocked.
            rebalance = CoordinatorGroupRebalance(
                coordinator, coordinator.group_id, coordinator.coordinator_id,
                subscription.subscription, coordinator._assignors,
                coordinator._session_timeout_ms,
                coordinator._retry_backoff_ms)
            rebalance._on_join_leader = _on_join_leader_mock
            return (await rebalance.perform_group_join())
        # Mock the network layer: every send() returns a JoinGroup response
        # with the error code currently held in ``error_type``.
        mocked = mock.MagicMock()
        coordinator._client = mocked
        coordinator._client.api_version = (0, 10, 1)
        error_type = Errors.NoError

        async def send(*agrs, **kw):
            resp = JoinGroupRequest.RESPONSE_TYPE(
                error_code=error_type.errno,
                generation_id=-1,  # generation_id
                group_protocol="roundrobin",
                leader_id="111",  # leader_id
                member_id="111",  # member_id
                members=[]
            )
            return resp
        mocked.send.side_effect = send
        subsc = subscription.subscription
        # Success case, joined successfully
        resp = await do_rebalance()
        self.assertEqual(resp, ("roundrobin", b"123"))
        self.assertEqual(_on_join_leader_mock.call_count, 1)
        # no exception expected, just wait
        error_type = Errors.GroupLoadInProgressError
        resp = await do_rebalance()
        self.assertIsNone(resp)
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        error_type = Errors.InvalidGroupIdError
        with self.assertRaises(Errors.InvalidGroupIdError):
            await do_rebalance()
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        # no exception expected, member_id should be reset
        coordinator.member_id = 'some_invalid_member_id'
        error_type = Errors.UnknownMemberIdError
        resp = await do_rebalance()
        self.assertIsNone(resp)
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        self.assertEqual(
            coordinator.member_id, JoinGroupRequest.UNKNOWN_MEMBER_ID)
        error_type = Errors.UnknownError()
        with self.assertRaises(Errors.KafkaError):  # Masked as unknown error
            await do_rebalance()
        # no exception expected, coordinator_id should be reset
        error_type = Errors.GroupCoordinatorNotAvailableError
        resp = await do_rebalance()
        self.assertIsNone(resp)
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        self.assertEqual(coordinator.coordinator_id, None)
        coordinator.coordinator_id = 15
        coordinator._coordinator_dead_fut = create_future()

        async def _on_join_leader(resp):
            return None
        # Sync group fails case
        error_type = Errors.NoError
        _on_join_leader_mock.side_effect = _on_join_leader
        resp = await do_rebalance()
        self.assertEqual(coordinator.coordinator_id, 15)
        self.assertIsNone(resp)
        self.assertEqual(_on_join_leader_mock.call_count, 2)
        # Subscription changes before rebalance finishes
        async def send_change_sub(*args, **kw):
            subscription.subscribe(topics={'topic2'})
            return (await send(*args, **kw))
        mocked.send.side_effect = send_change_sub
        resp = await do_rebalance()
        self.assertEqual(resp, None)
        self.assertEqual(_on_join_leader_mock.call_count, 2)
        # `_send_req` itself raises an error
        mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
        resp = await do_rebalance()
        self.assertIsNone(resp)
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        self.assertEqual(coordinator.coordinator_id, None)
    @run_until_complete
    async def test_failed_sync_group(self):
        """SyncGroup error handling: member/coordinator reset and raised
        errors for each broker error code returned to a follower."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            heartbeat_interval_ms=20000)
        # Replace the background coordination task; the test drives
        # the sync-group requests manually.
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1)
        )
        coordinator.coordinator_id = 15
        self.add_cleanup(coordinator.close)

        async def do_sync_group():
            rebalance = CoordinatorGroupRebalance(
                coordinator, coordinator.group_id, coordinator.coordinator_id,
                subscription.subscription, coordinator._assignors,
                coordinator._session_timeout_ms,
                coordinator._retry_backoff_ms)
            await rebalance._on_join_follower()
        # Mock the network layer: each send() yields a SyncGroup response
        # with the error code currently held in ``error_type``.
        mocked = mock.MagicMock()
        coordinator._client = mocked
        coordinator._client.api_version = (0, 10, 1)
        subsc = subscription.subscription
        error_type = None

        async def send(*agrs, **kw):
            resp = SyncGroupResponse(
                error_code=error_type.errno,
                member_assignment=b"123"
            )
            return resp
        mocked.send.side_effect = send
        coordinator.member_id = 'some_invalid_member_id'
        error_type = Errors.RebalanceInProgressError
        await do_sync_group()
        self.assertEqual(coordinator.member_id, 'some_invalid_member_id')
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        error_type = Errors.UnknownMemberIdError
        await do_sync_group()
        self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        error_type = Errors.NotCoordinatorForGroupError
        await do_sync_group()
        self.assertEqual(coordinator.coordinator_id, None)
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        coordinator.coordinator_id = 15
        coordinator._coordinator_dead_fut = create_future()
        error_type = Errors.UnknownError()
        with self.assertRaises(Errors.KafkaError):  # Masked as some KafkaError
            await do_sync_group()
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        error_type = Errors.GroupAuthorizationFailedError()
        with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
            await do_sync_group()
        self.assertEqual(coordinator.need_rejoin(subsc), True)
        self.assertEqual(cm.exception.args[0], coordinator.group_id)
        # If ``send()`` itself raises an error
        mocked.send.side_effect = Errors.GroupCoordinatorNotAvailableError()
        await do_sync_group()
        self.assertEqual(coordinator.coordinator_id, None)
        self.assertEqual(coordinator.need_rejoin(subsc), True)
    @run_until_complete
    async def test_generation_change_during_rejoin_sync(self):
        """SyncGroup must restore the generation/member_id values captured
        at request time, even if they were mutated while the request was
        still in flight."""
        coordinator = mock.MagicMock()
        subscription = mock.MagicMock()
        assignors = mock.MagicMock()
        member_assignment = mock.Mock()

        rebalance = CoordinatorGroupRebalance(
            coordinator, "group_id", "coordinator_id", subscription,
            assignors, 1000, 1000)

        # Slow the response so the test can mutate coordinator state while
        # the sync request is outstanding.
        async def send_req(request):
            await asyncio.sleep(0.1)
            resp = mock.MagicMock()
            resp.member_assignment = member_assignment
            resp.error_code = 0
            return resp
        coordinator._send_req.side_effect = send_req

        request = mock.MagicMock()
        coordinator.generation = 1
        coordinator.member_id = "member_id"
        sync_req = asyncio.ensure_future(rebalance._send_sync_group_request(request))
        await asyncio.sleep(0.05)
        # Change values mid-flight (request sleeps 0.1, we waited only 0.05)
        coordinator.generation = -1
        coordinator.member_id = "member_id-changed"

        assert await sync_req == member_assignment

        # make sure values are set correctly
        assert coordinator.generation == 1
        assert coordinator.member_id == "member_id"
@run_until_complete
async def test_subscribe_pattern(self):
client = AIOKafkaClient(bootstrap_servers=self.hosts)
await client.bootstrap()
test_listener = RebalanceListenerForTest()
subscription = SubscriptionState()
coordinator = GroupCoordinator(
client, subscription,
group_id='subs-pattern-group')
await self.wait_topic(client, 'st-topic1')
await self.wait_topic(client, 'st-topic2')
subscription.subscribe_pattern(
re.compile('st-topic*'), listener=test_listener)
client.set_topics([])
await subscription.wait_for_assignment()
self.assertNotEqual(coordinator.coordinator_id, None)
self.assertFalse(coordinator.need_rejoin(subscription.subscription))
tp_list = subscription.assigned_partitions()
assigned = {
('st-topic1', 0),
('st-topic1', 1),
('st-topic2', 0),
('st-topic2', 1),
}
self.assertEqual(tp_list, assigned)
self.assertEqual(test_listener.revoked, [set()])
self.assertEqual(test_listener.assigned, [assigned])
await coordinator.close()
await client.close()
    @run_until_complete
    async def test_commit_failed_scenarios(self):
        """OffsetCommit error handling: fatal errors propagate, retriable
        errors are retried, rebalance/membership errors surface as
        CommitFailedError, and coordinator errors trigger a re-lookup."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            group_id='test-offsets-group')
        await subscription.wait_for_assignment()
        assignment = subscription.subscription.assignment
        offsets = {TopicPartition('topic1', 0): OffsetAndMetadata(1, '')}
        # Sanity check: a plain commit against the real broker succeeds
        await coordinator.commit_offsets(assignment, offsets)

        _orig_send_req = coordinator._send_req
        with mock.patch.object(coordinator, "_send_req") as mocked:
            commit_error = None

            # Fake only OffsetCommit responses (using the error currently in
            # ``commit_error``); pass every other request through unchanged.
            # A list value is consumed one error per request, front first.
            async def mock_send_req(request):
                if request.API_KEY == OffsetCommitRequest[0].API_KEY:
                    if isinstance(commit_error, list):
                        error_code = commit_error.pop(0).errno
                    else:
                        error_code = commit_error.errno
                    resp_topics = [("topic1", [(0, error_code)])]
                    return OffsetCommitResponse_v2(resp_topics)
                return (await _orig_send_req(request))
            mocked.side_effect = mock_send_req

            # Not retriable errors are propagated
            commit_error = Errors.GroupAuthorizationFailedError
            with self.assertRaises(Errors.GroupAuthorizationFailedError):
                await coordinator.commit_offsets(assignment, offsets)

            commit_error = Errors.TopicAuthorizationFailedError
            with self.assertRaises(Errors.TopicAuthorizationFailedError):
                await coordinator.commit_offsets(assignment, offsets)

            commit_error = Errors.InvalidCommitOffsetSizeError
            with self.assertRaises(Errors.InvalidCommitOffsetSizeError):
                await coordinator.commit_offsets(assignment, offsets)

            commit_error = Errors.OffsetMetadataTooLargeError
            with self.assertRaises(Errors.OffsetMetadataTooLargeError):
                await coordinator.commit_offsets(assignment, offsets)

            # retriable errors should be retried
            commit_error = [
                Errors.GroupLoadInProgressError,
                Errors.GroupLoadInProgressError,
                Errors.NoError,
            ]
            await coordinator.commit_offsets(assignment, offsets)

            # If rebalance is needed we can't commit offset
            commit_error = Errors.RebalanceInProgressError
            with self.assertRaises(Errors.CommitFailedError):
                await coordinator.commit_offsets(assignment, offsets)
            self.assertTrue(coordinator.need_rejoin(subscription.subscription))
            # RebalanceInProgress keeps the member_id intact
            self.assertNotEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)
            await subscription.wait_for_assignment()
            assignment = subscription.subscription.assignment

            commit_error = Errors.UnknownMemberIdError
            was_member_id = coordinator.member_id
            with self.assertRaises(Errors.CommitFailedError):
                await coordinator.commit_offsets(assignment, offsets)
            self.assertTrue(coordinator.need_rejoin(subscription.subscription))
            # UnknownMemberId resets the member_id
            self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)

            # NOTE: Reconnecting with unknown ID will force a
            # session_timeout_ms wait on broker, so we leave group to avoid
            # that. Hack for test purposes)
            request = LeaveGroupRequest(coordinator.group_id, was_member_id)
            await coordinator._send_req(request)
            await subscription.wait_for_assignment()
            assignment = subscription.subscription.assignment

            # Coordinator errors should be retried after it was found again
            commit_error = [
                Errors.GroupCoordinatorNotAvailableError,
                Errors.NoError
            ]
            await coordinator.commit_offsets(assignment, offsets)
            commit_error = [
                Errors.NotCoordinatorForGroupError,
                Errors.NoError
            ]
            await coordinator.commit_offsets(assignment, offsets)
            commit_error = [
                Errors.RequestTimedOutError,
                Errors.NoError
            ]
            await coordinator.commit_offsets(assignment, offsets)

            # Make sure coordinator_id is reset properly each retry
            for retriable_error in (
                    Errors.GroupCoordinatorNotAvailableError,
                    Errors.NotCoordinatorForGroupError,
                    Errors.RequestTimedOutError,
            ):
                self.assertIsNotNone(coordinator.coordinator_id)
                commit_error = retriable_error
                # _do_commit_offsets (no retry wrapper) raises directly
                with self.assertRaises(retriable_error):
                    await coordinator._do_commit_offsets(assignment, offsets)
                self.assertIsNone(coordinator.coordinator_id)

                # ask coordinator to refresh coordinator_id value
                await coordinator.ensure_coordinator_known()

            # Unknown errors are just propagated too
            commit_error = Errors.UnknownError
            with self.assertRaises(Errors.UnknownError):
                await coordinator.commit_offsets(assignment, offsets)

        await coordinator.close()
        await client.close()
    @run_until_complete
    async def test_fetchoffsets_failed_scenarios(self):
        """OffsetFetch error handling: retriable errors are retried, an
        unknown topic is omitted from the result, authorization and unknown
        errors are raised to the caller."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            group_id='fetch-offsets-group')
        await subscription.wait_for_assignment()
        tp = TopicPartition('topic1', 0)
        partitions = {tp}

        _orig_send_req = coordinator._send_req
        with mock.patch.object(coordinator, "_send_req") as mocked:
            fetch_error = None

            # Fake only OffsetFetch responses using the error currently in
            # ``fetch_error`` (lists are consumed front-first, one per
            # request); everything else passes through unchanged.
            async def mock_send_req(request):
                if request.API_KEY == OffsetFetchRequest.API_KEY:
                    if isinstance(fetch_error, list):
                        error_code = fetch_error.pop(0).errno
                    else:
                        error_code = fetch_error.errno
                    if error_code == Errors.NoError.errno:
                        offset = 10
                    else:
                        offset = -1
                    resp_topics = [("topic1", [(0, offset, "", error_code)])]
                    return request.RESPONSE_TYPE(resp_topics)
                return (await _orig_send_req(request))
            mocked.side_effect = mock_send_req

            # 0 partitions call should just fast return
            res = await coordinator.fetch_committed_offsets({})
            self.assertEqual(res, {})
            self.assertEqual(mocked.call_count, 0)

            fetch_error = [
                Errors.GroupLoadInProgressError,
                Errors.GroupLoadInProgressError,
                Errors.NoError,
                Errors.NoError,
                Errors.NoError
            ]
            res = await coordinator.fetch_committed_offsets(partitions)
            self.assertEqual(res, {tp: OffsetAndMetadata(10, "")})

            # Just omit the topic with a warning
            fetch_error = Errors.UnknownTopicOrPartitionError
            res = await coordinator.fetch_committed_offsets(partitions)
            self.assertEqual(res, {})

            fetch_error = [
                Errors.NotCoordinatorForGroupError,
                Errors.NotCoordinatorForGroupError,
                Errors.NoError,
                Errors.NoError,
                Errors.NoError
            ]
            r = await coordinator.fetch_committed_offsets(partitions)
            self.assertEqual(r, {tp: OffsetAndMetadata(10, "")})

            fetch_error = Errors.GroupAuthorizationFailedError
            with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
                await coordinator.fetch_committed_offsets(partitions)
            # The group id is attached to the authorization error
            self.assertEqual(cm.exception.args[0], coordinator.group_id)

            fetch_error = Errors.UnknownError
            with self.assertRaises(Errors.KafkaError):
                await coordinator.fetch_committed_offsets(partitions)

        await coordinator.close()
        await client.close()
    @run_until_complete
    async def test_coordinator_subscription_replace_on_rebalance(self):
        """Replacing the subscribed topic while a rebalance is in progress
        must converge on the new topic in exactly 2 rebalances (regression
        test, see issue #88)."""
        # See issue #88
        client = AIOKafkaClient(
            metadata_max_age_ms=2000,
            bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        await self.wait_topic(client, 'topic2')
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        client.set_topics(('topic1', ))
        coordinator = GroupCoordinator(
            client, subscription,
            group_id='race-rebalance-subscribe-replace',
            heartbeat_interval_ms=1000)

        _perform_assignment = coordinator._perform_assignment
        with mock.patch.object(coordinator, '_perform_assignment') as mocked:
            async def _new(*args, **kw):
                # Change the subscription to different topic before we finish
                # rebalance
                res = await _perform_assignment(*args, **kw)
                if subscription.subscription.topics == {"topic1"}:
                    subscription.subscribe(topics={'topic2'})
                    client.set_topics(('topic2', ))
                return res
            mocked.side_effect = _new

            await subscription.wait_for_assignment()
            topics = {
                tp.topic for tp in subscription.assigned_partitions()}
            self.assertEqual(topics, {'topic2'})
            # There should only be 2 rebalances to finish the task
            self.assertEqual(mocked.call_count, 2)

        await coordinator.close()
        await client.close()
@run_until_complete
async def test_coordinator_subscription_append_on_rebalance(self):
# same as above, but with adding topics instead of replacing them
client = AIOKafkaClient(bootstrap_servers=self.hosts)
await client.bootstrap()
await self.wait_topic(client, 'topic1')
await self.wait_topic(client, 'topic2')
subscription = SubscriptionState()
subscription.subscribe(topics={'topic1'})
coordinator = GroupCoordinator(
client, subscription,
group_id='race-rebalance-subscribe-append',
heartbeat_interval_ms=20000000)
_perform_assignment = coordinator._perform_assignment
with mock.patch.object(coordinator, '_perform_assignment') as mocked:
async def _new(*args, **kw):
# Change the subscription to different topic before we finish
# rebalance
res = await _perform_assignment(*args, **kw)
if subscription.subscription.topics == {"topic1"}:
subscription.subscribe(topics={'topic1', 'topic2'})
client.set_topics(('topic1', 'topic2', ))
return res
mocked.side_effect = _new
await subscription.wait_for_assignment()
topics = {
tp.topic for tp in subscription.assigned_partitions()}
self.assertEqual(topics, {'topic1', 'topic2'})
# There should only be 2 rebalances to finish the task
self.assertEqual(mocked.call_count, 2)
await coordinator.close()
await client.close()
    @run_until_complete
    async def test_coordinator_metadata_update_during_rebalance(self):
        """A metadata update started by ``set_topics`` must be awaited before
        the leader performs assignment, even when the update is slow."""
        # Race condition where client.set_topics start MetadataUpdate, but it
        # fails to arrive before leader performed assignment

        # Just ensure topics are created
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        await self.wait_topic(client, 'topic2')
        await client.close()

        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        self.add_cleanup(client.close)
        subscription = SubscriptionState()
        client.set_topics(("topic1", ))
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            group_id='race-rebalance-metadata-update',
            heartbeat_interval_ms=20000000)
        self.add_cleanup(coordinator.close)
        await subscription.wait_for_assignment()
        # Check that topic's partitions are properly assigned
        self.assertEqual(
            subscription.assigned_partitions(),
            {TopicPartition("topic1", 0), TopicPartition("topic1", 1)})

        _metadata_update = client._metadata_update
        with mock.patch.object(client, '_metadata_update') as mocked:
            async def _new(*args, **kw):
                # Just make metadata updates a bit more slow for test
                # robustness
                await asyncio.sleep(0.5)
                res = await _metadata_update(*args, **kw)
                return res
            mocked.side_effect = _new

            # This case will assure, that the started metadata update will be
            # waited for before assigning partitions. ``set_topics`` will start
            # the metadata update
            subscription.subscribe(topics={'topic2'})
            client.set_topics(('topic2', ))
            await subscription.wait_for_assignment()
            self.assertEqual(
                subscription.assigned_partitions(),
                {TopicPartition("topic2", 0), TopicPartition("topic2", 1)})
    @run_until_complete
    async def test_coordinator_metadata_change_by_broker(self):
        """A transient broker-side metadata change (snapshot widening then
        narrowing) must trigger only one rebalance (regression, issue #108)."""
        # Issue #108. We can have a misleading metadata change, that will
        # trigger additional rebalance
        client = AIOKafkaClient(
            bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        await self.wait_topic(client, 'topic2')
        client.set_topics(['other_topic'])
        await client.force_metadata_update()

        subscription = SubscriptionState()
        coordinator = GroupCoordinator(
            client, subscription,
            group_id='race-rebalance-subscribe-append',
            heartbeat_interval_ms=2000000)
        subscription.subscribe(topics={'topic1'})
        await client.set_topics(('topic1', ))
        await subscription.wait_for_assignment()

        _perform_assignment = coordinator._perform_assignment
        with mock.patch.object(coordinator, '_perform_assignment') as mocked:
            # Wrap the real implementation only to count invocations
            mocked.side_effect = _perform_assignment

            subscription.subscribe(topics={'topic2'})
            await client.set_topics(('topic2', ))
            # Should only trigger 1 rebalance, but will trigger 2 with bug:
            # Metadata snapshot will change:
            # {'topic1': {0, 1}} -> {'topic1': {0, 1}, 'topic2': {0, 1}}
            # And then again:
            # {'topic1': {0, 1}, 'topic2': {0, 1}} -> {'topic2': {0, 1}}
            await subscription.wait_for_assignment()
            await client.force_metadata_update()
            self.assertFalse(
                coordinator.need_rejoin(subscription.subscription))
            self.assertEqual(mocked.call_count, 1)

        await coordinator.close()
        await client.close()
    @run_until_complete
    async def test_coordinator_ensure_active_group_on_expired_membership(self):
        """A failing autocommit during join-prepare (expired membership)
        must not prevent the group from rejoining."""
        # Do not fail group join if group membership has expired (ie autocommit
        # fails on join prepare)
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            group_id='test-offsets-group', session_timeout_ms=6000,
            heartbeat_interval_ms=1000)
        await subscription.wait_for_assignment()
        assignment = subscription.subscription.assignment

        # Make sure we have something to commit before rejoining
        tp = TopicPartition('topic1', 0)
        subscription.seek(tp, 0)
        offsets = assignment.all_consumed_offsets()
        self.assertTrue(offsets)  # Not empty

        # during OffsetCommit, UnknownMemberIdError is raised
        _orig_send_req = coordinator._send_req
        resp_topics = [("topic1", [(0, Errors.UnknownMemberIdError.errno)])]
        with mock.patch.object(coordinator, "_send_req") as mocked:
            # Only commits are faked; all other requests pass through
            async def mock_send_req(request):
                if request.API_KEY == OffsetCommitRequest[0].API_KEY:
                    return OffsetCommitResponse_v2(resp_topics)
                return (await _orig_send_req(request))
            mocked.side_effect = mock_send_req

            with self.assertRaises(Errors.CommitFailedError):
                await coordinator.commit_offsets(assignment, offsets)
            self.assertTrue(coordinator.need_rejoin(subscription.subscription))
            # Waiting will assure we could rebalance even if commit fails
            await subscription.wait_for_assignment()

        await coordinator.close()
        await client.close()
@run_until_complete
async def test_coordinator__send_req(self):
client = AIOKafkaClient(bootstrap_servers=self.hosts)
await client.bootstrap()
self.add_cleanup(client.close)
subscription = SubscriptionState()
subscription.subscribe(topics={'topic1'})
coordinator = GroupCoordinator(
client, subscription,
group_id='test-my-group', session_timeout_ms=6000,
heartbeat_interval_ms=1000)
self.add_cleanup(coordinator.close)
request = OffsetCommitRequest[2](topics=[])
# We did not call ensure_coordinator_known yet
with self.assertRaises(Errors.GroupCoordinatorNotAvailableError):
await coordinator._send_req(request)
await coordinator.ensure_coordinator_known()
self.assertIsNotNone(coordinator.coordinator_id)
with mock.patch.object(client, "send") as mocked:
async def mock_send(*args, **kw):
raise Errors.KafkaError("Some unexpected error")
mocked.side_effect = mock_send
# _send_req should mark coordinator dead on errors
with self.assertRaises(Errors.KafkaError):
await coordinator._send_req(request)
self.assertIsNone(coordinator.coordinator_id)
    @run_until_complete
    async def test_coordinator_close(self):
        """close() must wait for an in-progress rebalance (blocked in the
        listener callback) to finish; closing twice is a no-op."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        self.add_cleanup(client.close)
        subscription = SubscriptionState()
        waiter = create_future()

        class WaitingListener(ConsumerRebalanceListener):
            def on_partitions_revoked(self, revoked):
                pass

            async def on_partitions_assigned(self, assigned, waiter=waiter):
                # Block the rebalance until the test releases ``waiter``
                await waiter

        coordinator = GroupCoordinator(
            client, subscription,
            group_id='test-my-group', session_timeout_ms=6000,
            heartbeat_interval_ms=1000)
        subscription.subscribe(
            topics={'topic1'}, listener=WaitingListener())

        # Close task should be loyal to rebalance and wait for it to finish
        close_task = create_task(coordinator.close())
        await asyncio.sleep(0.1)
        self.assertFalse(close_task.done())

        # Releasing the waiter on listener will allow close task to finish
        waiter.set_result(True)
        await close_task
        # You can close again with no effect
        await coordinator.close()
    @run_until_complete
    async def test_coordinator_close_autocommit(self):
        """close() performs a final autocommit; an error raised by that
        commit must not prevent the coordinator from closing."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        self.add_cleanup(client.close)
        subscription = SubscriptionState()
        coordinator = GroupCoordinator(
            client, subscription,
            group_id='test-my-group', session_timeout_ms=6000,
            heartbeat_interval_ms=1000)
        subscription.subscribe(topics={'topic1'})
        await subscription.wait_for_assignment()

        waiter = create_future()

        # Replace commit_offsets so the final autocommit blocks on ``waiter``
        async def commit_offsets(*args, **kw):
            await waiter
        coordinator.commit_offsets = mocked = mock.Mock()
        mocked.side_effect = commit_offsets

        # Close task should call autocommit last time
        close_task = create_task(coordinator.close())
        await asyncio.sleep(0.1)
        # self.assertFalse(close_task.done())

        # Raising an error should not prevent from closing. Error should be
        # just logged
        waiter.set_exception(Errors.UnknownError())
        await close_task
    @run_until_complete
    async def test_coordinator_ensure_coordinator_known(self):
        """ensure_coordinator_known retries past unreachable nodes and
        retriable lookup errors; authorization and unretriable errors are
        re-raised to the caller."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            heartbeat_interval_ms=20000)
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1,)
        )
        self.add_cleanup(coordinator.close)

        def force_metadata_update():
            fut = create_future()
            fut.set_result(True)
            return fut
        client.ready = mock.Mock()
        client.force_metadata_update = mock.Mock()
        client.force_metadata_update.side_effect = force_metadata_update

        async def ready(node_id, group=None):
            # Only node 0 is "connectable" in this test
            if node_id == 0:
                return True
            return False
        client.ready.side_effect = ready
        client.coordinator_lookup = mock.Mock()
        coordinator_lookup = None

        # NOTE: results are popped from the END of ``coordinator_lookup``
        async def _do_coordinator_lookup(type_, key):
            node_id = coordinator_lookup.pop()
            if isinstance(node_id, Exception):
                raise node_id
            return node_id
        client.coordinator_lookup.side_effect = _do_coordinator_lookup

        # CASE: the lookup returns a broken node, that can't be connected
        # to. Ensure should wait until coordinator lookup finds the correct
        # node.
        coordinator.coordinator_dead()
        coordinator_lookup = [0, 1, 1]
        await coordinator.ensure_coordinator_known()
        self.assertEqual(coordinator.coordinator_id, 0)
        self.assertEqual(client.force_metadata_update.call_count, 0)

        # CASE: lookup fails with error first time. We update metadata and try
        # again
        coordinator.coordinator_dead()
        coordinator_lookup = [0, Errors.UnknownTopicOrPartitionError()]
        await coordinator.ensure_coordinator_known()
        self.assertEqual(client.force_metadata_update.call_count, 1)

        # CASE: Special case for group authorization
        coordinator.coordinator_dead()
        coordinator_lookup = [0, Errors.GroupAuthorizationFailedError()]
        with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
            await coordinator.ensure_coordinator_known()
        self.assertEqual(cm.exception.args[0], coordinator.group_id)

        # CASE: unretriable errors should be reraised to higher level
        coordinator.coordinator_dead()
        coordinator_lookup = [0, Errors.UnknownError()]
        with self.assertRaises(Errors.UnknownError):
            await coordinator.ensure_coordinator_known()
    @run_until_complete
    async def test_coordinator__do_heartbeat(self):
        """_do_heartbeat error mapping: coordinator errors reset the
        coordinator, membership/generation errors reset member_id and
        request rejoin, authorization/unknown errors raise."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            heartbeat_interval_ms=20000)
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1)
        )
        self.add_cleanup(coordinator.close)

        _orig_send_req = coordinator._send_req
        coordinator._send_req = mocked = mock.Mock()
        heartbeat_error = None
        send_req_error = None

        # Fake only Heartbeat responses using ``heartbeat_error`` (lists are
        # consumed front-first); ``send_req_error`` fails the send itself.
        async def mock_send_req(request):
            if send_req_error is not None:
                raise send_req_error
            if request.API_KEY == HeartbeatRequest.API_KEY:
                if isinstance(heartbeat_error, list):
                    error_code = heartbeat_error.pop(0).errno
                else:
                    error_code = heartbeat_error.errno
                return HeartbeatRequest.RESPONSE_TYPE(error_code)
            return (await _orig_send_req(request))
        mocked.side_effect = mock_send_req

        coordinator.coordinator_id = 15
        heartbeat_error = Errors.GroupCoordinatorNotAvailableError()
        success = await coordinator._do_heartbeat()
        self.assertFalse(success)
        self.assertIsNone(coordinator.coordinator_id)

        coordinator._rejoin_needed_fut = create_future()
        heartbeat_error = Errors.RebalanceInProgressError()
        success = await coordinator._do_heartbeat()
        # Rebalance in progress counts as a successful beat, but sets rejoin
        self.assertTrue(success)
        self.assertTrue(coordinator._rejoin_needed_fut.done())

        coordinator.member_id = "some_member"
        coordinator._rejoin_needed_fut = create_future()
        heartbeat_error = Errors.IllegalGenerationError()
        success = await coordinator._do_heartbeat()
        self.assertFalse(success)
        self.assertTrue(coordinator._rejoin_needed_fut.done())
        self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)

        coordinator.member_id = "some_member"
        coordinator._rejoin_needed_fut = create_future()
        heartbeat_error = Errors.UnknownMemberIdError()
        success = await coordinator._do_heartbeat()
        self.assertFalse(success)
        self.assertTrue(coordinator._rejoin_needed_fut.done())
        self.assertEqual(coordinator.member_id, UNKNOWN_MEMBER_ID)

        heartbeat_error = Errors.GroupAuthorizationFailedError()
        with self.assertRaises(Errors.GroupAuthorizationFailedError) as cm:
            await coordinator._do_heartbeat()
        self.assertEqual(cm.exception.args[0], coordinator.group_id)

        heartbeat_error = Errors.UnknownError()
        with self.assertRaises(Errors.KafkaError):
            await coordinator._do_heartbeat()

        # A failed send (not a bad response) is a non-fatal failed beat
        heartbeat_error = None
        send_req_error = Errors.RequestTimedOutError()
        success = await coordinator._do_heartbeat()
        self.assertFalse(success)

        heartbeat_error = Errors.NoError()
        send_req_error = None
        success = await coordinator._do_heartbeat()
        self.assertTrue(success)
    @run_until_complete
    async def test_coordinator__heartbeat_routine(self):
        """Heartbeat routine timing: beats every heartbeat_interval_ms,
        failed beats retried every retry_backoff_ms, and the coordinator is
        marked dead once session_timeout_ms elapses without a success.

        NOTE: timing-sensitive — sleeps below are tuned to the 100/300/50 ms
        intervals configured on the coordinator.
        """
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        subscription = SubscriptionState()
        subscription.subscribe(topics={'topic1'})
        coordinator = GroupCoordinator(
            client, subscription,
            heartbeat_interval_ms=100,
            session_timeout_ms=300,
            retry_backoff_ms=50)
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1)
        )
        self.add_cleanup(coordinator.close)
        coordinator._do_heartbeat = mocked = mock.Mock()
        coordinator.coordinator_id = 15
        coordinator.member_id = 17
        coordinator.generation = 0
        success = None

        # Heartbeat result is driven by ``success`` (lists pop front-first)
        async def _do_heartbeat(*args, **kw):
            if isinstance(success, list):
                return success.pop(0)
            return success
        mocked.side_effect = _do_heartbeat

        async def ensure_coordinator_known():
            return None
        coordinator.ensure_coordinator_known = mock.Mock()
        coordinator.ensure_coordinator_known.side_effect = (
            ensure_coordinator_known
        )

        routine = create_task(
            coordinator._heartbeat_routine())

        def cleanup():
            routine.cancel()
            return routine
        self.add_cleanup(cleanup)

        # CASE: simple heartbeat
        success = True
        await asyncio.sleep(0.13)
        self.assertFalse(routine.done())
        self.assertEqual(mocked.call_count, 1)

        # CASE: 2 heartbeat fail
        success = False
        await asyncio.sleep(0.15)
        self.assertFalse(routine.done())
        # We did 2 heartbeats as we waited only retry_backoff_ms between them
        self.assertEqual(mocked.call_count, 3)

        # CASE: session_timeout_ms elapsed without heartbeat
        await asyncio.sleep(0.10)
        self.assertEqual(mocked.call_count, 5)
        self.assertEqual(coordinator.coordinator_id, 15)

        # last heartbeat try
        await asyncio.sleep(0.05)
        self.assertEqual(mocked.call_count, 6)
        self.assertIsNone(coordinator.coordinator_id)
    @run_until_complete
    async def test_coordinator__maybe_refresh_commit_offsets(self):
        """_maybe_refresh_commit_offsets fetches committed offsets once per
        assignment, tolerates retriable errors (returns False) and raises
        unretriable ones."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        subscription = SubscriptionState()
        tp = TopicPartition("topic1", 0)
        coordinator = GroupCoordinator(
            client, subscription,
            heartbeat_interval_ms=20000)
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1)
        )
        self.add_cleanup(coordinator.close)

        coordinator._do_fetch_commit_offsets = mocked = mock.Mock()
        fetched_offsets = {tp: OffsetAndMetadata(12, "")}
        test_self = self

        async def do_fetch(need_update):
            test_self.assertEqual(need_update, [tp])
            return fetched_offsets
        mocked.side_effect = do_fetch

        def reset_assignment():
            # Fresh user assignment gives a fresh fetch_committed future
            subscription.assign_from_user({tp})
            assignment = subscription.subscription.assignment
            tp_state = assignment.state_value(tp)
            fut = tp_state.fetch_committed()
            return assignment, tp_state, fut
        assignment, tp_state, fut = reset_assignment()

        # Success case
        resp = await coordinator._maybe_refresh_commit_offsets(assignment)
        self.assertEqual(resp, True)
        self.assertEqual(fut.result(), OffsetAndMetadata(12, ""))

        # Calling again will fast return without a request
        resp = await coordinator._maybe_refresh_commit_offsets(assignment)
        self.assertEqual(resp, True)
        self.assertEqual(mocked.call_count, 1)

        # Commit not found case
        fetched_offsets = {}
        assignment, tp_state, fut = reset_assignment()
        resp = await coordinator._maybe_refresh_commit_offsets(assignment)
        self.assertEqual(resp, True)
        self.assertEqual(fut.result(), OffsetAndMetadata(-1, ""))

        # Retriable error will be skipped
        assignment, tp_state, fut = reset_assignment()
        mocked.side_effect = Errors.GroupCoordinatorNotAvailableError()
        resp = await coordinator._maybe_refresh_commit_offsets(assignment)
        self.assertEqual(resp, False)

        # Not retriable error will not be skipped
        mocked.side_effect = Errors.UnknownError()
        with self.assertRaises(Errors.UnknownError):
            await coordinator._maybe_refresh_commit_offsets(assignment)
    @run_until_complete
    async def test_coordinator__maybe_do_autocommit(self):
        """_maybe_do_autocommit returns the time until the next autocommit
        deadline (None when autocommit is disabled), backs off by
        retry_backoff_ms on retriable errors and raises fatal ones."""
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        subscription = SubscriptionState()
        tp = TopicPartition("topic1", 0)
        coordinator = GroupCoordinator(
            client, subscription,
            heartbeat_interval_ms=20000, auto_commit_interval_ms=1000,
            retry_backoff_ms=50)
        coordinator._coordination_task.cancel()  # disable for test
        try:
            await coordinator._coordination_task
        except asyncio.CancelledError:
            pass
        coordinator._coordination_task = create_task(
            asyncio.sleep(0.1)
        )
        self.add_cleanup(coordinator.close)

        coordinator._do_commit_offsets = mocked = mock.Mock()

        # Simulate a commit that takes 100ms
        async def do_commit(*args, **kw):
            await asyncio.sleep(0.1)
            return
        mocked.side_effect = do_commit

        def reset_assignment():
            subscription.assign_from_user({tp})
            assignment = subscription.subscription.assignment
            tp_state = assignment.state_value(tp)
            return assignment, tp_state
        assignment, tp_state = reset_assignment()

        # Fast return if autocommit disabled
        coordinator._enable_auto_commit = False
        timeout = await coordinator._maybe_do_autocommit(assignment)
        self.assertIsNone(timeout)  # Infinite timeout in this case
        self.assertEqual(mocked.call_count, 0)
        coordinator._enable_auto_commit = True

        # Successful case should count time to next autocommit
        loop = get_running_loop()
        now = loop.time()
        interval = 1
        coordinator._next_autocommit_deadline = 0
        timeout = await coordinator._maybe_do_autocommit(assignment)
        # 1000ms interval minus 100 sleep
        self.assertAlmostEqual(timeout, 0.9, places=1)
        self.assertAlmostEqual(
            coordinator._next_autocommit_deadline, now + interval, places=1)
        self.assertEqual(mocked.call_count, 1)

        # Retriable errors should backoff and retry, no skip autocommit
        coordinator._next_autocommit_deadline = 0
        mocked.side_effect = Errors.NotCoordinatorForGroupError()
        now = loop.time()
        timeout = await coordinator._maybe_do_autocommit(assignment)
        self.assertEqual(timeout, 0.05)
        # Deadline should be set into future, not depending on commit time,
        # to avoid busy loops
        self.assertAlmostEqual(
            coordinator._next_autocommit_deadline, now + timeout,
            places=1)

        # UnknownMemberId should also retry
        coordinator._next_autocommit_deadline = 0
        mocked.side_effect = Errors.UnknownMemberIdError()
        now = loop.time()
        timeout = await coordinator._maybe_do_autocommit(assignment)
        self.assertEqual(timeout, 0.05)

        # Not retriable errors should skip autocommit and log
        mocked.side_effect = Errors.UnknownError()
        now = loop.time()
        coordinator._next_autocommit_deadline = 0
        with self.assertRaises(Errors.KafkaError):
            await coordinator._maybe_do_autocommit(assignment)
    @run_until_complete
    async def test_coordinator__coordination_routine(self):
        """Drive the coordinator's main loop through its state transitions.

        Every coordinator sub-step (coordinator lookup, join preparation,
        rejoin, autocommit, heartbeat, metadata refresh) is replaced with a
        mock, so only the control flow of ``_coordination_routine`` is
        exercised: reaction to subscription changes, rejoin requests,
        rebalance failure/retry, pattern subscriptions and shutdown.
        """
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        subscription = SubscriptionState()
        tp = TopicPartition("topic1", 0)
        coordinator = GroupCoordinator(
            client, subscription,
            heartbeat_interval_ms=20000, auto_commit_interval_ms=1000,
            retry_backoff_ms=50)
        self.add_cleanup(coordinator.close)
        # Replace the task spawned by the constructor with a fresh routine
        # task so each CASE below starts from a known state.
        def start_coordination():
            if coordinator._coordination_task:
                coordinator._coordination_task.cancel()
            coordinator._coordination_task = task = create_task(
                coordinator._coordination_routine())
            return task
        async def stop_coordination():
            coordinator._coordination_task.cancel()  # disable for test
            try:
                await coordinator._coordination_task
            except asyncio.CancelledError:
                pass
            # Park a dummy task so close() still has something to await.
            coordinator._coordination_task = create_task(
                asyncio.sleep(0.1))
        await stop_coordination()
        # Mock out every sub-step; each mock's call_count is asserted below.
        async def ensure_coordinator_known():
            return None
        coordinator.ensure_coordinator_known = coord_mock = mock.Mock()
        coord_mock.side_effect = ensure_coordinator_known
        async def _on_join_prepare(assign):
            return None
        coordinator._on_join_prepare = prepare_mock = mock.Mock()
        prepare_mock.side_effect = _on_join_prepare
        coordinator._do_rejoin_group = rejoin_mock = mock.Mock()
        rejoin_ok = True
        async def do_rejoin(subsc):
            if rejoin_ok:
                subscription.assign_from_subscribed({tp})
                coordinator._rejoin_needed_fut = create_future()
                return True
            else:
                await asyncio.sleep(0.1)
                return False
        rejoin_mock.side_effect = do_rejoin
        async def _maybe_do_autocommit(assign):
            return None
        coordinator._maybe_do_autocommit = autocommit_mock = mock.Mock()
        autocommit_mock.side_effect = _maybe_do_autocommit
        coordinator._start_heartbeat_task = mock.Mock()
        client.force_metadata_update = metadata_mock = mock.Mock()
        done_fut = create_future()
        done_fut.set_result(None)
        metadata_mock.side_effect = lambda: done_fut
        # CASE: coordination should stop and wait if subscription is not
        # present
        task = start_coordination()
        await asyncio.sleep(0.01)
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 0)
        # CASE: user assignment should skip rebalance calls
        subscription.assign_from_user({tp})
        await asyncio.sleep(0.01)
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 1)
        self.assertEqual(prepare_mock.call_count, 0)
        self.assertEqual(rejoin_mock.call_count, 0)
        self.assertEqual(autocommit_mock.call_count, 1)
        # CASE: with user assignment routine should not react to request_rejoin
        coordinator.request_rejoin()
        await asyncio.sleep(0.01)
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 1)
        self.assertEqual(prepare_mock.call_count, 0)
        self.assertEqual(rejoin_mock.call_count, 0)
        self.assertEqual(autocommit_mock.call_count, 1)
        coordinator._rejoin_needed_fut = create_future()
        # CASE: Changing subscription should propagate a rebalance
        subscription.unsubscribe()
        subscription.subscribe({"topic1"})
        await asyncio.sleep(0.01)
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 2)
        self.assertEqual(prepare_mock.call_count, 1)
        self.assertEqual(rejoin_mock.call_count, 1)
        self.assertEqual(autocommit_mock.call_count, 2)
        # CASE: If rejoin fails, we do it again without autocommit
        rejoin_ok = False
        coordinator.request_rejoin()
        await asyncio.sleep(0.01)
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 3)
        self.assertEqual(prepare_mock.call_count, 2)
        self.assertEqual(rejoin_mock.call_count, 2)
        self.assertEqual(autocommit_mock.call_count, 2)
        # CASE: After we retry we should not call _on_join_prepare again
        rejoin_ok = True
        await subscription.wait_for_assignment()
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 4)
        self.assertEqual(prepare_mock.call_count, 2)
        self.assertEqual(rejoin_mock.call_count, 3)
        self.assertEqual(autocommit_mock.call_count, 3)
        # CASE: If pattern subscription present we should update metadata
        # before joining.
        subscription.unsubscribe()
        subscription.subscribe_pattern(re.compile("^topic1&"))
        subscription.subscribe_from_pattern({"topic1"})
        self.assertEqual(metadata_mock.call_count, 0)
        await asyncio.sleep(0.01)
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 5)
        self.assertEqual(prepare_mock.call_count, 3)
        self.assertEqual(rejoin_mock.call_count, 4)
        self.assertEqual(autocommit_mock.call_count, 4)
        self.assertEqual(metadata_mock.call_count, 1)
        # CASE: on unsubscribe we should stop and wait for new subscription
        subscription.unsubscribe()
        await asyncio.sleep(0.01)
        self.assertFalse(task.done())
        self.assertEqual(coord_mock.call_count, 5)
        self.assertEqual(prepare_mock.call_count, 3)
        self.assertEqual(rejoin_mock.call_count, 4)
        self.assertEqual(autocommit_mock.call_count, 4)
        self.assertEqual(metadata_mock.call_count, 1)
        # CASE: on close we should perform finalizer and ignore its error
        coordinator._maybe_do_last_autocommit = last_commit_mock = mock.Mock()
        last_commit_mock.side_effect = Errors.UnknownError()
        await coordinator.close()
        self.assertTrue(task.done())
        # As we continued from a subscription wait it should fast exit
        self.assertEqual(coord_mock.call_count, 5)
        self.assertEqual(prepare_mock.call_count, 3)
        self.assertEqual(rejoin_mock.call_count, 4)
        self.assertEqual(autocommit_mock.call_count, 4)
        self.assertEqual(metadata_mock.call_count, 1)
        self.assertEqual(last_commit_mock.call_count, 1)
    @run_until_complete
    async def test_no_group_subscribe_during_metadata_update(self):
        # Issue #536. During metadata update we can't assume the subscription
        # did not change. We should handle the case by refreshing meta again.
        client = AIOKafkaClient(bootstrap_servers=self.hosts)
        await client.bootstrap()
        await self.wait_topic(client, 'topic1')
        await self.wait_topic(client, 'topic2')
        await client.set_topics(('other_topic', ))
        subscription = SubscriptionState()
        coordinator = NoGroupCoordinator(
            client, subscription)
        subscription.subscribe(topics={'topic1'})
        client.set_topics(('topic1', ))
        await asyncio.sleep(0.0001)
        # Change subscription before metadata update is received
        subscription.subscribe(topics={'topic2'})
        metadata_fut = client.set_topics(('topic2', ))
        # A short timeout is tolerated here; the assertion below only checks
        # that the metadata sync task did not crash on the mid-flight change.
        try:
            await asyncio.wait_for(
                metadata_fut,
                timeout=0.2
            )
        except asyncio.TimeoutError:
            pass
        self.assertFalse(client._sync_task.done())
        await coordinator.close()
        await client.close()
| {
"content_hash": "c6152e363921e2eea5732fc03d11fd57",
"timestamp": "",
"source": "github",
"line_count": 1463,
"max_line_length": 85,
"avg_line_length": 39.866712235133285,
"alnum_prop": 0.6271238748392628,
"repo_name": "aio-libs/aiokafka",
"id": "f8ebb6c0c0cc7b062ea041c35f5a2ea031441b69",
"size": "58325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_coordinator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "13694"
},
{
"name": "Cython",
"bytes": "65403"
},
{
"name": "Dockerfile",
"bytes": "1542"
},
{
"name": "Makefile",
"bytes": "2814"
},
{
"name": "Python",
"bytes": "856687"
},
{
"name": "Shell",
"bytes": "7128"
}
],
"symlink_target": ""
} |
"""
S3 TOKEN MIDDLEWARE
This WSGI component:
* Get a request from the swift3 middleware with an S3 Authorization
access key.
* Validate s3 token in Keystone.
* Transform the account name to AUTH_%(tenant_name).
"""
import httplib
import webob
from keystone.openstack.common import jsonutils
from swift.common import utils as swift_utils
PROTOCOL_NAME = 'S3 Token Authentication'
class ServiceError(Exception):
    """Signals a failed Keystone call; args[0] carries a ready error response."""
class S3Token(object):
    """Auth Middleware that handles S3 authenticating client calls."""
    def __init__(self, app, conf):
        """Common initialization code.

        :param app: downstream WSGI application.
        :param conf: mapping with the middleware configuration options.
        """
        self.app = app
        self.logger = swift_utils.get_logger(conf, log_route='s3token')
        self.logger.debug('Starting the %s component' % PROTOCOL_NAME)
        self.reseller_prefix = conf.get('reseller_prefix', 'AUTH_')
        # where to find the auth service (we use this to validate tokens)
        self.auth_host = conf.get('auth_host')
        self.auth_port = int(conf.get('auth_port', 35357))
        self.auth_protocol = conf.get('auth_protocol', 'https')
        if self.auth_protocol == 'http':
            self.http_client_class = httplib.HTTPConnection
        else:
            self.http_client_class = httplib.HTTPSConnection
        # SSL client credentials (only meaningful for HTTPS connections).
        self.cert_file = conf.get('certfile')
        self.key_file = conf.get('keyfile')
    def deny_request(self, code):
        """Build a webob error response with an S3-style XML body.

        :param code: one of the keys of ``error_table`` below.
        :returns: a webob.Response ready to be returned to the client.
        """
        error_table = {
            'AccessDenied': (401, 'Access denied'),
            'InvalidURI': (400, 'Could not parse the specified URI'),
        }
        resp = webob.Response(content_type='text/xml')
        resp.status = error_table[code][0]
        # BUG FIX: a prior plain-text assignment to resp.body here was dead
        # code (immediately overwritten by the XML body below); removed.
        resp.body = ('<?xml version="1.0" encoding="UTF-8"?>\r\n'
                     '<Error>\r\n <Code>%s</Code>\r\n '
                     '<Message>%s</Message>\r\n</Error>\r\n' %
                     (code, error_table[code][1]))
        return resp
    def _json_request(self, creds_json):
        """POST the S3 credentials to Keystone's /v2.0/s3tokens endpoint.

        :param creds_json: JSON-encoded credentials document.
        :returns: tuple of (httplib response, response body string).
        :raises ServiceError: wrapping a deny_request() response when the
            HTTP call fails or Keystone replies with a non-2xx status.
        """
        headers = {'Content-Type': 'application/json'}
        # BUG FIX: conn must be pre-initialized so the finally clause cannot
        # raise UnboundLocalError (masking the real error) if constructing
        # the connection itself fails.
        conn = None
        try:
            if self.auth_protocol == 'http':
                conn = self.http_client_class(self.auth_host, self.auth_port)
            else:
                conn = self.http_client_class(self.auth_host,
                                              self.auth_port,
                                              self.key_file,
                                              self.cert_file)
            conn.request('POST', '/v2.0/s3tokens',
                         body=creds_json,
                         headers=headers)
            response = conn.getresponse()
            output = response.read()
        except Exception as e:
            self.logger.info('HTTP connection exception: %s' % e)
            resp = self.deny_request('InvalidURI')
            raise ServiceError(resp)
        finally:
            if conn is not None:
                conn.close()
        if response.status < 200 or response.status >= 300:
            self.logger.debug('Keystone reply error: status=%s reason=%s' %
                              (response.status, response.reason))
            resp = self.deny_request('AccessDenied')
            raise ServiceError(resp)
        return (response, output)
    def __call__(self, environ, start_response):
        """Handle incoming request. authenticate and send downstream."""
        req = webob.Request(environ)
        self.logger.debug('Calling S3Token middleware.')
        try:
            parts = swift_utils.split_path(req.path, 1, 4, True)
            version, account, container, obj = parts
        except ValueError:
            msg = 'Not a path query, skipping.'
            self.logger.debug(msg)
            return self.app(environ, start_response)
        # Read request signature and access id.
        if 'Authorization' not in req.headers:
            msg = 'No Authorization header. skipping.'
            self.logger.debug(msg)
            return self.app(environ, start_response)
        token = req.headers.get('X-Auth-Token',
                                req.headers.get('X-Storage-Token'))
        if not token:
            msg = 'You did not specify a auth or a storage token. skipping.'
            self.logger.debug(msg)
            return self.app(environ, start_response)
        auth_header = req.headers['Authorization']
        try:
            access, signature = auth_header.split(' ')[-1].rsplit(':', 1)
        except ValueError:
            msg = 'You have an invalid Authorization header: %s'
            self.logger.debug(msg % (auth_header))
            return self.deny_request('InvalidURI')(environ, start_response)
        # NOTE(chmou): This is to handle the special case with nova
        # when we have the option s3_affix_tenant. We will force it to
        # connect to another account than the one
        # authenticated. Before people start getting worried about
        # security, I should point that we are connecting with
        # username/token specified by the user but instead of
        # connecting to its own account we will force it to go to an
        # another account. In a normal scenario if that user don't
        # have the reseller right it will just fail but since the
        # reseller account can connect to every account it is allowed
        # by the swift_auth middleware.
        force_tenant = None
        if ':' in access:
            access, force_tenant = access.split(':')
        # Authenticate request.
        creds = {'credentials': {'access': access,
                                 'token': token,
                                 'signature': signature}}
        creds_json = jsonutils.dumps(creds)
        self.logger.debug('Connecting to Keystone sending this JSON: %s' %
                          creds_json)
        # NOTE(vish): We could save a call to keystone by having
        #             keystone return token, tenant, user, and roles
        #             from this call.
        #
        # NOTE(chmou): We still have the same problem we would need to
        #              change token_auth to detect if we already
        #              identified and not doing a second query and just
        #              pass it thru to swiftauth in this case.
        try:
            resp, output = self._json_request(creds_json)
        except ServiceError as e:
            resp = e.args[0]
            msg = 'Received error, exiting middleware with error: %s'
            self.logger.debug(msg % (resp.status))
            return resp(environ, start_response)
        self.logger.debug('Keystone Reply: Status: %d, Output: %s' % (
            resp.status, output))
        try:
            identity_info = jsonutils.loads(output)
            token_id = str(identity_info['access']['token']['id'])
            tenant = identity_info['access']['token']['tenant']
        except (ValueError, KeyError):
            error = 'Error on keystone reply: %d %s'
            self.logger.debug(error % (resp.status, str(output)))
            return self.deny_request('InvalidURI')(environ, start_response)
        # Rewrite the storage account in the path to the Keystone tenant,
        # prefixed with the reseller prefix (e.g. AUTH_<tenant_id>).
        req.headers['X-Auth-Token'] = token_id
        tenant_to_connect = force_tenant or tenant['id']
        self.logger.debug('Connecting with tenant: %s' % (tenant_to_connect))
        new_tenant_name = '%s%s' % (self.reseller_prefix, tenant_to_connect)
        environ['PATH_INFO'] = environ['PATH_INFO'].replace(account,
                                                            new_tenant_name)
        return self.app(environ, start_response)
def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    settings = dict(global_conf)
    settings.update(local_conf)
    def auth_filter(app):
        # Wrap the downstream app with the merged configuration.
        return S3Token(app, settings)
    return auth_filter
| {
"content_hash": "f14c183fc036653265fc258095950352",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 77,
"avg_line_length": 40.19072164948454,
"alnum_prop": 0.5689367705527767,
"repo_name": "weiyuanke/mykeystone",
"id": "7cf2e3944dffc08438115616b103505f6e994c50",
"size": "8740",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keystone/middleware/s3_token.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "731676"
},
{
"name": "Shell",
"bytes": "5279"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python
"""
runtests.py [OPTIONS] [-- ARGS]
Run tests, building the project first.
Examples::
$ python runtests.py
$ python runtests.py -s {SAMPLE_SUBMODULE}
$ python runtests.py -t {SAMPLE_TEST}
$ python runtests.py --ipython
$ python runtests.py --python somescript.py
$ python runtests.py --bench
Run a debugger:
$ gdb --args python runtests.py [...other args...]
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
$ python runtests.py --gcov [...other args...]
$ python runtests.py --lcov-html
"""
#
# This is a generic test runner script for projects using Numpy's test
# framework. Change the following values to adapt to your project:
#
# Importable name of the package under test.
PROJECT_MODULE = "bananas"
# Files expected at the repository root; used to sanity-check the cwd.
PROJECT_ROOT_FILES = ['bananas', 'LICENSE', 'setup.py']
# Example names interpolated into the usage text (module docstring) above.
SAMPLE_TEST = "scipy/special/tests/test_basic.py:test_xlogy"
SAMPLE_SUBMODULE = "optimize"
# Compiler-cache directories prepended to PATH to speed up rebuilds.
EXTRA_PATH = ['/usr/lib/ccache', '/usr/lib/f90cache',
              '/usr/local/lib/ccache', '/usr/local/lib/f90cache']
# ---------------------------------------------------------------------
if __doc__ is None:
__doc__ = "Run without -OO if you want usage info"
else:
__doc__ = __doc__.format(**globals())
import sys
import os
# In case we are run from the source directory, we don't want to import the
# project from there:
sys.path.pop(0)
import shutil
import subprocess
import time
import imp
from argparse import ArgumentParser, REMAINDER
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def main(argv):
    """Parse command-line arguments, optionally build the project, and
    dispatch to the requested action (test run, benchmarks, shells,
    refguide check or coverage tooling). Exits via sys.exit in all paths.
    """
    parser = ArgumentParser(usage=__doc__.lstrip())
    parser.add_argument("--verbose", "-v", action="count", default=1,
                        help="more verbosity")
    parser.add_argument("--no-build", "-n", action="store_true", default=False,
                        help="do not build the project (use system installed version)")
    parser.add_argument("--build-only", "-b", action="store_true", default=False,
                        help="just build, do not run any tests")
    parser.add_argument("--doctests", action="store_true", default=False,
                        help="Run doctests in module")
    parser.add_argument("--refguide-check", action="store_true", default=False,
                        help="Run refguide check (do not run regular tests.)")
    parser.add_argument("--coverage", action="store_true", default=False,
                        help=("report coverage of project code. HTML output goes "
                              "under build/coverage"))
    parser.add_argument("--gcov", action="store_true", default=False,
                        help=("enable C code coverage via gcov (requires GCC). "
                              "gcov output goes to build/**/*.gc*"))
    parser.add_argument("--lcov-html", action="store_true", default=False,
                        help=("produce HTML for C code coverage information "
                              "from a previous run with --gcov. "
                              "HTML output goes to build/lcov/"))
    parser.add_argument("--mode", "-m", default="fast",
                        help="'fast', 'full', or something that could be "
                             "passed to nosetests -A [default: fast]")
    parser.add_argument("--submodule", "-s", default=None,
                        help="Submodule whose tests to run (cluster, constants, ...)")
    parser.add_argument("--pythonpath", "-p", default=None,
                        help="Paths to prepend to PYTHONPATH")
    parser.add_argument("--tests", "-t", action='append',
                        help="Specify tests to run")
    parser.add_argument("--python", action="store_true",
                        help="Start a Python shell with PYTHONPATH set")
    parser.add_argument("--ipython", "-i", action="store_true",
                        help="Start IPython shell with PYTHONPATH set")
    parser.add_argument("--shell", action="store_true",
                        help="Start Unix shell with PYTHONPATH set")
    parser.add_argument("--debug", "-g", action="store_true",
                        help="Debug build")
    parser.add_argument("--parallel", "-j", type=int, default=1,
                        help="Number of parallel jobs during build (requires "
                             "Numpy 1.10 or greater).")
    parser.add_argument("--show-build-log", action="store_true",
                        help="Show build output rather than using a log file")
    parser.add_argument("--bench", action="store_true",
                        help="Run benchmark suite instead of test suite")
    parser.add_argument("--bench-compare", action="append", metavar="BEFORE",
                        help=("Compare benchmark results of current HEAD to BEFORE. "
                              "Use an additional --bench-compare=COMMIT to override HEAD with COMMIT. "
                              "Note that you need to commit your changes first!"
                              ))
    parser.add_argument("args", metavar="ARGS", default=[], nargs=REMAINDER,
                        help="Arguments to pass to Nose, Python or shell")
    args = parser.parse_args(argv)
    if args.bench_compare:
        args.bench = True
        args.no_build = True  # ASV does the building
    if args.lcov_html:
        # generate C code coverage output
        lcov_generate()
        sys.exit(0)
    if args.pythonpath:
        for p in reversed(args.pythonpath.split(os.pathsep)):
            sys.path.insert(0, p)
    if args.gcov:
        gcov_reset_counters()
    if args.debug and args.bench:
        print("*** Benchmarks should not be run against debug version; remove -g flag ***")
    if not args.no_build:
        site_dir, site_dir2 = build_project(args)
        sys.path.insert(0, site_dir)
        sys.path.insert(0, site_dir2)
        os.environ['PYTHONPATH'] = site_dir + ':' + site_dir2
    extra_argv = args.args[:]
    if extra_argv and extra_argv[0] == '--':
        extra_argv = extra_argv[1:]
    if args.python:
        if extra_argv:
            # Don't use subprocess, since we don't want to include the
            # current path in PYTHONPATH.
            sys.argv = extra_argv
            with open(extra_argv[0], 'r') as f:
                script = f.read()
            sys.modules['__main__'] = imp.new_module('__main__')
            ns = dict(__name__='__main__',
                      __file__=extra_argv[0])
            exec_(script, ns)
            sys.exit(0)
        else:
            import code
            code.interact()
            sys.exit(0)
    if args.ipython:
        import IPython
        IPython.embed(user_ns={})
        sys.exit(0)
    if args.shell:
        shell = os.environ.get('SHELL', 'sh')
        print("Spawning a Unix shell...")
        os.execv(shell, [shell] + extra_argv)
        sys.exit(1)
    if args.coverage:
        dst_dir = os.path.join(ROOT_DIR, 'build', 'coverage')
        fn = os.path.join(dst_dir, 'coverage_html.js')
        if os.path.isdir(dst_dir) and os.path.isfile(fn):
            shutil.rmtree(dst_dir)
        extra_argv += ['--cover-html',
                       '--cover-html-dir='+dst_dir]
    if args.refguide_check:
        cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
               '--doctests']
        if args.submodule:
            cmd += [args.submodule]
        os.execv(sys.executable, [sys.executable] + cmd)
        sys.exit(0)
    if args.bench:
        # Run ASV
        items = extra_argv
        if args.tests:
            items += args.tests
        if args.submodule:
            items += [args.submodule]
        bench_args = []
        for a in items:
            bench_args.extend(['--bench', a])
        if not args.bench_compare:
            cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
                   'run', '-n', '-e', '--python=same'] + bench_args
            os.execv(sys.executable, [sys.executable] + cmd)
            sys.exit(1)
        else:
            if len(args.bench_compare) == 1:
                commit_a = args.bench_compare[0]
                commit_b = 'HEAD'
            elif len(args.bench_compare) == 2:
                commit_a, commit_b = args.bench_compare
            else:
                # BUG FIX: this previously called `p.error(...)`, but `p`
                # is not defined at this point -- the parser is `parser`.
                parser.error("Too many commits to compare benchmarks for")
            # Check for uncommitted files
            if commit_b == 'HEAD':
                r1 = subprocess.call(['git', 'diff-index', '--quiet', '--cached', 'HEAD'])
                r2 = subprocess.call(['git', 'diff-files', '--quiet'])
                if r1 != 0 or r2 != 0:
                    print("*"*80)
                    print("WARNING: you have uncommitted changes --- these will NOT be benchmarked!")
                    print("*"*80)
            # Fix commit ids (HEAD is local to current repo)
            p = subprocess.Popen(['git', 'rev-parse', commit_b], stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_b = out.strip()
            p = subprocess.Popen(['git', 'rev-parse', commit_a], stdout=subprocess.PIPE)
            out, err = p.communicate()
            commit_a = out.strip()
            cmd = [os.path.join(ROOT_DIR, 'benchmarks', 'run.py'),
                   '--current-repo', 'continuous', '-e', '-f', '1.05',
                   commit_a, commit_b] + bench_args
            os.execv(sys.executable, [sys.executable] + cmd)
            sys.exit(1)
    test_dir = os.path.join(ROOT_DIR, 'build', 'test')
    if args.build_only:
        sys.exit(0)
    elif args.submodule:
        modname = PROJECT_MODULE + '.' + args.submodule
        try:
            __import__(modname)
            test = sys.modules[modname].test
        except (ImportError, KeyError, AttributeError) as e:
            print("Cannot run tests for %s (%s)" % (modname, e))
            sys.exit(2)
    elif args.tests:
        def fix_test_path(x):
            # fix up test path to be relative to the test run directory
            p = x.split(':')
            p[0] = os.path.relpath(os.path.abspath(p[0]),
                                   test_dir)
            return ':'.join(p)
        tests = [fix_test_path(x) for x in args.tests]
        def test(*a, **kw):
            extra_argv = kw.pop('extra_argv', ())
            extra_argv = extra_argv + tests[1:]
            kw['extra_argv'] = extra_argv
            from numpy.testing import Tester
            return Tester(tests[0]).test(*a, **kw)
    else:
        __import__(PROJECT_MODULE)
        test = sys.modules[PROJECT_MODULE].test
    # Run the tests under build/test
    try:
        shutil.rmtree(test_dir)
    except OSError:
        pass
    try:
        os.makedirs(test_dir)
    except OSError:
        pass
    # shutil.copyfile(os.path.join(ROOT_DIR, '.coveragerc'),
    #                 os.path.join(test_dir, '.coveragerc'))
    cwd = os.getcwd()
    try:
        os.chdir(test_dir)
        result = test(args.mode,
                      verbose=args.verbose,
                      extra_argv=extra_argv,
                      doctests=args.doctests,
                      coverage=args.coverage)
    finally:
        os.chdir(cwd)
    if isinstance(result, bool):
        sys.exit(0 if result else 1)
    elif result.wasSuccessful():
        sys.exit(0)
    else:
        sys.exit(1)
def build_project(args):
    """
    Build a dev version of the project.

    Returns
    -------
    site_dir, site_dir2 : str
        The platform-specific and pure-Python site-packages directories
        under build/testenv where the project was installed.
        (The original docstring documented only one return value.)
    """
    root_ok = [os.path.exists(os.path.join(ROOT_DIR, fn))
               for fn in PROJECT_ROOT_FILES]
    if not all(root_ok):
        print("To build the project, run runtests.py in "
              "git checkout or unpacked source")
        sys.exit(1)
    dst_dir = os.path.join(ROOT_DIR, 'build', 'testenv')
    env = dict(os.environ)
    cmd = [sys.executable, 'setup.py']
    # Always use ccache, if installed
    env['PATH'] = os.pathsep.join(EXTRA_PATH + env.get('PATH', '').split(os.pathsep))
    if args.debug or args.gcov:
        # assume everyone uses gcc/gfortran
        env['OPT'] = '-O0 -ggdb'
        env['FOPT'] = '-O0 -ggdb'
        if args.gcov:
            import distutils.sysconfig
            cvars = distutils.sysconfig.get_config_vars()
            env['OPT'] = '-O0 -ggdb'
            env['FOPT'] = '-O0 -ggdb'
            env['CC'] = cvars['CC'] + ' --coverage'
            env['CXX'] = cvars['CXX'] + ' --coverage'
            env['F77'] = 'gfortran --coverage '
            env['F90'] = 'gfortran --coverage '
            env['LDSHARED'] = cvars['LDSHARED'] + ' --coverage'
            env['LDFLAGS'] = " ".join(cvars['LDSHARED'].split()[1:]) + ' --coverage'
    cmd += ['build']
    if args.parallel > 1:
        cmd += ['-j', str(args.parallel)]
    cmd += ['install', '--prefix=' + dst_dir]
    log_filename = os.path.join(ROOT_DIR, 'build.log')
    if args.show_build_log:
        ret = subprocess.call(cmd, env=env, cwd=ROOT_DIR)
    else:
        # NOTE: a redundant re-assignment of log_filename was removed here.
        print("Building, see build.log...")
        with open(log_filename, 'w') as log:
            p = subprocess.Popen(cmd, env=env, stdout=log, stderr=log,
                                 cwd=ROOT_DIR)
            # Wait for it to finish, and print something to indicate the
            # process is alive, but only if the log file has grown (to
            # allow continuous integration environments kill a hanging
            # process accurately if it produces no output)
            last_blip = time.time()
            last_log_size = os.stat(log_filename).st_size
            while p.poll() is None:
                time.sleep(0.5)
                if time.time() - last_blip > 60:
                    log_size = os.stat(log_filename).st_size
                    if log_size > last_log_size:
                        print("    ... build in progress")
                    last_blip = time.time()
                    last_log_size = log_size
            ret = p.wait()
    if ret == 0:
        print("Build OK")
    else:
        if not args.show_build_log:
            with open(log_filename, 'r') as f:
                print(f.read())
        print("Build failed!")
        sys.exit(1)
    from distutils.sysconfig import get_python_lib
    site_dir = get_python_lib(prefix=dst_dir, plat_specific=True)
    site_dir2 = get_python_lib(prefix=dst_dir, plat_specific=False)
    return site_dir, site_dir2
#
# GCOV support
#
def gcov_reset_counters():
    """Delete stale gcov data files under build/ so counters start at zero."""
    print("Removing previous GCOV .gcda files...")
    build_dir = os.path.join(ROOT_DIR, 'build')
    for dirpath, _dirnames, filenames in os.walk(build_dir):
        for name in filenames:
            if name.endswith(('.gcda', '.da')):
                os.unlink(os.path.join(dirpath, name))
#
# LCOV support
#
# Paths for the captured lcov data and the rendered HTML report.
LCOV_OUTPUT_FILE = os.path.join(ROOT_DIR, 'build', 'lcov.out')
LCOV_HTML_DIR = os.path.join(ROOT_DIR, 'build', 'lcov')
def lcov_generate():
    """Capture gcov results with lcov and render an HTML report.

    Reads coverage data from build/, writes build/lcov.out, then renders
    HTML under build/lcov/. Requires the external lcov/genhtml tools.
    """
    # Remove stale outputs from a previous run; missing files are fine.
    try: os.unlink(LCOV_OUTPUT_FILE)
    except OSError: pass
    try: shutil.rmtree(LCOV_HTML_DIR)
    except OSError: pass
    print("Capturing lcov info...")
    subprocess.call(['lcov', '-q', '-c',
                     '-d', os.path.join(ROOT_DIR, 'build'),
                     '-b', ROOT_DIR,
                     '--output-file', LCOV_OUTPUT_FILE])
    print("Generating lcov HTML output...")
    ret = subprocess.call(['genhtml', '-q', LCOV_OUTPUT_FILE,
                           '--output-directory', LCOV_HTML_DIR,
                           '--legend', '--highlight'])
    if ret != 0:
        print("genhtml failed!")
    else:
        print("HTML output generated under build/lcov/")
#
# Python 3 support
#
if sys.version_info[0] >= 3:
    # On Python 3, exec is an ordinary builtin function.
    import builtins
    exec_ = getattr(builtins, "exec")
else:
    # On Python 2, exec is a statement; wrap it in a function mirroring
    # the Python 3 builtin's signature.
    def exec_(code, globs=None, locs=None):
        """Execute code in a namespace."""
        if globs is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            globs = frame.f_globals
            if locs is None:
                locs = frame.f_locals
            del frame
        elif locs is None:
            locs = globs
        exec("""exec code in globs, locs""")
if __name__ == "__main__":
    # Script entry point: forward command-line arguments (minus argv[0]).
    main(argv=sys.argv[1:])
| {
"content_hash": "e5851c498ab1ceab092ff823aad700fc",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 103,
"avg_line_length": 35.22875816993464,
"alnum_prop": 0.5395176252319109,
"repo_name": "bccp/bananaplots",
"id": "769d333c12ea8c5d4c26b58cf1cacfa4a9cd8bfc",
"size": "16170",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "38562"
},
{
"name": "Shell",
"bytes": "185"
}
],
"symlink_target": ""
} |
import cv2
import numpy as np
import timeit
import logging
import os
import cPickle as pickle
from scipy import ndimage
import fli
import gzip
import scipy.ndimage.interpolation as scipint
from os import listdir
from os import remove
from os.path import isfile, join
path = '../data/custom/'
def shuffle_in_unison(a, b):
    """Shuffle two equal-length arrays with one shared random permutation.

    Row i of both inputs lands at the same (random) output position, so
    sample/label pairing is preserved. Returns new arrays; inputs are
    left untouched.
    """
    assert len(a) == len(b)
    perm = np.random.permutation(len(a))
    out_a = np.empty(a.shape, dtype=a.dtype)
    out_b = np.empty(b.shape, dtype=b.dtype)
    # Fancy-index assignment: out[perm[i]] = in[i], same as the element loop.
    out_a[perm] = a
    out_b[perm] = b
    return out_a, out_b
def get_blurred_vector(vector, blur=1):
    """Gaussian-blur a flattened 28x28 image; return it flattened again."""
    image = vector.reshape(28, 28)
    return ndimage.gaussian_filter(image, blur).flatten()
def get_blurred_sets(set_x, set_y, blur=1):
    """Blur every row of set_x via get_blurred_vector; labels pass through."""
    blurred_x = np.apply_along_axis(
        get_blurred_vector, axis=1, arr=set_x, blur=blur)
    return blurred_x, set_y
def get_rotated_vector(vector, angle=0):
    """Rotate a flattened 28x28 image by *angle* degrees (nearest-neighbor,
    same output shape) and return it flattened again."""
    image = vector.reshape(28, 28)
    rotated = scipint.rotate(image, angle, order=0, reshape=False)
    return rotated.flatten()
def get_rotated_sets(set_x, set_y, angle=0):
    """Rotate every row of set_x via get_rotated_vector; labels pass through."""
    rotated_x = np.apply_along_axis(
        get_rotated_vector, axis=1, arr=set_x, angle=angle)
    return rotated_x, set_y
def remove_blur_files(path=path):
    """Delete every previously generated 'blur' image file from *path*."""
    blur_files = [f for f in listdir(path)
                  if isfile(join(path, f)) and 'blur' in f]
    for name in blur_files:
        remove(path + name)
def create_blur_files(path=path, blur=1):
    """Write a Gaussian-blurred, inverted copy of every image in *path*.

    Files whose names already contain 'blur' are skipped; each output is
    saved next to its source as '<name up to first dot>_blur_a.png'.
    """
    sources = [f for f in listdir(path)
               if isfile(join(path, f)) and 'blur' not in f]
    for name in sources:
        test_img = fli.processImg(path, name, flatten=False)
        dot = name.find('.')
        blurred = np.invert(
            ndimage.gaussian_filter(test_img.reshape((28, 28)), blur))
        cv2.imwrite(path + name[:dot] + '_blur_a.png', blurred)
def save_model(params, epoch=-1, best_validation_loss=-1, test_score=-1,
               namestub='test', randomInit=False, add_blurs=False,
               testrun=False, logfilename='testLog.log', endrun=False,
               annotation=''):
    """Pickle *params* to a descriptive filename and append a log record.

    The filename encodes the epoch, init mode ('_rand'/'_zero'), optional
    annotation, and '_blur'/'_test' flags. A semicolon-separated record is
    emitted through the logging module; a header line is emitted first if
    *logfilename* is empty or missing.
    """
    blur = ''
    last = ''
    if randomInit:
        init_1 = '_rand'
    else:
        init_1 = '_zero'
    if add_blurs:
        blur = '_blur'
    if testrun:
        last = '_test'
    savedFileName = namestub + str(epoch) + "_pars_" + init_1 + annotation + blur + last + '.pkl'
    # Use a context manager so the pickle file is closed even on error.
    with open(savedFileName, 'wb') as gg:
        pickle.dump(params, gg, protocol=pickle.HIGHEST_PROTOCOL)
    print(('Best model params saved as ' + savedFileName
           + ' with test score %f %%') % (test_score * 100.))
    time = timeit.default_timer()
    if not os.path.isfile(logfilename):
        # BUG FIX: the handle returned by open() was previously leaked;
        # create the (empty) log file and close it immediately.
        open(logfilename, 'w+').close()
    if not os.path.getsize(logfilename) > 0:
        # First record: write the column header ('filenamme' typo fixed).
        logging.info('end_time;epoch;filename;best_validation_score;test_score')
    logging.info(str(time) + ';' + str(epoch) + ';' + savedFileName +
                 ';' + str(best_validation_loss * 100) +
                 ';' + str(test_score * 100.))
    if endrun:
        logging.info('-----------------------------------------')
def load_params(filename):
    """Unpickle and return model parameters previously written by save_model."""
    with open(filename, 'rb') as fh:
        return pickle.load(fh)
def epoch_from_filename(filename):
    """Extract the epoch number embedded in a save_model() filename.

    E.g. 'test12_pars__zero.pkl' -> 12. BUG FIX: the previous
    ``int(filter(str.isdigit, ...))`` only worked on Python 2, where
    filter() returned a str; on Python 3 it raised a TypeError.
    """
    prefix = filename.split("pars")[0]
    return int(''.join(ch for ch in prefix if ch.isdigit()))
def load_mnist(mnist_path="../data/mnist.pkl.gz"):
    """Load the pickled MNIST dataset, downloading it first if missing."""
    data_dir, data_file = os.path.split(mnist_path)
    if data_file == 'mnist.pkl.gz' and not os.path.isfile(mnist_path):
        from six.moves import urllib
        origin = (
            'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
        )
        print('Downloading data from %s' % origin)
        urllib.request.urlretrieve(origin, mnist_path)
    f = gzip.open(mnist_path, 'rb')
    try:
        dataset = pickle.load(f)
    finally:
        f.close()
    return dataset
| {
"content_hash": "ab6a8f8bb51708cb01c11c7f30f2a982",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 94,
"avg_line_length": 35.97196261682243,
"alnum_prop": 0.6349701221096389,
"repo_name": "laputian/dml",
"id": "752170db334bae3d5f5bf2ed337f9d1a5c4184fa",
"size": "3849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlp_test/data_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "326596"
}
],
"symlink_target": ""
} |
import os
import uuid
from keystone.tests import unit as tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database
from keystone.tests.unit import test_backend
DEFAULT_CATALOG_TEMPLATES = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'default_catalog.templates'))
class TestTemplatedCatalog(tests.TestCase, test_backend.CatalogTests):
    """Tests for the templated (file-based) catalog backend.

    Runs the shared CatalogTests suite against the driver configured from
    the ``default_catalog.templates`` fixture, and skips the cases that
    the templated backend does not support.
    """
    # Catalog expected when the template fixture is rendered for user
    # 'foo' / tenant 'bar'.
    DEFAULT_FIXTURE = {
        'RegionOne': {
            'compute': {
                'adminURL': 'http://localhost:8774/v1.1/bar',
                'publicURL': 'http://localhost:8774/v1.1/bar',
                'internalURL': 'http://localhost:8774/v1.1/bar',
                'name': "'Compute Service'",
                'id': '2'
            },
            'identity': {
                'adminURL': 'http://localhost:35357/v2.0',
                'publicURL': 'http://localhost:5000/v2.0',
                'internalURL': 'http://localhost:35357/v2.0',
                'name': "'Identity Service'",
                'id': '1'
            }
        }
    }
    def setUp(self):
        super(TestTemplatedCatalog, self).setUp()
        self.useFixture(database.Database())
        self.load_backends()
        self.load_fixtures(default_fixtures)
    def config_overrides(self):
        # Point the catalog driver at the on-disk template fixture.
        super(TestTemplatedCatalog, self).config_overrides()
        self.config_fixture.config(group='catalog',
                                   template_file=DEFAULT_CATALOG_TEMPLATES)
    def test_get_catalog(self):
        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
        self.assertDictEqual(catalog_ref, self.DEFAULT_FIXTURE)
    def test_catalog_ignored_malformed_urls(self):
        # both endpoints are in the catalog
        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
        self.assertEqual(2, len(catalog_ref['RegionOne']))
        # Inject an endpoint URL whose substitution key cannot be resolved.
        region = self.catalog_api.driver.templates['RegionOne']
        region['compute']['adminURL'] = 'http://localhost:8774/v1.1/$(tenant)s'
        # the malformed one has been removed
        catalog_ref = self.catalog_api.get_catalog('foo', 'bar')
        self.assertEqual(1, len(catalog_ref['RegionOne']))
    def test_get_catalog_endpoint_disabled(self):
        self.skipTest("Templated backend doesn't have disabled endpoints")
    def test_get_v3_catalog_endpoint_disabled(self):
        self.skipTest("Templated backend doesn't have disabled endpoints")
    def assert_catalogs_equal(self, expected, observed):
        # Compare endpoint lists order-insensitively, then the remaining
        # service attributes exactly.
        for e, o in zip(sorted(expected), sorted(observed)):
            expected_endpoints = e.pop('endpoints')
            observed_endpoints = o.pop('endpoints')
            self.assertDictEqual(e, o)
            self.assertItemsEqual(expected_endpoints, observed_endpoints)
    def test_get_v3_catalog(self):
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        catalog_ref = self.catalog_api.get_v3_catalog(user_id, project_id)
        exp_catalog = [
            {'endpoints': [
                {'interface': 'admin',
                 'region': 'RegionOne',
                 'url': 'http://localhost:8774/v1.1/%s' % project_id},
                {'interface': 'public',
                 'region': 'RegionOne',
                 'url': 'http://localhost:8774/v1.1/%s' % project_id},
                {'interface': 'internal',
                 'region': 'RegionOne',
                 'url': 'http://localhost:8774/v1.1/%s' % project_id}],
             'type': 'compute',
             'name': "'Compute Service'",
             'id': '2'},
            {'endpoints': [
                {'interface': 'admin',
                 'region': 'RegionOne',
                 'url': 'http://localhost:35357/v2.0'},
                {'interface': 'public',
                 'region': 'RegionOne',
                 'url': 'http://localhost:5000/v2.0'},
                {'interface': 'internal',
                 'region': 'RegionOne',
                 'url': 'http://localhost:35357/v2.0'}],
             'type': 'identity',
             'name': "'Identity Service'",
             'id': '1'}]
        self.assert_catalogs_equal(exp_catalog, catalog_ref)
    def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
        user_id = uuid.uuid4().hex
        # If the URL has no 'tenant_id' to substitute, we will skip the
        # endpoint which contains this kind of URL.
        catalog_ref = self.catalog_api.get_v3_catalog(user_id, tenant_id=None)
        exp_catalog = [
            {'endpoints': [],
             'type': 'compute',
             'name': "'Compute Service'",
             'id': '2'},
            {'endpoints': [
                {'interface': 'admin',
                 'region': 'RegionOne',
                 'url': 'http://localhost:35357/v2.0'},
                {'interface': 'public',
                 'region': 'RegionOne',
                 'url': 'http://localhost:5000/v2.0'},
                {'interface': 'internal',
                 'region': 'RegionOne',
                 'url': 'http://localhost:35357/v2.0'}],
             'type': 'identity',
             'name': "'Identity Service'",
             'id': '1'}]
        self.assert_catalogs_equal(exp_catalog, catalog_ref)
    def test_list_regions_filtered_by_parent_region_id(self):
        self.skipTest('Templated backend does not support hints')
    def test_service_filtering(self):
        self.skipTest("Templated backend doesn't support filtering")
| {
"content_hash": "56a5cf6b24a8d3a35ce272cf8bc0e6c5",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 79,
"avg_line_length": 39.5,
"alnum_prop": 0.5463217758209503,
"repo_name": "yanheven/keystone",
"id": "89e72992b3a601cfdceccd63e17ec4f3680e7974",
"size": "6037",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/test_backend_templated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "3840561"
},
{
"name": "Shell",
"bytes": "10877"
}
],
"symlink_target": ""
} |
"""Custom convolutional neural network layers in model_zoo."""
__all__ = ['DeformableConvolution', 'ModulatedDeformableConvolution']
from .... import symbol
from ...block import HybridBlock
from ....base import numeric_types
from ...nn import Activation
class DeformableConvolution(HybridBlock):
    """2-D Deformable Convolution v_1 (Dai, 2017).
    Normal Convolution uses sampling points in a regular grid, while the sampling
    points of Deformable Convolution can be offset. The offset is learned with a
    separate convolution layer during the training. Both the convolution layer for
    generating the output features and the offsets are included in this gluon layer.
    Parameters
    ----------
    channels : int,
        The dimensionality of the output space
        i.e. the number of output channels in the convolution.
    kernel_size : int or tuple/list of 2 ints, (Default value = (1,1))
        Specifies the dimensions of the convolution window.
    strides : int or tuple/list of 2 ints, (Default value = (1,1))
        Specifies the strides of the convolution.
    padding : int or tuple/list of 2 ints, (Default value = (0,0))
        If padding is non-zero, then the input is implicitly zero-padded
        on both sides for padding number of points.
    dilation : int or tuple/list of 2 ints, (Default value = (1,1))
        Specifies the dilation rate to use for dilated convolution.
    groups : int, (Default value = 1)
        Controls the connections between inputs and outputs.
        At groups=1, all inputs are convolved to all outputs.
        At groups=2, the operation becomes equivalent to having two convolution
        layers side by side, each seeing half the input channels, and producing
        half the output channels, and both subsequently concatenated.
    num_deformable_group : int, (Default value = 1)
        Number of deformable group partitions.
    layout : str, (Default value = NCHW)
        Dimension ordering of data and weight. Can be 'NCW', 'NWC', 'NCHW',
        'NHWC', 'NCDHW', 'NDHWC', etc. 'N', 'C', 'H', 'W', 'D' stands for
        batch, channel, height, width and depth dimensions respectively.
        Convolution is performed over 'D', 'H', and 'W' dimensions.
    use_bias : bool, (Default value = True)
        Whether the layer for generating the output features uses a bias vector.
    in_channels : int, (Default value = 0)
        The number of input channels to this layer. If not specified,
        initialization will be deferred to the first time `forward` is called
        and input channels will be inferred from the shape of input data.
    activation : str, (Default value = None)
        Activation function to use. See :func:`~mxnet.ndarray.Activation`.
        If you don't specify anything, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
    weight_initializer : str or `Initializer`, (Default value = None)
        Initializer for the `weight` weights matrix for the convolution layer
        for generating the output features.
    bias_initializer : str or `Initializer`, (Default value = zeros)
        Initializer for the bias vector for the convolution layer
        for generating the output features.
    offset_weight_initializer : str or `Initializer`, (Default value = zeros)
        Initializer for the `weight` weights matrix for the convolution layer
        for generating the offset.
    offset_bias_initializer : str or `Initializer`, (Default value = zeros),
        Initializer for the bias vector for the convolution layer
        for generating the offset.
    offset_use_bias: bool, (Default value = True)
        Whether the layer for generating the offset uses a bias vector.
    Inputs:
        - **data**: 4D input tensor with shape
          `(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
          For other layouts shape is permuted accordingly.
    Outputs:
        - **out**: 4D output tensor with shape
          `(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
          out_height and out_width are calculated as::
              out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
              out_width = floor((width+2*padding[1]-dilation[1]*(kernel_size[1]-1)-1)/stride[1])+1
    """
    def __init__(self, channels, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1,
                 num_deformable_group=1, layout='NCHW', use_bias=True, in_channels=0, activation=None,
                 weight_initializer=None, bias_initializer='zeros',
                 offset_weight_initializer='zeros', offset_bias_initializer='zeros', offset_use_bias=True,
                 op_name='DeformableConvolution', adj=None, prefix=None, params=None):
        super(DeformableConvolution, self).__init__(prefix=prefix, params=params)
        with self.name_scope():
            self._channels = channels
            self._in_channels = in_channels
            assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now"
            # Normalize scalar hyper-parameters into per-dimension 2-tuples.
            if isinstance(kernel_size, numeric_types):
                kernel_size = (kernel_size,) * 2
            if isinstance(strides, numeric_types):
                strides = (strides,) * len(kernel_size)
            if isinstance(padding, numeric_types):
                padding = (padding,) * len(kernel_size)
            if isinstance(dilation, numeric_types):
                dilation = (dilation,) * len(kernel_size)
            self._op_name = op_name
            # Two offset components per kernel sampling point (the factor 2),
            # replicated for each deformable group.
            offset_channels = 2 * kernel_size[0] * kernel_size[1] * num_deformable_group
            # Operator kwargs for the plain Convolution that produces offsets.
            self._kwargs_offset = {
                'kernel': kernel_size, 'stride': strides, 'dilate': dilation,
                'pad': padding, 'num_filter': offset_channels, 'num_group': groups,
                'no_bias': not offset_use_bias, 'layout': layout}
            # Operator kwargs for contrib.DeformableConvolution (the feature conv).
            self._kwargs_deformable_conv = {
                'kernel': kernel_size, 'stride': strides, 'dilate': dilation,
                'pad': padding, 'num_filter': channels, 'num_group': groups,
                'num_deformable_group': num_deformable_group,
                'no_bias': not use_bias, 'layout': layout}
            if adj:
                self._kwargs_offset['adj'] = adj
                self._kwargs_deformable_conv['adj'] = adj
            # Build a dummy input shape (batch=1, channels possibly 0 for
            # deferred init) and run it through a throwaway Convolution symbol
            # so partial shape inference yields the offset conv's weight/bias
            # shapes; unknown dims stay 0 and are resolved at first forward.
            dshape = [0] * (len(kernel_size) + 2)
            dshape[layout.find('N')] = 1
            dshape[layout.find('C')] = in_channels
            op = getattr(symbol, 'Convolution')
            offset = op(symbol.var('data', shape=dshape), **self._kwargs_offset)
            # infer_shape_partial()[0] is the list of argument shapes:
            # [data, weight, (bias)].
            offsetshapes = offset.infer_shape_partial()[0]
            self.offset_weight = self.params.get('offset_weight', shape=offsetshapes[1],
                                                 init=offset_weight_initializer,
                                                 allow_deferred_init=True)
            if offset_use_bias:
                self.offset_bias = self.params.get('offset_bias', shape=offsetshapes[2],
                                                   init=offset_bias_initializer,
                                                   allow_deferred_init=True)
            else:
                self.offset_bias = None
            # Feature-conv weight shape is (channels, in_channels, kh, kw);
            # index 1 is left 0 so it is inferred on first forward.
            deformable_conv_weight_shape = [0] * (len(kernel_size) + 2)
            deformable_conv_weight_shape[0] = channels
            deformable_conv_weight_shape[2] = kernel_size[0]
            deformable_conv_weight_shape[3] = kernel_size[1]
            self.deformable_conv_weight = self.params.get('deformable_conv_weight',
                                                          shape=deformable_conv_weight_shape,
                                                          init=weight_initializer,
                                                          allow_deferred_init=True)
            if use_bias:
                self.deformable_conv_bias = self.params.get('deformable_conv_bias', shape=(channels,),
                                                            init=bias_initializer,
                                                            allow_deferred_init=True)
            else:
                self.deformable_conv_bias = None
            if activation:
                self.act = Activation(activation, prefix=activation + '_')
            else:
                self.act = None
    def hybrid_forward(self, F, x, offset_weight, deformable_conv_weight, offset_bias=None, deformable_conv_bias=None):
        # First compute the per-location offsets with a regular convolution,
        # then feed data + offsets to the deformable convolution operator.
        # NOTE(review): cudnn_off=True on the offset conv — presumably to
        # force the reference implementation for this branch; confirm.
        if offset_bias is None:
            offset = F.Convolution(x, offset_weight, cudnn_off=True, **self._kwargs_offset)
        else:
            offset = F.Convolution(x, offset_weight, offset_bias, cudnn_off=True, **self._kwargs_offset)
        # Branch on the optional bias because Gluon passes params positionally.
        if deformable_conv_bias is None:
            act = F.contrib.DeformableConvolution(data=x, offset=offset, weight=deformable_conv_weight,
                                                  name='fwd', **self._kwargs_deformable_conv)
        else:
            act = F.contrib.DeformableConvolution(data=x, offset=offset, weight=deformable_conv_weight,
                                                  bias=deformable_conv_bias, name='fwd',
                                                  **self._kwargs_deformable_conv)
        if self.act:
            act = self.act(act)
        return act
    def _alias(self):
        # Base name used by Gluon when auto-generating block prefixes.
        return 'deformable_conv'
    def __repr__(self):
        # Mirror the repr style of gluon.nn.Conv2D: only print the settings
        # that differ from their defaults.
        s = '{name}({mapping}, kernel_size={kernel}, stride={stride}'
        len_kernel_size = len(self._kwargs_deformable_conv['kernel'])
        if self._kwargs_deformable_conv['pad'] != (0,) * len_kernel_size:
            s += ', padding={pad}'
        if self._kwargs_deformable_conv['dilate'] != (1,) * len_kernel_size:
            s += ', dilation={dilate}'
        if hasattr(self, 'out_pad') and self.out_pad != (0,) * len_kernel_size:
            s += ', output_padding={out_pad}'.format(out_pad=self.out_pad)
        if self._kwargs_deformable_conv['num_group'] != 1:
            s += ', groups={num_group}'
        if self.deformable_conv_bias is None:
            s += ', bias=False'
        if self.act:
            s += ', {}'.format(self.act)
        s += ')'
        # shape[1] is 0 until in_channels has been inferred; show None then.
        shape = self.deformable_conv_weight.shape
        return s.format(name=self.__class__.__name__,
                        mapping='{0} -> {1}'.format(shape[1] if shape[1] else None, shape[0]),
                        **self._kwargs_deformable_conv)
class ModulatedDeformableConvolution(HybridBlock):
    """2-D Deformable Convolution v2 (Dai, 2018).
    The modulated deformable convolution operation is described in https://arxiv.org/abs/1811.11168
    Parameters
    ----------
    channels : int,
        The dimensionality of the output space
        i.e. the number of output channels in the convolution.
    kernel_size : int or tuple/list of 2 ints, (Default value = (1,1))
        Specifies the dimensions of the convolution window.
    strides : int or tuple/list of 2 ints, (Default value = (1,1))
        Specifies the strides of the convolution.
    padding : int or tuple/list of 2 ints, (Default value = (0,0))
        If padding is non-zero, then the input is implicitly zero-padded
        on both sides for padding number of points.
    dilation : int or tuple/list of 2 ints, (Default value = (1,1))
        Specifies the dilation rate to use for dilated convolution.
    groups : int, (Default value = 1)
        Controls the connections between inputs and outputs.
        At groups=1, all inputs are convolved to all outputs.
        At groups=2, the operation becomes equivalent to having two convolution
        layers side by side, each seeing half the input channels, and producing
        half the output channels, and both subsequently concatenated.
    num_deformable_group : int, (Default value = 1)
        Number of deformable group partitions.
    layout : str, (Default value = NCHW)
        Dimension ordering of data and weight. Can be 'NCW', 'NWC', 'NCHW',
        'NHWC', 'NCDHW', 'NDHWC', etc. 'N', 'C', 'H', 'W', 'D' stands for
        batch, channel, height, width and depth dimensions respectively.
        Convolution is performed over 'D', 'H', and 'W' dimensions.
    use_bias : bool, (Default value = True)
        Whether the layer for generating the output features uses a bias vector.
    in_channels : int, (Default value = 0)
        The number of input channels to this layer. If not specified,
        initialization will be deferred to the first time `forward` is called
        and input channels will be inferred from the shape of input data.
    activation : str, (Default value = None)
        Activation function to use. See :func:`~mxnet.ndarray.Activation`.
        If you don't specify anything, no activation is applied
        (ie. "linear" activation: `a(x) = x`).
    weight_initializer : str or `Initializer`, (Default value = None)
        Initializer for the `weight` weights matrix for the convolution layer
        for generating the output features.
    bias_initializer : str or `Initializer`, (Default value = zeros)
        Initializer for the bias vector for the convolution layer
        for generating the output features.
    offset_weight_initializer : str or `Initializer`, (Default value = zeros)
        Initializer for the `weight` weights matrix for the convolution layer
        for generating the offset.
    offset_bias_initializer : str or `Initializer`, (Default value = zeros),
        Initializer for the bias vector for the convolution layer
        for generating the offset.
    offset_use_bias: bool, (Default value = True)
        Whether the layer for generating the offset uses a bias vector.
    Inputs:
        - **data**: 4D input tensor with shape
          `(batch_size, in_channels, height, width)` when `layout` is `NCHW`.
          For other layouts shape is permuted accordingly.
    Outputs:
        - **out**: 4D output tensor with shape
          `(batch_size, channels, out_height, out_width)` when `layout` is `NCHW`.
          out_height and out_width are calculated as::
              out_height = floor((height+2*padding[0]-dilation[0]*(kernel_size[0]-1)-1)/stride[0])+1
              out_width = floor((width+2*padding[1]-dilation[1]*(kernel_size[1]-1)-1)/stride[1])+1
    """
    def __init__(self, channels, kernel_size=(1, 1), strides=(1, 1), padding=(0, 0), dilation=(1, 1), groups=1,
                 num_deformable_group=1, layout='NCHW', use_bias=True, in_channels=0, activation=None,
                 weight_initializer=None, bias_initializer='zeros',
                 offset_weight_initializer='zeros', offset_bias_initializer='zeros', offset_use_bias=True,
                 op_name='ModulatedDeformableConvolution', adj=None, prefix=None, params=None):
        super(ModulatedDeformableConvolution, self).__init__(prefix=prefix, params=params)
        with self.name_scope():
            self._channels = channels
            self._in_channels = in_channels
            assert layout in ('NCHW', 'NHWC'), "Only supports 'NCHW' and 'NHWC' layout for now"
            # Normalize scalar hyper-parameters into per-dimension 2-tuples.
            if isinstance(kernel_size, numeric_types):
                kernel_size = (kernel_size,) * 2
            if isinstance(strides, numeric_types):
                strides = (strides,) * len(kernel_size)
            if isinstance(padding, numeric_types):
                padding = (padding,) * len(kernel_size)
            if isinstance(dilation, numeric_types):
                dilation = (dilation,) * len(kernel_size)
            self._op_name = op_name
            # v2 predicts 3 values per sampling point: 2 offsets (x, y) plus
            # 1 modulation scalar, per deformable group.
            offset_channels = num_deformable_group * 3 * kernel_size[0] * kernel_size[1]
            # First 2/3 of the channels are offsets; the remainder is the mask.
            self.offset_split_index = num_deformable_group * 2 * kernel_size[0] * kernel_size[1]
            # Operator kwargs for the plain Convolution that produces offsets+mask.
            self._kwargs_offset = {
                'kernel': kernel_size, 'stride': strides, 'dilate': dilation,
                'pad': padding, 'num_filter': offset_channels, 'num_group': groups,
                'no_bias': not offset_use_bias, 'layout': layout}
            # Operator kwargs for contrib.ModulatedDeformableConvolution.
            self._kwargs_deformable_conv = {
                'kernel': kernel_size, 'stride': strides, 'dilate': dilation,
                'pad': padding, 'num_filter': channels, 'num_group': groups,
                'num_deformable_group': num_deformable_group,
                'no_bias': not use_bias, 'layout': layout}
            if adj:
                self._kwargs_offset['adj'] = adj
                self._kwargs_deformable_conv['adj'] = adj
            # Feature-conv weight shape is (channels, in_channels, kh, kw);
            # index 1 is left 0 so it is inferred on first forward.
            deformable_conv_weight_shape = [0] * (len(kernel_size) + 2)
            deformable_conv_weight_shape[0] = channels
            deformable_conv_weight_shape[2] = kernel_size[0]
            deformable_conv_weight_shape[3] = kernel_size[1]
            self.deformable_conv_weight = self.params.get('deformable_conv_weight',
                                                          shape=deformable_conv_weight_shape,
                                                          init=weight_initializer,
                                                          allow_deferred_init=True)
            if use_bias:
                self.deformable_conv_bias = self.params.get('deformable_conv_bias', shape=(channels,),
                                                            init=bias_initializer,
                                                            allow_deferred_init=True)
            else:
                self.deformable_conv_bias = None
            # Build a dummy input shape (batch=1, channels possibly 0 for
            # deferred init) and run it through a throwaway Convolution symbol
            # so partial shape inference yields the offset conv's weight/bias
            # shapes; unknown dims stay 0 and are resolved at first forward.
            dshape = [0] * (len(kernel_size) + 2)
            dshape[layout.find('N')] = 1
            dshape[layout.find('C')] = in_channels
            op = getattr(symbol, 'Convolution')
            offset = op(symbol.var('data', shape=dshape), **self._kwargs_offset)
            # infer_shape_partial()[0] is the list of argument shapes:
            # [data, weight, (bias)].
            offsetshapes = offset.infer_shape_partial()[0]
            self.offset_weight = self.params.get('offset_weight', shape=offsetshapes[1],
                                                 init=offset_weight_initializer,
                                                 allow_deferred_init=True)
            if offset_use_bias:
                self.offset_bias = self.params.get('offset_bias', shape=offsetshapes[2],
                                                   init=offset_bias_initializer,
                                                   allow_deferred_init=True)
            else:
                self.offset_bias = None
            if activation:
                self.act = Activation(activation, prefix=activation + '_')
            else:
                self.act = None
    def hybrid_forward(self, F, x, offset_weight, deformable_conv_weight, offset_bias=None, deformable_conv_bias=None):
        # One convolution predicts both the offsets and the modulation mask;
        # NOTE(review): cudnn_off=True presumably forces the reference
        # implementation for this branch; confirm.
        if offset_bias is None:
            offset = F.Convolution(x, offset_weight, cudnn_off=True, **self._kwargs_offset)
        else:
            offset = F.Convolution(x, offset_weight, offset_bias, cudnn_off=True, **self._kwargs_offset)
        # Split channel-wise: offsets first, modulation mask second.
        offset_t = F.slice_axis(offset, axis=1, begin=0, end=self.offset_split_index)
        mask = F.slice_axis(offset, axis=1, begin=self.offset_split_index, end=None)
        # sigmoid squashes to (0, 1); the *2 rescales so the modulation
        # averages ~1 at zero-init.  NOTE(review): the DCNv2 paper uses a
        # plain sigmoid — confirm the *2 matches the intended reference.
        mask = F.sigmoid(mask) * 2
        # Branch on the optional bias because Gluon passes params positionally.
        if deformable_conv_bias is None:
            act = F.contrib.ModulatedDeformableConvolution(data=x, offset=offset_t, mask=mask,
                                                           weight=deformable_conv_weight,
                                                           name='fwd', **self._kwargs_deformable_conv)
        else:
            act = F.contrib.ModulatedDeformableConvolution(data=x, offset=offset_t, mask=mask,
                                                           weight=deformable_conv_weight,
                                                           bias=deformable_conv_bias, name='fwd',
                                                           **self._kwargs_deformable_conv)
        if self.act:
            act = self.act(act)
        return act
    def _alias(self):
        # Base name used by Gluon when auto-generating block prefixes.
        return 'modulated_deformable_conv'
| {
"content_hash": "4b2eeefe62012fedeac4066de6cfed9f",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 119,
"avg_line_length": 52.338582677165356,
"alnum_prop": 0.5794092573090618,
"repo_name": "larroy/mxnet",
"id": "c4924c130a28dbdc9672bb5492f3be341392c159",
"size": "20779",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "python/mxnet/gluon/contrib/cnn/conv_layers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "227904"
},
{
"name": "C++",
"bytes": "9484781"
},
{
"name": "CMake",
"bytes": "157181"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1290387"
},
{
"name": "Dockerfile",
"bytes": "100732"
},
{
"name": "Groovy",
"bytes": "165549"
},
{
"name": "HTML",
"bytes": "40277"
},
{
"name": "Java",
"bytes": "205196"
},
{
"name": "Julia",
"bytes": "445413"
},
{
"name": "Jupyter Notebook",
"bytes": "3660357"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "148945"
},
{
"name": "Perl",
"bytes": "1558292"
},
{
"name": "PowerShell",
"bytes": "11743"
},
{
"name": "Python",
"bytes": "9656682"
},
{
"name": "R",
"bytes": "357994"
},
{
"name": "Raku",
"bytes": "9012"
},
{
"name": "SWIG",
"bytes": "161870"
},
{
"name": "Scala",
"bytes": "1304647"
},
{
"name": "Shell",
"bytes": "460507"
},
{
"name": "Smalltalk",
"bytes": "3497"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ast
import os
import random
import subprocess
import sys
try:
import argparse
except ImportError:
print("Cannot import argparse.")
exit(1)
# Import the testing utils
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../tests/")
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + "/../codegen/")
import utils
from gentable import \
table_name, schema, description, examples, attributes, implementation, \
extended_schema, fuzz_paths, \
WINDOWS, LINUX, POSIX, DARWIN, FREEBSD, \
Column, ForeignKey, table as TableState, TableState as _TableState, \
TEXT, DATE, DATETIME, INTEGER, BIGINT, UNSIGNED_BIGINT, DOUBLE, BLOB
def _fuzz_paths(shell, name, paths, query):
    """Run one zzuf pass of `query` against `shell`, fuzzing reads of `paths`.

    Reads the module-level ``args`` namespace for seed range and verbosity.
    Returns the zzuf process's exit code (non-zero indicates a crash).
    Note: ``name`` is currently unused but kept for interface symmetry.
    """
    cmd = ["zzuf", "-r0.001:0.1", "-s%d:%d" % (args.s, args.s + args.n)]
    # Each fuzzed path gets its own -I (include) flag.
    for path in paths:
        cmd.extend(["-I", path])
    cmd.extend([shell, "--disable_extensions", query])
    if args.verbose:
        print(" ".join(cmd))
    proc = subprocess.Popen(cmd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if args.verbose:
        print(out)
        print(err)
    if proc.returncode != 0:
        # Always show the failing command and its stderr, even when quiet.
        print(" ".join(cmd))
        print(err)
    return proc.returncode
def _fuzz_queries(shell, name, paths, examples=None):
    """Fuzz file reads for table `name`: a count(1) probe plus each example.

    `examples` defaults to None instead of a mutable [] (shared-default
    pitfall); a missing value is treated as "no example queries".
    Returns 0 on success, or the first non-zero zzuf exit code.
    """
    print("Fuzzing file reads for: %s" % (name))
    ret = _fuzz_paths(shell, name, paths, "select count(1) from `%s`" % (name))
    if ret != 0:
        return ret
    for example in examples or []:
        print("Fuzzing file reads for query: %s" % (example))
        ret = _fuzz_paths(shell, name, paths, example)
        if ret != 0:
            # Stop at the first crash; caller decides whether to continue.
            return ret
    return 0
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=(
        "Search table specs for opt-in fuzzing options"
    ))
    parser.add_argument(
        "--specs", metavar="PATH", default="./specs",
        help="Path to the osquery table specs."
    )
    parser.add_argument(
        "--table", metavar="TABLE", default="",
        help="Restrict to a single table"
    )
    parser.add_argument(
        "--verbose", action="store_true", default=False,
        help="Be verbose."
    )
    parser.add_argument(
        "-c", action="store_true", default=False,
        help="Continue working event if a crash is detected."
    )
    parser.add_argument(
        "-n", type=int, default=20,
        help="Number of seeds"
    )
    parser.add_argument(
        "-s", type=int, default=-1,
        help="Initial seed"
    )
    group = parser.add_argument_group("Run Options:")
    group.add_argument(
        "--shell", metavar="PATH", default="./build/%s/osquery/osqueryi" % (
            utils.platform()),
        help="Path to osqueryi shell (./build/<sys>/osquery/osqueryi)."
    )
    args = parser.parse_args()
    if not os.path.exists(args.shell):
        print("Cannot find --shell: %s" % (args.shell))
        exit(1)
    if not os.path.exists(args.specs):
        print("Cannot find --specs: %s" % (args.specs))
        exit(1)
    # -1 (the default) means "pick a random starting seed".
    if args.s < 0:
        args.s = random.randint(0, 65535)
    exit_code = 0
    tables = utils.queries_from_tables(args.specs, args.table)
    for table in tables:
        # Rebuild the on-disk spec path from the dotted table identifier,
        # dropping a leading "specs" component and appending ".table".
        table = table.split(".")
        if table[0] == "specs":
            table.pop(0)
        table[-1] += ".table"
        filename = os.path.join(args.specs, *table)
        # NOTE(review): the 'rU' (universal newlines) mode was removed in
        # Python 3.11 — plain 'r' is equivalent on Python 3; confirm the
        # supported interpreter range before changing.
        with open(filename, 'rU') as fh:
            # Open and parse/execute the specification.
            tree = ast.parse(fh.read())
            # Reset the gentable DSL state, then exec the spec file; the
            # spec's table()/fuzz_paths() calls populate TableState via the
            # module globals imported from gentable.
            TableState = _TableState()
            exec(compile(tree, "<string>", "exec"))
            # We may later introduce other (simple) types of fuzzing.
            if len(TableState.fuzz_paths) > 0:
                # The table specification opted-into path-based fuzzing.
                ret = _fuzz_queries(args.shell, TableState.table_name,
                                    TableState.fuzz_paths, TableState.examples)
                if ret > 0:
                    exit_code = ret
                if not args.c and ret != 0:
                    break
    sys.exit(exit_code)
| {
"content_hash": "d6d11f508e240d4161ba4115c601441a",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 30.652482269503547,
"alnum_prop": 0.5853771402128645,
"repo_name": "jedi22/osquery",
"id": "bf5fe0b2d199b4f9ef8082baca88332b3bdfab5a",
"size": "4714",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tools/analysis/fuzz.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "38093"
},
{
"name": "C++",
"bytes": "2437561"
},
{
"name": "CMake",
"bytes": "78446"
},
{
"name": "Makefile",
"bytes": "7926"
},
{
"name": "Objective-C++",
"bytes": "65363"
},
{
"name": "Shell",
"bytes": "2038"
},
{
"name": "Thrift",
"bytes": "2969"
}
],
"symlink_target": ""
} |
"""Views."""
from __future__ import unicode_literals
import operator
import nm_helper
import cdb_helper
import netspot_settings
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .lib.spotmax import netspot
@login_required
def netmagis(request):
  """Render the NetMagis landing page."""
  return render(request, 'netmagis/netmagis.htm', context={})
@login_required
def networks(request):
  """List every network row from the NetMagis database."""
  # The context manager opens/closes the DB connection around the query.
  with nm_helper.NetMagisDB() as nmdb:
    all_networks = nmdb.query("SELECT * from dns.network")
  return render(request, 'netmagis/networks.htm',
                context={'networks': all_networks})
@login_required
def searchhost(request):
  """Search NetMagis hosts and cross-reference their MACs against NetSPOT.

  Expects a POST parameter 'search'; renders DNS records (A and CNAME),
  matching switch-port MAC entries, and the physical outlet per port.
  """
  # Get search parameter
  search = request.POST.get('search', None)
  # Return NetMagis entries
  netmagis_result = list()
  # Search in NetMagis
  with nm_helper.NetMagisDB() as nmdb:
    hosts = nmdb.search(search)
    for host in hosts:
      # Create host entry. Assume CNAME-record if MAC and ADDR is missing
      if host['mac'] or host['addr']:
        netmagis_result.append({'idrr': host['idrr'],
                                'name': host['name'],
                                'addr': host['addr'],
                                'mac': host['mac'],
                                'date': host['date'],
                                'comment': host['comment']})
      else:
        # Add CNAME record
        # For CNAME rows the alias target goes in the 'mac' column and
        # 'addr' is the literal marker string 'CNAME' (template relies
        # on this layout).
        netmagis_result.append({'idrr': host['idrr'],
                                'name': '',
                                'addr': 'CNAME',
                                'mac': host['name'],
                                'date': host['date'],
                                'comment': host['comment']})
      # Find A-records for any CNAME-records in hosts
      if not host['addr']:
        # Get A record
        arecord = nmdb.get_arecord(host['idrr'])
        if arecord:
          host_entry = {'idrr': arecord[0]['idrr'],
                        'name': arecord[0]['name'],
                        'addr': arecord[0]['addr'],
                        'mac': arecord[0]['mac'],
                        'date': arecord[0]['date'],
                        'comment': arecord[0]['comment']}
          # Add if this arecord is not in netmagis_result already
          if host_entry not in netmagis_result:
            netmagis_result.append(host_entry)
  # Search for MACs
  inventory = netspot.NetSPOT(collection=netspot_settings.COLL_MACS)
  mac_search = []
  # Find MAC addresses in the MAC database
  for host in netmagis_result:
    # If missing - skip
    if not host['mac']:
      continue
    # Find interface in the MAC database
    search_result = inventory.search('mac:%s' % host['mac'].upper(), key='asset', sort='asset')
    # Filter out any other MAC entries
    for asset in search_result:
      for mac in asset.get('macs', []):
        # Substring match (upper-cased) rather than equality —
        # presumably tolerates differing MAC formats; confirm.
        if host['mac'].upper() in mac.get('mac', ''):
          mac_search.append({'asset': asset['asset'],
                             'mac': mac['mac'],
                             'ip': mac['ip'],
                             'interface': mac['interface'],
                             'vlan': mac['vlan']})
  # Find outlet for each physical interface
  # Only physical Juniper ports (xe-/ge- prefixes) map to cable outlets.
  for entry in mac_search:
    if 'xe-' in entry['interface'] or 'ge-' in entry['interface']:
      entry['outlet'] = cdb_helper.get_network_outlet(entry['asset'], entry['interface'])
  return render(
      request,
      'netmagis/searchhost.htm',
      context={'hosts': sorted(netmagis_result, key=operator.itemgetter('name'), reverse=True),
               'mac_search': mac_search,
               'search': search},
  )
| {
"content_hash": "396f201a46c805c30f29e0d99ca053ea",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 95,
"avg_line_length": 30.983606557377048,
"alnum_prop": 0.5314814814814814,
"repo_name": "MaxIV-KitsControls/netspot",
"id": "b9d670c0125f43290ae152fa288e91817f6cd64b",
"size": "3805",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netspot/views_netmagis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "77841"
},
{
"name": "HTML",
"bytes": "43477"
},
{
"name": "JavaScript",
"bytes": "114144"
},
{
"name": "Python",
"bytes": "213676"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class SymbolsrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="symbolsrc", parent_name="scattergeo.marker", **kwargs
):
super(SymbolsrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
| {
"content_hash": "bbebebb8081cbd628572bc13820f7645",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 80,
"avg_line_length": 32.61538461538461,
"alnum_prop": 0.6108490566037735,
"repo_name": "plotly/plotly.py",
"id": "e3c73ef93b3f6b100146d96c0fdad0ec44fb5990",
"size": "424",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattergeo/marker/_symbolsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.