text stringlengths 4 1.02M | meta dict |
|---|---|
import copy
from api.jobs import gears
# DISCUSS: this test mostly asserts that the helper doesn't throw, which is of non-zero but questionable value.
# Could it instead be marked for pytest et al. to exclude from coverage? Is that desirable? Compatible?
def test_fill_defaults():
    """fill_gear_default_values() must apply a gear's declared defaults only
    for keys missing from the supplied config, leaving explicitly-provided
    values untouched -- including falsy ones such as None and [].
    """
    gear_config = {
        'key_one': {'default': 1},
        'key_two': {'default': 2},
        'key_three': {'default': 3},
        'key_no_de': {}  # declares no default; must never be injected
    }
    gear = {
        'gear': {
            'config': gear_config
        }
    }

    # test sending in complete config does not change
    config = {
        'key_one': 4,
        'key_two': 5,
        'key_three': 6
    }
    result = gears.fill_gear_default_values(gear, config)
    assert result['key_one'] == 4
    assert result['key_two'] == 5
    assert result['key_three'] == 6

    # test sending in empty config
    result = gears.fill_gear_default_values(gear, {})
    assert result['key_one'] == 1
    assert result['key_two'] == 2
    assert result['key_three'] == 3

    # test sending in None config
    result = gears.fill_gear_default_values(gear, None)
    assert result['key_one'] == 1
    assert result['key_two'] == 2
    assert result['key_three'] == 3

    # test sending in semi-complete config: explicitly-set falsy values
    # (None, []) must NOT be clobbered by defaults.
    config = {
        'key_one': None,
        'key_two': []
        #'key_three': 6 # missing
    }
    result = gears.fill_gear_default_values(gear, config)
    assert result['key_one'] is None  # PEP 8: identity check for None
    assert result['key_two'] == []
    assert result['key_three'] == 3
| {
"content_hash": "688fd031a61e3578a8ba95eca08e3c2f",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 113,
"avg_line_length": 27.945454545454545,
"alnum_prop": 0.5744957709824333,
"repo_name": "scitran/api",
"id": "75880407597dcdbbfebc4126b2cab8b06f8a8edd",
"size": "1538",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/unit_tests/python/test_gear_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "236479"
},
{
"name": "Shell",
"bytes": "12550"
}
],
"symlink_target": ""
} |
"""This module is deprecated. Please use `airflow.providers.microsoft.mssql.hooks.mssql`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.microsoft.mssql.hooks.mssql import MsSqlHook # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.microsoft.mssql.hooks.mssql`.",
DeprecationWarning, stacklevel=2
)
| {
"content_hash": "3c9319480ddf87201d1357712d5ace2e",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 93,
"avg_line_length": 33.45454545454545,
"alnum_prop": 0.7744565217391305,
"repo_name": "wooga/airflow",
"id": "2584ea76ee73fb6bf7705c54be35b0571c275ce0",
"size": "1155",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "airflow/hooks/mssql_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4084"
},
{
"name": "HTML",
"bytes": "128446"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5879650"
},
{
"name": "Shell",
"bytes": "41820"
}
],
"symlink_target": ""
} |
from lib import action
class EnableProject(action.JenkinsBaseAction):
    """StackStorm action that enables a Jenkins job by name."""
    def run(self, name):
        # Delegates to the Jenkins client provided by JenkinsBaseAction.
        return self.jenkins.enable_job(name)
| {
"content_hash": "8cc35340e06de793d091f377a12df986",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 46,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.7323943661971831,
"repo_name": "StackStorm/st2contrib",
"id": "ef8de9cf63a9bac959a63e973743ea0ea7ef1eaa",
"size": "142",
"binary": false,
"copies": "2",
"ref": "refs/heads/st2contrib-deprecated-archive",
"path": "archive/packs/jenkins/actions/enable_job.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "8532"
},
{
"name": "Makefile",
"bytes": "5581"
},
{
"name": "Python",
"bytes": "1362240"
},
{
"name": "Ruby",
"bytes": "3081"
},
{
"name": "Shell",
"bytes": "7781"
}
],
"symlink_target": ""
} |
from PyQt5 import Qt
import numpy
from zplib.curve import interpolate
from zplib.image import resample
from .. import shared_resources
from . import base
class FreeSpline(base.RWGeometryItemMixin, Qt.QGraphicsPathItem):
    """Freehand-drawable, interactively warpable parametric spline overlay.

    Geometry is stored as a parametric spline ``tck`` tuple (the
    zplib.curve.interpolate representation). The spline can be drawn
    freehand (shift toggles drawing), warped by dragging with a
    Gaussian-weighted falloff, warped perpendicularly from an optional
    "warped" alt view, extended at either endpoint, reversed, and
    re-smoothed.
    """
    QGRAPHICSITEM_TYPE = shared_resources.generate_unique_qgraphicsitem_type()

    def __init__(self, ris_widget, pen=None, geometry=None):
        self.drawing = False
        self._smoothing = 5
        self._tck = None
        self.bandwidth = 20
        # Optional secondary view showing the image resampled along the spline.
        self.warped_view = getattr(ris_widget, 'alt_view', None)
        if self.warped_view is not None:
            ris_widget.layer_stack.focused_image_changed.connect(self._on_focused_image_changed)
            self._on_focused_image_changed(ris_widget.layer_stack.focused_image)
            self._drag_detector = WarpedViewDragDetector(self, self.warped_view)
        super().__init__(ris_widget, pen, geometry)
        self.setFlag(Qt.QGraphicsItem.ItemIsSelectable)

    def remove(self):
        super().remove()
        if self.warped_view is not None:
            self.rw.layer_stack.focused_image_changed.disconnect(self._on_focused_image_changed)
            self._drag_detector.remove()

    @property
    def geometry(self):
        # The item's geometry is the spline tck tuple (or None if unset).
        return self._tck

    @geometry.setter
    def geometry(self, tck):
        self.setSelected(False)
        self.set_tck(tck)

    def set_tck(self, tck, points=None):
        """Install *tck* as the current spline and rebuild the painter path.

        If *points* is None, display points are resampled from the spline;
        otherwise the given points are adopted as-is.
        """
        self.drawing = False
        self._tck = tck
        self.path = Qt.QPainterPath()
        if self.warped_view is not None:
            self._update_warped_view()
        if tck is not None:
            if points is None:
                self._generate_points_from_tck()
            else:
                self.points = points
            # Render the spline as a chain of cubic Bezier segments.
            bezier_elements = interpolate.spline_to_bezier(tck)
            self.path.moveTo(*bezier_elements[0][0])
            for (sx, sy), (c1x, c1y), (c2x, c2y), (ex, ey) in bezier_elements:
                self.path.cubicTo(c1x, c1y, c2x, c2y, ex, ey)
        self.setPath(self.path)
        self._geometry_changed()

    @property
    def smoothing(self):
        return self._smoothing

    @smoothing.setter
    def smoothing(self, value):
        self._smoothing = value
        if self._tck is not None:
            # Re-fit the spline to the current points with the new smoothing.
            self._generate_tck_from_points()

    def _update_warped_view(self):
        if self._tck is None:
            self.warped_view.image = None
        elif self._image is not None:
            # Warped-image width: 1/5 of the approximate spline length.
            width = self._tck[0][-1] / 5
            warped = resample.sample_image_along_spline(self._image, self._tck, width, order=1)
            self.warped_view.image = warped

    def _on_focused_image_changed(self, image):
        self._image = None if image is None else image.data
        if self._tck is not None:
            self._update_warped_view()

    def _generate_tck_from_points(self):
        self.points = numpy.array(self.points)
        l = len(self.points)
        if l > 1:
            # Scale smoothing with the point count so the fit tolerance
            # stays roughly constant per point.
            tck = interpolate.fit_spline(self.points, smoothing=self._smoothing * l)
            self.set_tck(tck, self.points)

    def _generate_points_from_tck(self):
        assert self._tck is not None
        self.points = interpolate.spline_interpolate(self._tck, num_points=300)

    def _start_drawing(self):
        self.drawing = True
        # Dotted pen signals "drawing in progress".
        self.display_pen.setStyle(Qt.Qt.DotLine)
        self.setPen(self.display_pen)
        self.points = []
        self._last_pos = None
        self.path = Qt.QPainterPath()
        self.setPath(self.path)

    def _stop_drawing(self):
        self.drawing = False
        self.display_pen.setStyle(Qt.Qt.SolidLine)
        self.setPen(self.display_pen)
        if len(self.points) > 4:
            self._generate_tck_from_points()
        else:
            # Too few points to fit a spline: clear the geometry.
            self.set_tck(None)

    def _add_point(self, pos):
        self.points.append((pos.x(), pos.y()))
        if self._last_pos is None:
            self.path.moveTo(pos)
        else:
            self.path.lineTo(pos)
        self._last_pos = pos
        self.setPath(self.path)

    def _start_warp(self, x, y):
        """Begin a drag-warp anchored at (x, y)."""
        self._warp_start = numpy.array([x, y])
        self._generate_points_from_tck()
        self._warp_points = self.points
        self._warp_distances = numpy.sqrt(((self._warp_start - self.points)**2).sum(axis=1))
        self._warp_bandwidth = self._tck[0][-1] / self.bandwidth # tck[0][-1] is approximate spline length

    def _warp_spline(self, x, y, bandwidth_factor):
        """Displace points near the drag anchor with Gaussian falloff."""
        end = numpy.array([x, y])
        delta = end - self._warp_start
        bandwidth = self._warp_bandwidth * bandwidth_factor
        warp_coefficients = numpy.exp(-(self._warp_distances/bandwidth)**2)
        displacements = numpy.outer(warp_coefficients, delta)
        disp_sqdist = (displacements**2).sum(axis=1)
        # Suppress sub-2-pixel displacements far from the drag point.
        displacements[disp_sqdist < 4] = 0
        self.points = self._warp_points + displacements
        self._generate_tck_from_points()

    def _start_perpendicular_warp(self, x, y):
        """Begin a warp from the warped view: x is position along the
        spline, y displaces perpendicular to it."""
        self._perp_warp_start = y
        self._generate_points_from_tck()
        self._warp_points = self.points
        # Unit vectors perpendicular to the spline at each sample point.
        px, py = interpolate.spline_interpolate(self._tck, num_points=len(self.points), derivative=1).T
        perps = numpy.transpose([py, -px])
        self._perps = perps / numpy.sqrt((perps**2).sum(axis=1))[:, numpy.newaxis]
        self._perp_warp_positions = numpy.linspace(0, self._tck[0][-1], len(perps))
        self._perp_warp_bandwidth = self._tck[0][-1] / self.bandwidth # tck[0][-1] is approximate spline length

    def _warp_spline_perpendicular(self, x, y, bandwidth_factor):
        displacement = y - self._perp_warp_start
        bandwidth = self._perp_warp_bandwidth * bandwidth_factor
        distances = self._perp_warp_positions - x
        warp_coefficients = numpy.exp(-(distances/bandwidth)**2)
        displacements = displacement * self._perps * warp_coefficients[:, numpy.newaxis]
        disp_sqdist = (displacements**2).sum(axis=1)
        # Suppress sub-2-pixel displacements far from the drag point.
        displacements[disp_sqdist < 4] = 0
        self.points = self._warp_points + displacements
        self._generate_tck_from_points()

    def _extend_endpoint(self, x, y):
        new_end = numpy.array([x, y])
        old_ends = self.points[[0,-1]]
        dists = ((old_ends - new_end)**2).sum(axis=1)
        # Attach the new point to whichever endpoint is closer.
        if dists[0] < dists[1]:
            cat_list = [[new_end], self.points]
        else:
            cat_list = [self.points, [new_end]]
        self.points = numpy.concatenate(cat_list, axis=0)
        self._generate_tck_from_points()
        self._generate_points_from_tck()

    def _reverse_spline(self):
        # BUG FIX: the original unpacked `t, c, k = self._tck` but then
        # passed an undefined name `tck` to reverse_spline(), raising
        # NameError whenever the R key was pressed. Pass the stored tck.
        self.set_tck(interpolate.reverse_spline(self._tck), self.points[::-1])

    def sceneEventFilter(self, watched, event):
        if self.drawing and event.type() in {Qt.QEvent.GraphicsSceneMousePress, Qt.QEvent.GraphicsSceneMouseMove}:
            pos = event.pos()
            # Only record a point once the cursor has moved > 6 px (36 = 6**2).
            if self._last_pos is None or (pos.x() - self._last_pos.x())**2 + (pos.y() - self._last_pos.y())**2 > 36:
                self._add_point(pos)
            return True
        elif self.drawing and event.type() == Qt.QEvent.GraphicsSceneMouseRelease:
            self._stop_drawing()
            return True
        elif self._tck is not None and event.type() == Qt.QEvent.GraphicsSceneMousePress and event.modifiers() & Qt.Qt.ShiftModifier:
            # Shift-click extends the nearest endpoint of an existing spline.
            pos = event.pos()
            self._extend_endpoint(pos.x(), pos.y())
            return True
        elif self._tck is None and event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_Shift:
            # Shift toggles freehand drawing mode when no spline exists yet.
            if not self.drawing:
                self._start_drawing()
            else:
                self._stop_drawing()
            return True
        elif self.shared_filter(event):
            return True
        return super().sceneEventFilter(watched, event)

    def shared_filter(self, event):
        """Key handling shared with the warped-view drag detector:
        S adjusts smoothing, R reverses, shift-D deletes."""
        if event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_S:
            if event.modifiers() & Qt.Qt.ShiftModifier:
                self.smoothing = min(self.smoothing * 2, 160) # 5 * 2**5
            else:
                self.smoothing = max(self.smoothing / 2, 0.625) # 5 / 2**2
            return True
        elif self._tck is not None and event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_R:
            self._reverse_spline()
            return True
        elif self._tck is not None and event.type() == Qt.QEvent.KeyPress and event.key() == Qt.Qt.Key_D and event.modifiers() & Qt.Qt.ShiftModifier:
            self.geometry = None
            return True
        return False

    def mousePressEvent(self, event):
        p = event.pos()
        self._start_warp(p.x(), p.y())

    def mouseMoveEvent(self, event):
        # Shift widens the warp falloff.
        bandwidth_factor = 1
        if event.modifiers() & Qt.Qt.ShiftModifier:
            bandwidth_factor = 2
        p = event.pos()
        self._warp_spline(p.x(), p.y(), bandwidth_factor)
class WarpedViewDragDetector(Qt.QGraphicsObject):
    """Invisible graphics object installed on the warped (alt) view that
    forwards mouse drags to a FreeSpline as perpendicular-warp operations,
    and relays its shared keyboard shortcuts."""
    QGRAPHICSITEM_TYPE = shared_resources.generate_unique_qgraphicsitem_type()

    def __init__(self, free_spline, warped_view):
        self.free_spline = free_spline
        stack_item = warped_view.image_scene.layer_stack_item
        super().__init__(stack_item)
        # This item is a pure event filter and is never painted.
        self.setFlag(Qt.QGraphicsItem.ItemHasNoContents)
        stack_item.installSceneEventFilter(self)

    def boundingRect(self):
        # No visual extent.
        return Qt.QRectF()

    def remove(self):
        self.parentItem().removeSceneEventFilter(self)

    def sceneEventFilter(self, watched, event):
        event_type = event.type()
        have_spline = self.free_spline._tck is not None
        if have_spline and event_type == Qt.QEvent.GraphicsSceneMousePress:
            pos = event.pos()
            self.free_spline._start_perpendicular_warp(pos.x(), pos.y())
            return True
        if have_spline and event_type == Qt.QEvent.GraphicsSceneMouseMove:
            # Shift widens the warp falloff.
            factor = 2 if event.modifiers() & Qt.Qt.ShiftModifier else 1
            pos = event.pos()
            self.free_spline._warp_spline_perpendicular(pos.x(), pos.y(), factor)
            return True
        if self.free_spline.shared_filter(event):
            return True
        return False
"content_hash": "b8a05010ab75afcc4ad8c1d9ddf5f324",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 149,
"avg_line_length": 39.898039215686275,
"alnum_prop": 0.6006487124041675,
"repo_name": "zpincus/RisWidget",
"id": "e18a3f0c937de96a5cc50d8525150a5c8af5a42e",
"size": "10252",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ris_widget/overlay/free_spline.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "86651"
},
{
"name": "GLSL",
"bytes": "1607"
},
{
"name": "Python",
"bytes": "331706"
}
],
"symlink_target": ""
} |
from django.urls import path
from netbox.views.generic import ObjectChangeLogView, ObjectJournalView
from . import views
from .models import *
app_name = 'wireless'

# URL patterns for the wireless app. Each object type follows the standard
# NetBox layout: list / add / import / bulk edit / bulk delete, followed by
# per-object detail / edit / delete / changelog (plus journal where enabled).
urlpatterns = (

    # Wireless LAN groups
    path('wireless-lan-groups/', views.WirelessLANGroupListView.as_view(), name='wirelesslangroup_list'),
    path('wireless-lan-groups/add/', views.WirelessLANGroupEditView.as_view(), name='wirelesslangroup_add'),
    path('wireless-lan-groups/import/', views.WirelessLANGroupBulkImportView.as_view(), name='wirelesslangroup_import'),
    path('wireless-lan-groups/edit/', views.WirelessLANGroupBulkEditView.as_view(), name='wirelesslangroup_bulk_edit'),
    path('wireless-lan-groups/delete/', views.WirelessLANGroupBulkDeleteView.as_view(), name='wirelesslangroup_bulk_delete'),
    path('wireless-lan-groups/<int:pk>/', views.WirelessLANGroupView.as_view(), name='wirelesslangroup'),
    path('wireless-lan-groups/<int:pk>/edit/', views.WirelessLANGroupEditView.as_view(), name='wirelesslangroup_edit'),
    path('wireless-lan-groups/<int:pk>/delete/', views.WirelessLANGroupDeleteView.as_view(), name='wirelesslangroup_delete'),
    path('wireless-lan-groups/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='wirelesslangroup_changelog', kwargs={'model': WirelessLANGroup}),

    # Wireless LANs
    path('wireless-lans/', views.WirelessLANListView.as_view(), name='wirelesslan_list'),
    path('wireless-lans/add/', views.WirelessLANEditView.as_view(), name='wirelesslan_add'),
    path('wireless-lans/import/', views.WirelessLANBulkImportView.as_view(), name='wirelesslan_import'),
    path('wireless-lans/edit/', views.WirelessLANBulkEditView.as_view(), name='wirelesslan_bulk_edit'),
    path('wireless-lans/delete/', views.WirelessLANBulkDeleteView.as_view(), name='wirelesslan_bulk_delete'),
    path('wireless-lans/<int:pk>/', views.WirelessLANView.as_view(), name='wirelesslan'),
    path('wireless-lans/<int:pk>/edit/', views.WirelessLANEditView.as_view(), name='wirelesslan_edit'),
    path('wireless-lans/<int:pk>/delete/', views.WirelessLANDeleteView.as_view(), name='wirelesslan_delete'),
    path('wireless-lans/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='wirelesslan_changelog', kwargs={'model': WirelessLAN}),
    path('wireless-lans/<int:pk>/journal/', ObjectJournalView.as_view(), name='wirelesslan_journal', kwargs={'model': WirelessLAN}),

    # Wireless links
    path('wireless-links/', views.WirelessLinkListView.as_view(), name='wirelesslink_list'),
    path('wireless-links/add/', views.WirelessLinkEditView.as_view(), name='wirelesslink_add'),
    path('wireless-links/import/', views.WirelessLinkBulkImportView.as_view(), name='wirelesslink_import'),
    path('wireless-links/edit/', views.WirelessLinkBulkEditView.as_view(), name='wirelesslink_bulk_edit'),
    path('wireless-links/delete/', views.WirelessLinkBulkDeleteView.as_view(), name='wirelesslink_bulk_delete'),
    path('wireless-links/<int:pk>/', views.WirelessLinkView.as_view(), name='wirelesslink'),
    path('wireless-links/<int:pk>/edit/', views.WirelessLinkEditView.as_view(), name='wirelesslink_edit'),
    path('wireless-links/<int:pk>/delete/', views.WirelessLinkDeleteView.as_view(), name='wirelesslink_delete'),
    path('wireless-links/<int:pk>/changelog/', ObjectChangeLogView.as_view(), name='wirelesslink_changelog', kwargs={'model': WirelessLink}),
    path('wireless-links/<int:pk>/journal/', ObjectJournalView.as_view(), name='wirelesslink_journal', kwargs={'model': WirelessLink}),

)
| {
"content_hash": "5dfb47c698ae1fa740d4fe5b2f47d18f",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 154,
"avg_line_length": 78.46666666666667,
"alnum_prop": 0.7380345511186632,
"repo_name": "digitalocean/netbox",
"id": "cef96fd5e3c084399f8c6b0debe037e8345c2a15",
"size": "3531",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/wireless/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import os
import unittest
import sys
import signal
import time
from plumbum import local, LocalPath, FG, BG, ERROUT
from plumbum.lib import six
from plumbum import CommandNotFound, ProcessExecutionError, ProcessTimedOut
from plumbum.fs.atomic import AtomicFile, AtomicCounterFile, PidFile
from plumbum.path import RelativePath
import logging
import functools

def skipIf(cond, msg = None):
    """Minimal fallback for ``unittest.skipIf`` (absent before Python 2.7).

    Returns a decorator that replaces the test with a no-op logging stub
    when *cond* is true, and leaves the test untouched otherwise.

    BUG FIX: the original condition was inverted -- it returned the test
    unchanged when the skip condition was True and skipped it when False,
    i.e. the exact opposite of unittest.skipIf semantics.
    """
    def deco(func):
        if not cond:
            # Skip condition not met: run the original test unchanged.
            return func
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Skip condition met: the test body is never executed.
            logging.warn("skipping test")
        return wrapper
    return deco

# Only install the shim when the running unittest lacks skipIf.
if not hasattr(unittest, "skipIf"):
    unittest.skipIf = skipIf
class LocalPathTest(unittest.TestCase):
    """Tests for LocalPath path components, splitting, relativization,
    ownership, and read/write helpers."""
    def test_basename(self):
        name = LocalPath("/some/long/path/to/file.txt").basename
        self.assertTrue(isinstance(name, six.string_types))
        self.assertEqual("file.txt", str(name))
    def test_dirname(self):
        name = LocalPath("/some/long/path/to/file.txt").dirname
        self.assertTrue(isinstance(name, LocalPath))
        # Normalize separators so the assertion also holds on Windows.
        self.assertEqual("/some/long/path/to", str(name).replace("\\", "/"))
    @unittest.skipIf(not hasattr(os, "chown"), "os.chown not supported")
    def test_chown(self):
        with local.tempdir() as dir:
            p = dir / "foo.txt"
            p.write(six.b("hello"))
            self.assertEqual(p.uid, os.getuid())
            self.assertEqual(p.gid, os.getgid())
            # chown by user *name*; ownership must be unchanged.
            p.chown(p.uid.name)
            self.assertEqual(p.uid, os.getuid())
    def test_split(self):
        p = local.path("/var/log/messages")
        self.assertEqual(p.split(), ["var", "log", "messages"])
    def test_relative_to(self):
        p = local.path("/var/log/messages")
        self.assertEqual(p.relative_to("/var/log/messages"), RelativePath([]))
        self.assertEqual(p.relative_to("/var/"), RelativePath(["log", "messages"]))
        self.assertEqual(p.relative_to("/"), RelativePath(["var", "log", "messages"]))
        self.assertEqual(p.relative_to("/var/tmp"), RelativePath(["..", "log", "messages"]))
        self.assertEqual(p.relative_to("/opt"), RelativePath(["..", "var", "log", "messages"]))
        self.assertEqual(p.relative_to("/opt/lib"), RelativePath(["..", "..", "var", "log", "messages"]))
        # Round-trip: base + delta must reconstruct the original path.
        for src in [local.path("/var/log/messages"), local.path("/var"), local.path("/opt/lib")]:
            delta = p.relative_to(src)
            self.assertEqual(src + delta, p)
    def test_read_write(self):
        with local.tempdir() as dir:
            f = dir / "test.txt"
            # UTF-8 round-trip including non-ASCII (Hebrew) characters.
            text = six.b('hello world\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d').decode("utf8")
            f.write(text, "utf8")
            text2 = f.read("utf8")
            self.assertEqual(text, text2)
class LocalMachineTest(unittest.TestCase):
    """Integration tests of the `local` machine object: command lookup and
    import, cwd/env manipulation, piping and redirection, process control,
    shell sessions, and the plumbum.fs.atomic file helpers.

    NOTE(review): these tests assume they are run from the plumbum `tests/`
    directory (many assert on `test_local.py` appearing in `ls` output) and
    depend on POSIX tools being present -- environment-sensitive by design.
    """
    def test_getattr(self):
        import plumbum
        self.assertEqual(getattr(plumbum.cmd, 'does_not_exist', 1), 1)
    def test_imports(self):
        from plumbum.cmd import ls
        self.assertTrue("test_local.py" in local["ls"]().splitlines())
        self.assertTrue("test_local.py" in ls().splitlines())
        self.assertRaises(CommandNotFound, lambda: local["non_exist1N9"])
        try:
            from plumbum.cmd import non_exist1N9 #@UnresolvedImport @UnusedImport
        except (ImportError, CommandNotFound):
            pass
        else:
            self.fail("from plumbum.cmd import non_exist1N9")
    def test_cwd(self):
        from plumbum.cmd import ls
        self.assertEqual(local.cwd, os.getcwd())
        self.assertTrue("__init__.py" not in ls().splitlines())
        with local.cwd("../plumbum"):
            self.assertTrue("__init__.py" in ls().splitlines())
        # cwd must be restored once the context exits.
        self.assertTrue("__init__.py" not in ls().splitlines())
        self.assertRaises(OSError, local.cwd.chdir, "../non_exist1N9")
    def test_path(self):
        self.assertFalse((local.cwd / "../non_exist1N9").exists())
        self.assertTrue((local.cwd / ".." / "plumbum").isdir())
        # traversal
        found = False
        for fn in local.cwd / ".." / "plumbum":
            if fn.basename == "__init__.py":
                self.assertTrue(fn.isfile())
                found = True
        self.assertTrue(found)
        # glob'ing
        found = False
        for fn in local.cwd / ".." // "*/*.rst":
            if fn.basename == "index.rst":
                found = True
        self.assertTrue(found)
    def test_env(self):
        self.assertTrue("PATH" in local.env)
        self.assertFalse("FOOBAR72" in local.env)
        self.assertRaises(ProcessExecutionError, local.python, "-c", "import os;os.environ['FOOBAR72']")
        local.env["FOOBAR72"] = "spAm"
        self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR72'])").splitlines(), ["spAm"])
        # Nested env contexts: inner values shadow, then restore on exit.
        with local.env(FOOBAR73 = 1889):
            self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR73'])").splitlines(), ["1889"])
            with local.env(FOOBAR73 = 1778):
                self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR73'])").splitlines(), ["1778"])
            self.assertEqual(local.python("-c", "import os;print (os.environ['FOOBAR73'])").splitlines(), ["1889"])
        self.assertRaises(ProcessExecutionError, local.python, "-c", "import os;os.environ['FOOBAR73']")
        # path manipulation
        self.assertRaises(CommandNotFound, local.which, "dummy-executable")
        with local.env():
            local.env.path.insert(0, local.cwd / "not-in-path")
            p = local.which("dummy-executable")
            self.assertEqual(p, local.cwd / "not-in-path" / "dummy-executable")
    def test_local(self):
        self.assertTrue("plumbum" in str(local.cwd))
        self.assertTrue("PATH" in local.env.getdict())
        self.assertEqual(local.path("foo"), os.path.join(os.getcwd(), "foo"))
        local.which("ls")
        local["ls"]
        self.assertEqual(local.python("-c", "print ('hi there')").splitlines(), ["hi there"])
    def test_piping(self):
        from plumbum.cmd import ls, grep
        chain = ls | grep["\\.py"]
        self.assertTrue("test_local.py" in chain().splitlines())
        chain = (ls["-a"] | grep["test"] | grep["local"])
        self.assertTrue("test_local.py" in chain().splitlines())
    def test_redirection(self):
        from plumbum.cmd import cat, ls, grep, rm
        chain = (ls | grep["\\.py"]) > "tmp.txt"
        chain()
        chain2 = (cat < "tmp.txt") | grep["local"]
        self.assertTrue("test_local.py" in chain2().splitlines())
        rm("tmp.txt")
        # << feeds a literal string to stdin.
        chain3 = (cat << "this is the\nworld of helloness and\nspam bar and eggs") | grep["hello"]
        self.assertTrue("world of helloness and" in chain3().splitlines())
        # >= redirects stderr to a file; grep -Zq5 is an invalid invocation
        # whose usage message goes to stderr with return code 2.
        rc, _, err = (grep["-Zq5"] >= "tmp2.txt").run(["-Zq5"], retcode = None)
        self.assertEqual(rc, 2)
        self.assertFalse(err)
        self.assertTrue("Usage" in (cat < "tmp2.txt")())
        rm("tmp2.txt")
        # >= ERROUT folds stderr into stdout.
        rc, out, _ = (grep["-Zq5"] >= ERROUT).run(["-Zq5"], retcode = None)
        self.assertEqual(rc, 2)
        self.assertTrue("Usage" in out)
    def test_popen(self):
        from plumbum.cmd import ls
        p = ls.popen(["-a"])
        out, _ = p.communicate()
        self.assertEqual(p.returncode, 0)
        self.assertTrue("test_local.py" in out.decode(local.encoding).splitlines())
    def test_run(self):
        from plumbum.cmd import ls, grep
        rc, out, err = (ls | grep["non_exist1N9"]).run(retcode = 1)
        self.assertEqual(rc, 1)
    def test_timeout(self):
        from plumbum.cmd import sleep
        self.assertRaises(ProcessTimedOut, sleep, 10, timeout = 5)
    def test_iter_lines_timeout(self):
        from plumbum.cmd import ping
        try:
            for i, (out, err) in enumerate(ping["127.0.0.1", "-i", 0.5].popen().iter_lines(timeout=2)):
                print("out:", out)
                print("err:", err)
        except ProcessTimedOut:
            # At 0.5s intervals over ~2s we expect at least a few lines.
            self.assertTrue(i > 3)
        else:
            self.fail("Expected a timeout")
    def test_iter_lines_error(self):
        from plumbum.cmd import ls
        try:
            for i, lines in enumerate(ls["--bla"].popen()):
                pass
            self.assertEqual(i, 1)
        except ProcessExecutionError:
            # sys.exc_info()[1] keeps Python 2.5 compatibility.
            ex = sys.exc_info()[1]
            self.assertTrue(ex.stderr.startswith("/bin/ls: unrecognized option '--bla'"))
        else:
            self.fail("Expected an execution error")
    def test_modifiers(self):
        from plumbum.cmd import ls, grep
        f = (ls["-a"] | grep["\\.py"]) & BG
        f.wait()
        self.assertTrue("test_local.py" in f.stdout.splitlines())
        (ls["-a"] | grep["local"]) & FG
    def test_arg_expansion(self):
        from plumbum.cmd import ls
        args = [ '-l', '-F' ]
        ls(*args)
        ls[args]
    def test_session(self):
        sh = local.session()
        for _ in range(4):
            _, out, _ = sh.run("ls -a")
            self.assertTrue("test_local.py" in out.splitlines())
        # Session state (cwd, exported vars) persists across run() calls.
        sh.run("cd ..")
        sh.run("export FOO=17")
        out = sh.run("echo $FOO")[1]
        self.assertEqual(out.splitlines(), ["17"])
    def test_quoting(self):
        ssh = local["ssh"]
        pwd = local["pwd"]
        cmd = ssh["localhost", "cd", "/usr", "&&", ssh["localhost", "cd", "/", "&&",
            ssh["localhost", "cd", "/bin", "&&", pwd]]]
        # Nested commands must be quoted once per nesting level.
        self.assertTrue("\"'&&'\"" in " ".join(cmd.formulate(0)))
        ls = local['ls']
        try:
            ls('-a', '') # check that empty strings are rendered correctly
        except ProcessExecutionError:
            ex = sys.exc_info()[1]
            self.assertEqual(ex.argv[-2:], ['-a', ''])
        else:
            self.fail("Expected `ls` to fail")
    def test_tempdir(self):
        from plumbum.cmd import cat
        with local.tempdir() as dir:
            self.assertTrue(dir.isdir())
            data = six.b("hello world")
            with open(str(dir / "test.txt"), "wb") as f:
                f.write(data)
            with open(str(dir / "test.txt"), "rb") as f:
                self.assertEqual(f.read(), data)
        # The tempdir must be deleted on context exit.
        self.assertFalse(dir.exists())
    def test_read_write(self):
        with local.tempdir() as tmp:
            data = six.b("hello world")
            (tmp / "foo.txt").write(data)
            self.assertEqual((tmp / "foo.txt").read(), data)
    def test_links(self):
        with local.tempdir() as tmp:
            src = tmp / "foo.txt"
            dst1 = tmp / "bar.txt"
            dst2 = tmp / "spam.txt"
            data = six.b("hello world")
            src.write(data)
            # Hard link and symlink must both see the source's content.
            src.link(dst1)
            self.assertEqual(data, dst1.read())
            src.symlink(dst2)
            self.assertEqual(data, dst2.read())
    def test_as_user(self):
        with local.as_root():
            local["date"]()
    def test_list_processes(self):
        self.assertTrue(list(local.list_processes()))
    def test_pgrep(self):
        self.assertTrue(list(local.pgrep("python")))
    def _generate_sigint(self):
        # Helper: deliver Ctrl+C / SIGINT to this process group and swallow
        # the resulting KeyboardInterrupt; fails if none is raised.
        try:
            if sys.platform == "win32":
                from win32api import GenerateConsoleCtrlEvent
                GenerateConsoleCtrlEvent(0, 0) # send Ctrl+C to current TTY
            else:
                os.kill(0, signal.SIGINT)
            time.sleep(1)
        except KeyboardInterrupt:
            pass
        else:
            self.fail("Expected KeyboardInterrupt")
    @unittest.skipIf(not sys.stdin.isatty(), "Not a TTY")
    def test_same_sesion(self):
        from plumbum.cmd import sleep
        p = sleep.popen([1000])
        self.assertIs(p.poll(), None)
        # Child shares our session, so the SIGINT kills it too.
        self._generate_sigint()
        time.sleep(1)
        self.assertIsNot(p.poll(), None)
    @unittest.skipIf(not sys.stdin.isatty(), "Not a TTY")
    def test_new_session(self):
        from plumbum.cmd import sleep
        p = sleep.popen([1000], new_session = True)
        self.assertIs(p.poll(), None)
        # Child is in its own session, so the SIGINT must NOT reach it.
        self._generate_sigint()
        time.sleep(1)
        self.assertIs(p.poll(), None)
        p.terminate()
    def test_local_daemon(self):
        from plumbum.cmd import sleep
        proc = local.daemonic_popen(sleep[5])
        try:
            os.waitpid(proc.pid, 0)
        except OSError:
            pass
        else:
            self.fail("I shouldn't have any children by now -- they are daemons!")
        proc.wait()
    def test_atomic_file(self):
        af1 = AtomicFile("tmp.txt")
        af2 = AtomicFile("tmp.txt")
        af1.write_atomic(six.b("foo"))
        af2.write_atomic(six.b("bar"))
        # Both handles must observe the last atomic write.
        self.assertEqual(af1.read_atomic(), six.b("bar"))
        self.assertEqual(af2.read_atomic(), six.b("bar"))
        local.path("tmp.txt").delete()
    def test_atomic_file2(self):
        af = AtomicFile("tmp.txt")
        # A subprocess must fail to acquire the lock while we hold it.
        code = """from __future__ import with_statement
from plumbum.fs.atomic import AtomicFile
af = AtomicFile("tmp.txt")
try:
    with af.locked(blocking = False):
        raise ValueError("this should have failed")
except (OSError, IOError):
    print("already locked")
"""
        with af.locked():
            output = local.python("-c", code)
            self.assertEqual(output.strip(), "already locked")
        local.path("tmp.txt").delete()
    def test_pid_file(self):
        # A subprocess must see the pid file as taken while we hold it.
        code = """from __future__ import with_statement
from plumbum.fs.atomic import PidFile, PidFileTaken
try:
    with PidFile("mypid"):
        raise ValueError("this should have failed")
except PidFileTaken:
    print("already locked")
"""
        with PidFile("mypid"):
            output = local.python("-c", code)
            self.assertEqual(output.strip(), "already locked")
        local.path("mypid").delete()
    def test_atomic_counter(self):
        local.path("counter").delete()
        num_of_procs = 20
        num_of_increments = 20
        # Many concurrent processes increment the shared counter; every
        # value must be handed out exactly once.
        code = """from plumbum.fs.atomic import AtomicCounterFile
import time
time.sleep(0.2)
afc = AtomicCounterFile.open("counter")
for _ in range(%s):
    print(afc.next())
    time.sleep(0.1)
""" % (num_of_increments,)
        procs = []
        for _ in range(num_of_procs):
            procs.append(local.python["-c", code].popen())
        results = []
        for p in procs:
            out, _ = p.communicate()
            self.assertEqual(p.returncode, 0)
            results.extend(int(num) for num in out.splitlines())
        self.assertEqual(len(results), num_of_procs * num_of_increments)
        self.assertEqual(len(set(results)), len(results))
        self.assertEqual(min(results), 0)
        self.assertEqual(max(results), num_of_procs * num_of_increments - 1)
        local.path("counter").delete()
    def test_atomic_counter2(self):
        local.path("counter").delete()
        afc = AtomicCounterFile.open("counter")
        self.assertEqual(afc.next(), 0)
        self.assertEqual(afc.next(), 1)
        self.assertEqual(afc.next(), 2)
        self.assertRaises(TypeError, afc.reset, "hello")
        afc.reset(70)
        self.assertEqual(afc.next(), 70)
        self.assertEqual(afc.next(), 71)
        self.assertEqual(afc.next(), 72)
        local.path("counter").delete()
    def test_bound_env(self):
        try:
            from plumbum.cmd import printenv
        except CommandNotFound:
            self.skipTest("printenv is missing")
        with local.env(FOO = "hello"):
            # with_env() overlays on top of the ambient env context.
            self.assertEqual(printenv.with_env(BAR = "world")("FOO", "BAR"), "hello\nworld\n")
            self.assertEqual(printenv.with_env(FOO = "sea", BAR = "world")("FOO", "BAR"), "sea\nworld\n")
    def test_nesting_lists_as_argv(self):
        from plumbum.cmd import ls
        c = ls["-l", ["-a", "*.py"]]
        self.assertEqual(c.formulate()[1:], ['-l', '-a', '*.py'])
    def test_contains(self):
        self.assertTrue("ls" in local, "Expected to find `ls`")
    def test_issue_139(self):
        # Constructing a LocalPath from a cwd object must not raise.
        LocalPath(local.cwd)
    def test_pipeline_failure(self):
        from plumbum.cmd import ls, head
        self.assertRaises(ProcessExecutionError, (ls["--no-such-option"] | head))
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "7ae4c6cb2535e59252ef7e6f629a7af6",
"timestamp": "",
"source": "github",
"line_count": 462,
"max_line_length": 119,
"avg_line_length": 35.34848484848485,
"alnum_prop": 0.5684281427959096,
"repo_name": "siemens/plumbum",
"id": "00a6ccec5185dddfc019fe3aa06f5825ec969003",
"size": "16331",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_local.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255189"
},
{
"name": "Shell",
"bytes": "9"
}
],
"symlink_target": ""
} |
"""
Broadcast radiation levels to Twitter.
Require the twython packages:
https://github.com/ryanmcgrath/twython
You'll also need a Twitter application credentials:
https://apps.twitter.com
Released under MIT License. See LICENSE file.
By Yoan Tournade <yoan@ytotech.com>
"""
from PiPocketGeiger import RadiationWatch
import time
from twython import Twython
# Twitter application credentials.
APP_KEY = "your_app_key"
APP_SECRET = "your_app_secret"
OAUTH_TOKEN = "your_app_oauth_token"
OAUTH_TOKEN_SECRET = "your_app_oauth_token_secret"
# Your place name or exact location. Keep it short or it won't fit the tweet!
MY_PLACE = "(37 Rue de Rennes, Paris, France)"
# Period for twitting, in seconds.
TWITTING_PERIOD = 120

if __name__ == "__main__":
    print("Twitting each {0} seconds.".format(TWITTING_PERIOD))
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    # RadiationWatch wired on GPIO pins 24 and 23 -- verify against your
    # board's wiring before running.
    with RadiationWatch(24, 23) as radiationWatch:
        # `while True` replaces the dated `while 1` idiom; loop forever,
        # tweeting one reading per period.
        while True:
            time.sleep(TWITTING_PERIOD)
            try:
                readings = radiationWatch.status()
                print("Twitting... {0}.".format(readings))
                twitter.update_status(
                    status="Radiation in my house {0}: "
                    "{1} uSv/h +/- {2} -- {3} CPM @radiation_watch".format(
                        MY_PLACE,
                        readings["uSvh"],
                        readings["uSvhError"],
                        readings["cpm"],
                    )
                )
                print("Ok.")
            except Exception as e:
                # Deliberate best-effort: a failed tweet (network hiccup,
                # Twitter API error) must not stop the monitoring loop.
                print(e)
| {
"content_hash": "246362822f5d5426f4210ce35e0cf02e",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 77,
"avg_line_length": 32.6530612244898,
"alnum_prop": 0.58875,
"repo_name": "MonsieurV/PiPocketGeiger",
"id": "78da5a3dab24366815e073d00b9c7ba84981fece",
"size": "1646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/twitter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "398"
},
{
"name": "Python",
"bytes": "8026"
}
],
"symlink_target": ""
} |
"""
Test suite for the docx.parts.styles module
"""
from __future__ import absolute_import, print_function, unicode_literals
import pytest
from docx.opc.constants import CONTENT_TYPE as CT, RELATIONSHIP_TYPE as RT
from docx.opc.package import PartFactory
from docx.opc.packuri import PackURI
from docx.oxml.parts.styles import CT_Styles
from docx.package import Package
from docx.parts.styles import StylesPart, _Styles
from ..oxml.unitdata.styles import a_style, a_styles
from ..unitutil import (
function_mock, class_mock, initializer_mock, instance_mock, method_mock
)
class DescribeStylesPart(object):
    """Behavior specs for ``docx.parts.styles.StylesPart``.

    Verifies PartFactory dispatch, the ``load()`` alternate constructor,
    and the ``styles`` property. All collaborators are mocked.
    """
    def it_is_used_by_PartFactory_to_construct_styles_part(
            self, load_fixture):
        # fixture ----------------------
        styles_part_load_, partname_, blob_, package_, styles_part_ = (
            load_fixture
        )
        content_type, reltype = CT.WML_STYLES, RT.STYLES
        # exercise ---------------------
        part = PartFactory(partname_, content_type, reltype, blob_, package_)
        # verify -----------------------
        styles_part_load_.assert_called_once_with(
            partname_, content_type, blob_, package_
        )
        assert part is styles_part_
    def it_can_be_constructed_by_opc_part_factory(self, construct_fixture):
        (partname_, content_type_, blob_, package_, oxml_fromstring_,
         init__, styles_elm_) = construct_fixture
        # exercise ---------------------
        styles_part = StylesPart.load(
            partname_, content_type_, blob_, package_
        )
        # verify: blob is parsed once and __init__ receives the parsed element
        oxml_fromstring_.assert_called_once_with(blob_)
        init__.assert_called_once_with(
            partname_, content_type_, styles_elm_, package_
        )
        assert isinstance(styles_part, StylesPart)
    def it_provides_access_to_the_styles(self, styles_fixture):
        styles_part, _Styles_, styles_elm_, styles_ = styles_fixture
        styles = styles_part.styles
        # the _Styles proxy is constructed around the part's styles element
        _Styles_.assert_called_once_with(styles_elm_)
        assert styles is styles_
    # fixtures -------------------------------------------------------
    @pytest.fixture
    def blob_(self, request):
        return instance_mock(request, bytes)
    @pytest.fixture
    def construct_fixture(
            self, partname_, content_type_, blob_, package_,
            oxml_fromstring_, init__, styles_elm_):
        return (
            partname_, content_type_, blob_, package_, oxml_fromstring_,
            init__, styles_elm_
        )
    @pytest.fixture
    def content_type_(self, request):
        return instance_mock(request, str)
    @pytest.fixture
    def init__(self, request):
        # mocks StylesPart.__init__ so calls to it can be asserted
        return initializer_mock(request, StylesPart)
    @pytest.fixture
    def load_fixture(
            self, styles_part_load_, partname_, blob_, package_,
            styles_part_):
        styles_part_load_.return_value = styles_part_
        return (
            styles_part_load_, partname_, blob_, package_, styles_part_
        )
    @pytest.fixture
    def oxml_fromstring_(self, request, styles_elm_):
        return function_mock(
            request, 'docx.parts.styles.oxml_fromstring',
            return_value=styles_elm_
        )
    @pytest.fixture
    def package_(self, request):
        return instance_mock(request, Package)
    @pytest.fixture
    def partname_(self, request):
        return instance_mock(request, PackURI)
    @pytest.fixture
    def _Styles_(self, request, styles_):
        return class_mock(
            request, 'docx.parts.styles._Styles', return_value=styles_
        )
    @pytest.fixture
    def styles_(self, request):
        return instance_mock(request, _Styles)
    @pytest.fixture
    def styles_elm_(self, request):
        return instance_mock(request, CT_Styles)
    @pytest.fixture
    def styles_fixture(self, _Styles_, styles_elm_, styles_):
        styles_part = StylesPart(None, None, styles_elm_, None)
        return styles_part, _Styles_, styles_elm_, styles_
    @pytest.fixture
    def styles_part_(self, request):
        return instance_mock(request, StylesPart)
    @pytest.fixture
    def styles_part_load_(self, request):
        # patches StylesPart.load so PartFactory dispatch can be observed
        return method_mock(request, StylesPart, 'load')
class Describe_Styles(object):
    """Behavior specs for the ``_Styles`` collection proxy."""
    def it_knows_how_many_styles_it_contains(self, len_fixture):
        # len() of the proxy must equal the number of <w:style> children
        styles, expected_count = len_fixture
        assert len(styles) == expected_count
    # fixtures -------------------------------------------------------
    @pytest.fixture(params=[0, 1, 2, 3])
    def len_fixture(self, request):
        expected_count = request.param
        # build a <w:styles> element holding `expected_count` child styles
        styles_bldr = a_styles().with_nsdecls()
        for _ in range(expected_count):
            styles_bldr.with_child(a_style())
        return _Styles(styles_bldr.element), expected_count
| {
"content_hash": "2de5d61e173e548436693449d12cc4a0",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 77,
"avg_line_length": 32.5,
"alnum_prop": 0.6006153846153847,
"repo_name": "sk1tt1sh/python-docx",
"id": "226fb2bdafb7a40279c944e86989a89331c74fa3",
"size": "4894",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/parts/test_styles.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "551267"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add a ``key`` CharField to ``PlayerRating`` and ``TeamRating``.

    ``default=''`` with ``preserve_default=False`` back-fills existing rows
    with the empty string without keeping the default on the model.
    """
    dependencies = [
        ('bughouse', '0005_auto_20150211_2025'),
    ]
    operations = [
        migrations.AddField(
            model_name='playerrating',
            name='key',
            field=models.CharField(default='', max_length=255),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='teamrating',
            name='key',
            field=models.CharField(default='', max_length=255),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "9ba064ed4807b210e3bb96054ef606e6",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 63,
"avg_line_length": 25.2,
"alnum_prop": 0.5650793650793651,
"repo_name": "simpleenergy/bughouse-ranking",
"id": "6a8635df50765023997de3bd62590656ed3c2225",
"size": "654",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bughouse/migrations/0006_auto_20150211_2025.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3692"
},
{
"name": "HTML",
"bytes": "7840"
},
{
"name": "Handlebars",
"bytes": "8084"
},
{
"name": "JavaScript",
"bytes": "397536"
},
{
"name": "Makefile",
"bytes": "775"
},
{
"name": "Python",
"bytes": "72815"
},
{
"name": "Shell",
"bytes": "1682"
}
],
"symlink_target": ""
} |
# Python 2 exploration script: parse a Cisco IOS config with ciscoconfparse
# and print the "crypto map CRYPTO" entries with their child lines.
import ciscoconfparse
from ciscoconfparse import CiscoConfParse
# Parse the IOS configuration from a local file in the working directory.
cisco_cfg = CiscoConfParse("cisco.txt")
cisco_cfg
'''
intf = cisco_cfg.find_objects(r"^interface")
for i in intf:
    print intf
fa4 = intf[4]
fa4
fa4.children
'''
# Find all top-level "crypto map CRYPTO ..." statements.
#search for 88 crypto map CRYPTO
#crypto_map = cisco_cfg
crypto_map = cisco_cfg.find_objects(r"^crypto map CRYPTO ")
crypto_map
for i in crypto_map:
    print i.text
#now print out the children
'''
for child in crypto_map.all_children:
    print child.text
'''
# Print each crypto map entry followed by its indented child lines.
for c_map in crypto_map:
    print
    print c_map.text
    for child in c_map.children:
        print child.text
    print
| {
"content_hash": "494e61cdb66d96278e2a81e2c12ef24d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 59,
"avg_line_length": 16.289473684210527,
"alnum_prop": 0.7011308562197092,
"repo_name": "tigerrabbit/pynet_gdw",
"id": "b9a268423ccd1915e5238e81177a01b1bd9c1c5a",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "week1/confparse.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3463"
}
],
"symlink_target": ""
} |
import statistics
import sys
import time
import pyperf
from pyperf._formatter import (format_number, format_value, format_values,
format_timedelta)
from pyperf._utils import MS_WINDOWS, percentile, median_abs_dev
# Hard cap on the loop count reached by repeated doubling during calibration.
MAX_LOOPS = 2 ** 32
# Parameters to calibrate and recalibrate warmups
MAX_WARMUP_VALUES = 300
WARMUP_SAMPLE_SIZE = 20
class WorkerTask:
    """Runs a single benchmark inside a worker process.

    Handles calibration of the number of warmups and loops, collects raw
    timing values from ``task_func``, and packages the result as a
    ``pyperf.Run``.
    """
    def __init__(self, runner, name, task_func, func_metadata):
        args = runner.args
        name = name.strip()
        if not name:
            raise ValueError("benchmark name must be a non-empty string")
        self.name = name
        self.args = args
        # task_func(self, loops) -> raw timing for `loops` iterations
        self.task_func = task_func
        self.loops = args.loops
        self.metadata = dict(runner.metadata)
        if func_metadata:
            self.metadata.update(func_metadata)
        if 'unit' not in self.metadata:
            # Set default unit to seconds
            self.metadata['unit'] = 'second'
        self.inner_loops = None
        self.warmups = None
        self.values = ()
    def _compute_values(self, values, nvalue,
                        is_warmup=False,
                        calibrate_loops=False,
                        start=0):
        """Append ``nvalue`` values to ``values``.

        Warmup entries are (loops, value) pairs; regular entries are bare
        values. When ``calibrate_loops`` is true, ``self.loops`` is doubled
        until a raw timing reaches ``args.min_time`` (each doubling adds one
        extra value to collect).
        """
        unit = self.metadata.get('unit')
        args = self.args
        if nvalue < 1:
            raise ValueError("nvalue must be >= 1")
        if self.loops <= 0:
            raise ValueError("loops must be >= 1")
        if is_warmup:
            value_name = 'Warmup'
        else:
            value_name = 'Value'
        index = 1
        inner_loops = self.inner_loops
        if not inner_loops:
            inner_loops = 1
        while True:
            if index > nvalue:
                break
            raw_value = self.task_func(self, self.loops)
            raw_value = float(raw_value)
            # normalize raw timing to a per-iteration value
            value = raw_value / (self.loops * inner_loops)
            if not value and not calibrate_loops:
                raise ValueError("benchmark function returned zero")
            if is_warmup:
                values.append((self.loops, value))
            else:
                values.append(value)
            if args.verbose:
                text = format_value(unit, value)
                if is_warmup:
                    text = ('%s (loops: %s, raw: %s)'
                            % (text,
                               format_number(self.loops),
                               format_value(unit, raw_value)))
                print("%s %s: %s" % (value_name, start + index, text))
            if calibrate_loops and raw_value < args.min_time:
                if self.loops * 2 > MAX_LOOPS:
                    print("ERROR: failed to calibrate the number of loops")
                    print("Raw timing %s with %s is still smaller than "
                          "the minimum time of %s"
                          % (format_value(unit, raw_value),
                             format_number(self.loops, 'loop'),
                             format_timedelta(args.min_time)))
                    sys.exit(1)
                self.loops *= 2
                # need more values for the calibration
                nvalue += 1
            index += 1
    def collect_metadata(self):
        """Collect run metadata (process-level metadata excluded here)."""
        from pyperf._collect_metadata import collect_metadata
        return collect_metadata(process=False)
    def test_calibrate_warmups(self, nwarmup, unit):
        """Return True if ``nwarmup`` warmups are enough.

        Splits the post-warmup values into two halves and accepts when the
        first value is not a high outlier (IQR rule) and the halves' mean,
        MAD, Q1 and Q3 agree within fixed tolerances.
        """
        half = nwarmup + (len(self.warmups) - nwarmup) // 2
        sample1 = [value for loops, value in self.warmups[nwarmup:half]]
        sample2 = [value for loops, value in self.warmups[half:]]
        first_value = sample1[0]
        # test if the first value is an outlier
        values = sample1[1:] + sample2
        q1 = percentile(values, 0.25)
        q3 = percentile(values, 0.75)
        iqr = q3 - q1
        outlier_max = (q3 + 1.5 * iqr)
        # only check maximum, not minimum
        outlier = not(first_value <= outlier_max)
        mean1 = statistics.mean(sample1)
        mean2 = statistics.mean(sample2)
        mean_diff = (mean1 - mean2) / float(mean2)
        s1_q1 = percentile(sample1, 0.25)
        s2_q1 = percentile(sample2, 0.25)
        s1_q3 = percentile(sample1, 0.75)
        s2_q3 = percentile(sample2, 0.75)
        q1_diff = (s1_q1 - s2_q1) / float(s2_q1)
        q3_diff = (s1_q3 - s2_q3) / float(s2_q3)
        mad1 = median_abs_dev(sample1)
        mad2 = median_abs_dev(sample2)
        # FIXME: handle division by zero
        # NOTE(review): mad2 == 0 (all-identical second sample) raises
        # ZeroDivisionError here.
        mad_diff = (mad1 - mad2) / float(mad2)
        if self.args.verbose:
            stdev1 = statistics.stdev(sample1)
            stdev2 = statistics.stdev(sample2)
            stdev_diff = (stdev1 - stdev2) / float(stdev2)
            sample1_str = format_values(unit, (s1_q1, mean1, s1_q3, stdev1, mad1))
            sample2_str = format_values(unit, (s2_q1, mean2, s2_q3, stdev2, mad2))
            print("Calibration: warmups=%s" % format_number(nwarmup))
            print(" first value: %s, outlier? %s (max: %s)"
                  % (format_value(unit, first_value), outlier,
                     format_value(unit, outlier_max)))
            print(" sample1(%s): Q1=%s mean=%s Q3=%s stdev=%s MAD=%s"
                  % (len(sample1),
                     sample1_str[0],
                     sample1_str[1],
                     sample1_str[2],
                     sample1_str[3],
                     sample1_str[4]))
            print(" sample2(%s): Q1=%s mean=%s Q3=%s stdev=%s MAD=%s"
                  % (len(sample2),
                     sample2_str[0],
                     sample2_str[1],
                     sample2_str[2],
                     sample2_str[3],
                     sample2_str[4]))
            print(" diff: Q1=%+.0f%% mean=%+.0f%% Q3=%+.0f%% stdev=%+.0f%% MAD=%+.0f%%"
                  % (q1_diff * 100,
                     mean_diff * 100,
                     q3_diff * 100,
                     stdev_diff * 100,
                     mad_diff * 100))
        if outlier:
            return False
        if not(-0.5 <= mean_diff <= 0.10):
            return False
        if abs(mad_diff) > 0.10:
            return False
        if abs(q1_diff) > 0.05:
            return False
        if abs(q3_diff) > 0.05:
            return False
        return True
    def calibrate_warmups(self):
        """Grow the warmup count until timings are judged stable.

        Exits the process (exit code 1) if MAX_WARMUP_VALUES is reached
        without stabilizing.
        """
        # calibrate the number of warmups
        if self.loops < 1:
            raise ValueError("loops must be >= 1")
        if self.args.recalibrate_warmups:
            nwarmup = self.args.warmups
        else:
            nwarmup = 1
        unit = self.metadata.get('unit')
        start = 0
        # test_calibrate_warmups() requires at least 2 values per sample
        while True:
            total = nwarmup + WARMUP_SAMPLE_SIZE * 2
            nvalue = total - len(self.warmups)
            if nvalue:
                self._compute_values(self.warmups, nvalue,
                                     is_warmup=True,
                                     start=start)
                start += nvalue
            if self.test_calibrate_warmups(nwarmup, unit):
                break
            if len(self.warmups) >= MAX_WARMUP_VALUES:
                print("ERROR: failed to calibrate the number of warmups")
                values = [format_value(unit, value)
                          for loops, value in self.warmups]
                print("Values (%s): %s" % (len(values), ', '.join(values)))
                sys.exit(1)
            nwarmup += 1
        if self.args.verbose:
            print("Calibration: use %s warmups" % format_number(nwarmup))
            print()
        if self.args.recalibrate_warmups:
            self.metadata['recalibrate_warmups'] = nwarmup
        else:
            self.metadata['calibrate_warmups'] = nwarmup
    def calibrate_loops(self):
        """Calibrate ``self.loops`` so one timing lasts >= args.min_time.

        The actual doubling happens in _compute_values(calibrate_loops=True);
        collected values are kept as warmups.
        """
        args = self.args
        if not args.recalibrate_loops:
            self.loops = 1
        if args.warmups is not None:
            nvalue = args.warmups
        else:
            nvalue = 1
        nvalue += args.values
        self._compute_values(self.warmups, nvalue,
                             is_warmup=True,
                             calibrate_loops=True)
        if args.verbose:
            print()
            print("Calibration: use %s loops" % format_number(self.loops))
            print()
        if args.recalibrate_loops:
            self.metadata['recalibrate_loops'] = self.loops
        else:
            self.metadata['calibrate_loops'] = self.loops
    def compute_warmups_values(self):
        """Collect the configured number of warmups, then real values."""
        args = self.args
        if args.warmups:
            self._compute_values(self.warmups, args.warmups, is_warmup=True)
            if args.verbose:
                print()
        self._compute_values(self.values, args.values)
        if args.verbose:
            print()
    def compute(self):
        """Run the task in the requested mode and finalize metadata."""
        args = self.args
        self.metadata['name'] = self.name
        if self.inner_loops is not None:
            self.metadata['inner_loops'] = self.inner_loops
        self.warmups = []
        self.values = []
        if args.calibrate_warmups or args.recalibrate_warmups:
            self.calibrate_warmups()
        elif args.calibrate_loops or args.recalibrate_loops:
            self.calibrate_loops()
        else:
            self.compute_warmups_values()
        # collect metatadata
        metadata2 = self.collect_metadata()
        metadata2.update(self.metadata)
        self.metadata = metadata2
        self.metadata['loops'] = self.loops
    def create_run(self):
        """Execute the task and wrap the results in a pyperf.Run."""
        start_time = time.monotonic()
        self.compute()
        self.metadata['duration'] = time.monotonic() - start_time
        return pyperf.Run(self.values,
                          warmups=self.warmups,
                          metadata=self.metadata,
                          collect_metadata=False)
    def _set_memory_value(self, value):
        """Replace collected timings with a single memory measurement."""
        is_calibration = (not self.values)
        self.metadata['unit'] = 'byte'
        self.metadata['warmups'] = len(self.warmups)
        self.metadata['values'] = len(self.values)
        if is_calibration:
            values = ((self.loops, value),)
            self.warmups = values
            self.values = ()
        else:
            self.warmups = None
            self.values = (value,)
class WorkerProcessTask(WorkerTask):
    """WorkerTask variant that can also track process memory usage."""
    def compute(self):
        """Run the benchmark, optionally replacing timings with memory peaks.

        --track-memory uses the OS-level peak (pagefile usage on Windows, a
        sampling thread elsewhere); --tracemalloc uses Python allocations.
        """
        args = self.args
        if args.track_memory:
            if MS_WINDOWS:
                from pyperf._win_memory import get_peak_pagefile_usage
            else:
                from pyperf._memory import PeakMemoryUsageThread
                mem_thread = PeakMemoryUsageThread()
                mem_thread.start()
        if args.tracemalloc:
            import tracemalloc
            tracemalloc.start()
        WorkerTask.compute(self)
        if args.tracemalloc:
            traced_peak = tracemalloc.get_traced_memory()[1]
            tracemalloc.stop()
            if not traced_peak:
                raise RuntimeError("tracemalloc didn't trace any Python "
                                   "memory allocation")
            # drop timings, replace them with the memory peak
            self._set_memory_value(traced_peak)
        if args.track_memory:
            if MS_WINDOWS:
                mem_peak = get_peak_pagefile_usage()
            else:
                mem_thread.stop()
                mem_peak = mem_thread.peak_usage
            if not mem_peak:
                raise RuntimeError("failed to get the memory peak usage")
            # drop timings, replace them with the memory peak
            self._set_memory_value(mem_peak)
    def collect_metadata(self):
        """Collect metadata including process-level information."""
        from pyperf._collect_metadata import collect_metadata
        return collect_metadata()
| {
"content_hash": "dd4a89bfe8fab2e6e2e076a63bcd35cd",
"timestamp": "",
"source": "github",
"line_count": 350,
"max_line_length": 88,
"avg_line_length": 33.58571428571429,
"alnum_prop": 0.5152700978307103,
"repo_name": "haypo/perf",
"id": "2ecba68eee1b715bc24747afd1ad56635b99ae68",
"size": "11755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyperf/_worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317929"
}
],
"symlink_target": ""
} |
import os
import plistlib
import sys
import threading

import sublime
import sublime_plugin

# import urllib
# Plugin settings file name.
SETTING_FILE = "PUIEditor.sublime-settings"
# UI resource path (loaded from the settings file).
CONFIG_PATH = ""
# Loaded settings object.
SETTINGS = {}
# UI synchronisation server endpoint.
UI_SERVER_URL = ""
# Config command: asks for the UI resource path, persists it and adds the
# folder to the current project.
class PuiConfigCommand(sublime_plugin.WindowCommand):
    def run(self):
        """Prompt the user for the UI resource path (pre-filled from settings)."""
        global SETTINGS
        global CONFIG_PATH
        SETTINGS = sublime.load_settings(SETTING_FILE)
        CONFIG_PATH = SETTINGS.get("UIResPath")
        # print("configPath:" + CONFIG_PATH)
        self.window.show_input_panel(u"设置 UI 资源路径:", CONFIG_PATH, self.onConfigDone, None, self.onConfigCancel)
    def onConfigDone(self, data):
        """Confirm and persist the entered path, then regenerate snippets."""
        # print(self.window.project_data())
        global SETTINGS
        global CONFIG_PATH
        CONFIG_PATH = data
        if CONFIG_PATH == "":
            sublime.error_message(u"需设置 UI 资源路径!")
            return
        isOk = sublime.ok_cancel_dialog(u"是否将 UI 资源路径设置为: " + CONFIG_PATH + " ?")
        if not isOk:
            # User declined: ask again with an empty input panel.
            self.window.show_input_panel(u"重新设置 UI 资源路径:", "", self.onConfigDone, None, self.onConfigCancel)
        else:
            # print(CONFIG_PATH)
            SETTINGS.set("UIResPath", CONFIG_PATH)
            # Persist settings to PUIEditor.sublime-settings in the User directory.
            sublime.save_settings(SETTING_FILE)
            # Add the resource folder to the current project so it is browsable.
            projectData = self.window.project_data()
            folders = projectData['folders']
            folders.append({"path": CONFIG_PATH})
            self.window.set_project_data(projectData)
            # Regenerate resource snippets from the new path.
            snippetsGenerate = SnippetsGenerate()
            snippetsGenerate.dealFiles()
            # print(projectData['folders'])
    def onConfigCancel(self):
        """Warn when the input panel is dismissed without a path configured."""
        global CONFIG_PATH
        if CONFIG_PATH == "":
            sublime.error_message(u"需设置 UI 资源路径!")
# Server command: configures the UI sync server URL and starts the sync thread.
class PuiServerCommand(sublime_plugin.WindowCommand):
    def run(self):
        """Prompt for the sync server URL, then start the sync thread.

        NOTE(review): the ServerThread is started regardless of whether a
        URL has been configured yet.
        """
        global SETTINGS
        global UI_SERVER_URL
        SETTINGS = sublime.load_settings(SETTING_FILE)
        UI_SERVER_URL = SETTINGS.get("UIServerUrl")
        if UI_SERVER_URL == "":
            sublime.error_message(u"需先设置 UI 同步服务器地址")
            self.window.show_input_panel(u"设置 UI 同步服务器地址:", "", self.onServerConfigDone, None, None)
        else:
            self.window.show_input_panel(u"设置 UI 同步服务器地址:", UI_SERVER_URL, self.onServerConfigDone, None, None)
        # Start the data synchronisation thread.
        serverThread = ServerThread()
        serverThread.start()
    def onServerConfigDone(self, data):
        """Persist the entered URL, or re-prompt when it is empty."""
        global SETTINGS
        global UI_SERVER_URL
        UI_SERVER_URL = data
        if UI_SERVER_URL == "":
            sublime.error_message(u"需先设置 UI 同步服务器地址")
            self.window.show_input_panel(u"设置 UI 同步服务器地址:", UI_SERVER_URL, self.onServerConfigDone, None, None)
        else:
            SETTINGS.set("UIServerUrl", UI_SERVER_URL)
            sublime.save_settings(SETTING_FILE)
# UI preview. TODO: not implemented yet.
class PuiPreviewCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        # Placeholder: currently only logs the invocation.
        print("PuiPreviewCommand")
# Create a new <layer> container document.
class PuiCreateCommand(sublime_plugin.TextCommand):
    def run(self, edit):
        """Open a new buffer pre-filled with an empty PXML layer template."""
        template = '''<!-- Cocos2d-x UI Configuration -->
<layer anchorX="0" anchorY="0" x="0" y="0">
</layer>'''
        print(self.view)
        window = self.view.window()
        window.new_file()
        activeView = window.active_view()
        activeView.set_name("pui.pxml")
        # Use the plugin's PXML syntax highlighting for the new buffer.
        activeView.set_syntax_file("Packages/UIEditor/PXML.tmLanguage")
        activeView.insert(edit, 0, template)
# Communication thread.
class ServerThread(threading.Thread):
    """Background thread intended to synchronise UI data with the server."""
    def __init__(self):
        # Initialise the base Thread machinery before anything else.
        super().__init__()
        print("ServerThread")
    def run(self):
        # Placeholder: the actual synchronisation work is not implemented yet.
        print("Threading Runing")
# Resource snippet generation.
class SnippetsGenerate():
    """Generate one Sublime snippet file per sprite frame found in the
    ``.plist`` files under the configured UI resource path.

    State is kept on the class itself (``plists`` / ``snippetKeys``) so the
    helpers can be invoked through the class, as the rest of the plugin does.
    Requires the module-level ``os`` import (it was missing originally,
    making every method raise NameError at runtime).
    """
    # Collected .plist file paths and sprite-frame keys (class-level state,
    # shared across instances).
    plists = []
    snippetKeys = []
    # Load the UI resource path from the plugin settings at class-definition
    # time (preserves the original module's import-time behavior).
    global SETTINGS
    global CONFIG_PATH
    SETTINGS = sublime.load_settings(SETTING_FILE)
    CONFIG_PATH = SETTINGS.get("UIResPath")
    def dealFiles(self):
        """Walk CONFIG_PATH collecting every .plist file, then parse them."""
        global CONFIG_PATH
        for root, dirs, files in os.walk(CONFIG_PATH, True):
            for name in files:
                fileName = os.path.join(name)
                fileType = os.path.splitext(name)[1]
                if fileType == ".plist":
                    SnippetsGenerate.plists.append(root + "/" + fileName)
        # Parse once after the whole walk. The original called parseFiles()
        # inside the directory loop, redundantly re-reading every previously
        # collected plist for each directory visited; the end state is the
        # same set of snippet files.
        SnippetsGenerate.parseFiles()
    @staticmethod
    def parseFiles():
        """Read every collected .plist and accumulate its frame keys."""
        for filePath in SnippetsGenerate.plists:
            if not os.path.exists(filePath):
                # NOTE(review): a missing file aborts the whole loop (original
                # behavior kept); remaining plists are skipped.
                break
            pl = plistlib.readPlist(filePath)
            for fileKey in pl["frames"]:
                SnippetsGenerate.snippetKeys.append(fileKey)
        SnippetsGenerate.generateSnippetFile()
    @staticmethod
    def generateSnippetFile():
        """Write a ``res_<name>`` snippet file for each collected frame key."""
        template = '''<snippet>
    <description>%s</description>
    <content><![CDATA[%s${1:$SELECTION}]]></content>
    <tabTrigger>%s</tabTrigger>
    <scope>source.pxml</scope>
</snippet>'''
        snippetsRoot = sublime.packages_path() + "/UIEditor/snippets/sourceSnippets/"
        for snippetKey in SnippetsGenerate.snippetKeys:
            # Drop the frame's file extension for the snippet name/trigger.
            snippetFileName = os.path.splitext(snippetKey)[0]
            print(snippetFileName)
            # `with` guarantees the file handle is closed even on write errors.
            with open(snippetsRoot + snippetFileName + ".sublime-snippet", "w") as f:
                f.write(template % (snippetKey, snippetKey, "res_" + snippetFileName))
class PreviewImageCommand(sublime_plugin.TextCommand):
    """Image preview command — not implemented; the sketch below is kept as
    a starting point for a quick-panel based preview."""
    def run(self, edit):
        pass
        # Get the file path from the json file
        # imagepath = '/Volumes/Media/vancopper/Pictures/04.jpg'
        # self.files = [imagepath]
        # print(self.view)
        # self.view.show_popup_menu(self.files, self.on_done)
        # sublime.active_window().show_quick_panel(self.files, lambda s: self.on_done(s), selected_index=0, on_highlight=lambda s: self.show_preview(s) )
    # def on_done(self, index):
    #   print()
    #   sublime.active_window().focus_view( self.view )
    # def show_preview(self, index):
    #   if index >= 0:
    #       file_path = self.files[index]
    #       sublime.active_window().open_file(file_path, sublime.TRANSIENT)
    #
# | {
"content_hash": "716898b1f60fa162ad5ab22d61f92627",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 153,
"avg_line_length": 26.72906403940887,
"alnum_prop": 0.6940656100258017,
"repo_name": "TgermPrj/UIEditor",
"id": "b9496b5645432b84be6f95b5fdecbd1f2e45ba9a",
"size": "5744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PUIEditor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5744"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.contenttypes import generic
from statusbayes.models import SpamStatus
class SpamStatusInline(generic.GenericTabularInline):
    # Tabular inline for the generic SpamStatus relation; attach it to the
    # admin of any model that carries spam statuses.
    model = SpamStatus
| {
"content_hash": "565564d17219d13ec5dcd6feca05ad78",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 53,
"avg_line_length": 28.857142857142858,
"alnum_prop": 0.8415841584158416,
"repo_name": "waylan/django-spambayes",
"id": "d87cb49c70f756f08550cf6c6ece99ecb8330fc5",
"size": "202",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "statusbayes/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3616"
},
{
"name": "Python",
"bytes": "17226"
}
],
"symlink_target": ""
} |
import sys
import getopt
import re
import requests
import json
import os
from os import listdir
from os.path import isfile, join
from bs4 import BeautifulSoup
# To be specified by the user.
user = {
    "login": "", # KI username
    "password": "", # KI password
    "id": 0 # Your KI identifier
}
root = "http://www.kraland.org/"
url_login = root + "main.php?p=1&a=100"
url_test = root + "main.php?p=2_3"
url_fightDefinitions = root + "report.php?p=2&p1="
# Where fetched fight pages and JSON exports are written.
directory_save = "/tmp/"
file_fightDefinition_prefix = 'KI Combat - '
# Optional HTTP proxy configuration for requests.
proxyDict = {
    #"http" : ""
}
def usage(errorcode):
    """Print the CLI usage text (in French) and exit with `errorcode`."""
    print "\nUsage: " + sys.argv[0] + " [--get] [--file=<fichier à importer>] [--directory=<dossier à importer>] [--exportAll=<fichier CSV de sortie>] [--exportSummary=<fichier CSV de sortie>]"
    print ""
    print " --get: Permet de récupérer au format HTML tous les combats visibles dans la liste des combats."
    print " --file<fichier à importer> : Permet d'importer un fichier HTML d'un combat"
    print " --directory<dossier à importer> : Permet d'importer un répertoire dans lequel se trouvent des fichiers HTML d'un combat. Fait la même chose que --file=<fichier> mais avec un répertoire."
    print " --exportAll=<fichier CSV de sortie>: Retourne au format CSV la liste de tous les combats et de tous les participants, avec un certain nombre de statistiques."
    print " --exportSummary=<fichier CSV de sortie>: Retourne au format CSV la liste de tous les participants et un certain nombre de statistiques."
    print ""
    print "Exemple de exportAll:"
    print " Combat;Personnage;Nombre de rounds;Dégâts infligés;Dégâts reçus;Nombre d'attaques;Nombre d'attaques réussies;Nombre de parades;Nombre de parades réussies"
    print " 89351-Tyrant Kane VS 2 Policiers;Dragonneau;9;35;24;9;7;17;8"
    print " 89351-Tyrant Kane VS 2 Policiers;2 Policiers;5;13;41;15;3;9;5"
    print " 89351-Tyrant Kane VS 2 Policiers;Tyrant Kane;2;19;6;4;2;5;3"
    print " 89351-Tyrant Kane VS 2 Policiers;Policier;4;17;13;7;2;4;0"
    print ""
    print "Exemple de exportSummary:"
    print " Personnage;Nombre de rounds;Dégâts infligés;Dégâts reçus;Ciblé;Nombre d'attaques;Nombre d'attaques réussies;Nombre de parades;Nombre de parades réussies"
    print " 2 Policiers;5;13;41;9;15;3;9;5"
    print " Dragonneau;10;38;43;20;10;8;20;11"
    print " Tyrant Kane;2;19;6;5;4;2;5;3"
    print " Policier;4;17;13;4;7;2;4;0"
    print ""
    sys.exit(errorcode)
def main(argv):
    """Validate credentials, parse CLI options and dispatch to StatCombat."""
    if user["login"] == "":
        print "\nVous devez spécifier un nom d'utilisateur (variable user au début de ce script"
        sys.exit(2)
    if user["password"] == "":
        print "\nVous devez spécifier un mot de passe (variable user au début de ce script"
        sys.exit(2)
    # NOTE(review): user["id"] defaults to the int 0, so this "" comparison
    # can never match — the id check is effectively dead.
    if user["id"] == "":
        print "\nVous devez spécifier votre identifiant de KI (variable user au début de ce script"
        sys.exit(2)
    try:
        opts, args = getopt.getopt(argv, "", ["get", "file=", "directory=", "exportAll=", "exportSummary="])
    except getopt.GetoptError:
        usage(2)
    u = StatCombat(user["login"], user["password"], user["id"])
    u.login()
    fightDefinitions = []
    fights = []
    # NOTE(review): ("--get") is a plain string, not a tuple, so `opt in`
    # performs a substring test; it happens to behave like equality here
    # because opt is always the full option string.
    for opt, arg in opts:
        if opt == '-h':
            usage(0)
        elif opt in ("--get"):
            fightDefinitions = u.getFights()
            u.saveFightDefinition(fightDefinitions)
            fights = u.parseFights(fightDefinitions)
        elif opt in ("--file"):
            fightDefinitions = u.importFile(arg)
            u.parseFights(fightDefinitions)
        elif opt in ("--directory"):
            fightDefinitions = u.importDirectory(arg)
            fights = u.parseFights(fightDefinitions)
        elif opt in ("--exportAll"):
            u.exportAll(fights, arg)
        elif opt in ("--exportSummary"):
            u.exportSummary(fights, arg)
        else:
            usage(2)
class StatCombat:
username = ""
password = ""
userid = 0
cookies = ""
session = requests.Session()
def __init__(self, username, password, userid):
self.username = username
self.password = password
self.userid = userid
    ##
    ## Log in to KI and keep the session cookies.
    ##
    def login(self):
        # Authenticate
        data = {"p1": self.username, "p2": self.password}
        r = self.session.post(url_login, proxies=proxyDict, params = data)
        self.cookies = r.cookies
        # Check authentication by fetching a page that shows the profile name.
        r = self.session.get(url_test, cookies = self.cookies, proxies=proxyDict)
        soup = BeautifulSoup(r.content)
        for p in soup.find_all("p", class_="right-boxprofile-name"):
            print " -> Connecté en tant que " + str(p.text)
    ##
    ## Fetch the fight reports of the current character.
    ##
    def getFights(self):
        """Return a list of dicts {title, url, content} for each fight link."""
        # print "-- Getting fightDefinitions --"
        r = self.session.get(url_fightDefinitions + str(self.userid), proxies=proxyDict)
        r.content
        fightDefinitions = []
        soup = BeautifulSoup(r.content)
        for li in soup.find_all("li"):
            # Fight links are the anchors whose text contains "VS".
            link = li.find("a", text=re.compile(r'VS'))
            if link != None:
                # Extract the numeric fight id from the link's query string.
                m = re.search('.*=([0-9]+)$', link["href"])
                id = "0"
                if m:
                    id = m.group(1)
                # NOTE(review): uses the requests module directly instead of
                # self.session (unlike login()); cookies are passed explicitly.
                r = requests.get(root + link["href"], cookies = self.cookies, proxies=proxyDict)
                fightDefinitions.append({
                    "title": id + "-" + link.text,
                    "url": link["href"],
                    "content": r.content
                })
        return fightDefinitions
    ##
    ## Read a single fight HTML file and return its content.
    ##
    def importFile(self, file):
        """Return a one-element fightDefinitions list built from `file`."""
        # print "Importing " + file
        f = open(file, "r");
        fightDefinitions = [{
            "url": "999",
            "content": f.read(),
            "title": file_fightDefinition_prefix + os.path.basename(file)
        }]
        return fightDefinitions
    ##
    ## Read every file found in a directory and return their contents.
    ##
    def importDirectory(self, path):
        """Return a fightDefinitions list built from all files under `path`."""
        # print "Importing " + path
        files = [ f for f in listdir(path) if isfile(join(path, f)) ]
        fightDefinitions = []
        for file in files:
            f = open(path + file, "r");
            fightDefinitions.append({
                "url": "999",
                "content": f.read(),
                "title": file_fightDefinition_prefix + os.path.splitext(os.path.basename(file))[0]
            })
        return fightDefinitions
    ##
    ## Save the HTML of the fight definitions to disk.
    ##
    def saveFightDefinition(self, fightDefinitions):
        """Write each fight's HTML content to directory_save/<title>.html."""
        # print "-- Saving fightDefinitions --"
        for fightDefinition in fightDefinitions:
            # Only fights fetched online (numeric id in the URL) are saved.
            m = re.search('.*=([0-9]+)$', fightDefinition["url"])
            if m:
                fightDefinitionid = m.group(1)
                fh = open(directory_save + "/" + fightDefinition["title"] + ".html", "w")
                fh.write(fightDefinition["content"])
                fh.close();
            else:
                print "Mauvaise URL de combat : " + fightDefinition["url"]
    ##
    ## Parse the HTML of the given fight definitions.
    ##
    def parseFights(self, fightDefinitions):
        """Parse each fight's HTML into per-character round statistics.

        Returns {fight title: {character name: stats}} and also writes one
        JSON file per fight plus an aggregate all.json under directory_save.
        """
        fights = {}
        for fightDefinition in fightDefinitions:
            # print "-- Parsing fightDefinition " + fightDefinition["title"] + " --"
            fights[fightDefinition["title"]] = {}
            personnages = fights[fightDefinition["title"]] = {}
            lastPersonnage = ''
            soup = BeautifulSoup(fightDefinition["content"])
            nbRounds = len(soup.find_all("p", class_="t", text=re.compile(r'Round')))
            currentRound = 1
            # Each <table> holds the actions of one round.
            for table in soup.find_all("table"):
                for row in table.find_all("tr"):
                    if len(row.find_all('td')) > 1:
                        cells = row.find_all('td', class_="tdb")
                        if len(cells) < 3:
                            continue
                        # 6-cell rows start a new acting character; 5-cell
                        # rows continue the previous one (cells shift by -1).
                        modifier = 0
                        if len(cells) == 6:
                            lastPersonnage = cells[0].text
                            if lastPersonnage not in personnages:
                                personnages[lastPersonnage] = {
                                    "Nom": lastPersonnage,
                                    "Rounds": {},
                                    "DegatsRecus": 0,
                                    "DegatsInfliges": 0
                                }
                        elif len(cells) == 5:
                            modifier = -1
                        attaqueCell = cells[2 + modifier].text
                        cibleCell = cells[3 + modifier].text
                        paradeCell = cells[4 + modifier].text
                        degatsCell = str(cells[5 + modifier].text)
                        # init
                        nomAttaque = ''
                        jetAttaque = 0
                        chanceAttaque = 0
                        nomParade = ''
                        jetParade = 0
                        chanceParade = 0
                        degatsRecus = 0
                        degatsInfliges = 0
                        # Attack roll: "name (roll/chance%)"
                        m = re.search('^(?:([^\(]+)|-)\(([0-9]+)/([0-9]+)%\)', attaqueCell)
                        if m:
                            nomAttaque = m.group(1)
                            jetAttaque = m.group(2)
                            chanceAttaque = m.group(3)
                        # Parry roll: same "name (roll/chance%)" format
                        m = re.search('^(?:([^\(]+)|-)\(([0-9]+)/([0-9]+)%\)', paradeCell)
                        if m:
                            nomParade = m.group(1)
                            jetParade = m.group(2)
                            chanceParade = m.group(3)
                        # Damage dealt ("... N PdV")
                        m = re.search('^.* ([0-9]+) PdV', degatsCell)
                        if m:
                            degatsInfliges = int(m.group(1))
                        if cibleCell not in personnages:
                            personnages[cibleCell] = {
                                "Nom": cibleCell,
                                "Rounds": {},
                                "DegatsRecus": 0,
                                "DegatsInfliges": 0
                            }
                        if "Round " + str(currentRound) not in personnages[cibleCell]["Rounds"]:
                            personnages[cibleCell]["Rounds"]["Round " + str(currentRound)] = {
                                "DegatsRecus": 0,
                                "DegatsInfliges": 0,
                                "Attaque": [],
                                "Parade": [],
                                "Cible": 0
                            }
                        # Target bookkeeping: damage received + parry attempt
                        if "DegatsRecus" not in personnages[cibleCell]:
                            personnages[cibleCell]["DegatsRecus"] = 0
                        if "DegatsRecus" not in personnages[cibleCell]["Rounds"]["Round " + str(currentRound)]:
                            personnages[cibleCell]["Rounds"]["Round " + str(currentRound)]["DegatsRecus"] = 0
                        personnages[cibleCell]["DegatsRecus"] += degatsInfliges
                        personnages[cibleCell]["Rounds"]["Round " + str(currentRound)]["DegatsRecus"] += degatsInfliges
                        personnages[cibleCell]["Rounds"]["Round " + str(currentRound)]["Cible"] += 1
                        if nomParade != "":
                            personnages[cibleCell]["Rounds"]["Round " + str(currentRound)]["Parade"].append({
                                "Nom": nomParade,
                                "Jet": int(jetParade),
                                "Chances": int(chanceParade),
                                "Résultat": (int(jetParade) < int(chanceParade))
                            })
                        # Attacker bookkeeping: damage dealt + attack attempt
                        if "Round " + str(currentRound) not in personnages[lastPersonnage]["Rounds"]:
                            personnages[lastPersonnage]["Rounds"]["Round " + str(currentRound)] = {
                                "DegatsInfliges": 0,
                                "Attaque": [],
                                "Parade": [],
                                "Cible": 0
                            }
                        personnages[lastPersonnage]["DegatsInfliges"] += degatsInfliges
                        personnages[lastPersonnage]["Rounds"]["Round " + str(currentRound)]["DegatsInfliges"] += degatsInfliges
                        if nomAttaque != "":
                            personnages[lastPersonnage]["Rounds"]["Round " + str(currentRound)]["Attaque"].append({
                                "Nom": nomAttaque,
                                "Jet": int(jetAttaque),
                                "Chances": int(chanceAttaque),
                                "Résultat": (int(jetAttaque) < int(chanceAttaque)),
                                "Infligés": degatsInfliges
                            })
                currentRound += 1
            # Dump this fight's parsed data to its own JSON file.
            f = open(directory_save + '/' + fightDefinition["title"] + ".json", "w")
            f.write(json.dumps(fights[fightDefinition["title"]]))
            f.close()
        # Dump the aggregate of all parsed fights.
        f = open(directory_save + "/all.json", "w")
        f.write(json.dumps(fights))
        f.close()
        return fights
##
## Retourne la liste de tous les combats et de tous les personnages, ainsi qu'un certain nombre d'informations
##
    def exportAll(self, fights, outputFile):
        """Write one CSV row per (fight, character) pair to ``outputFile``.

        Columns: fight name, character name, round count, damage dealt,
        damage received, attack attempts/successes, parry attempts/successes.
        Output is UTF-8 encoded (Python 2 style ``unicode(...).encode``).

        ``fights`` is assumed to map fight name -> character key ->
        per-character stats dict, as built by the fight parser above
        (TODO confirm against the caller).
        """
        f = open(outputFile, 'w')
        f.write(u"Combat;Personnage;Nombre de rounds;Dégâts infligés;Dégâts reçus;Nombre d'attaques;Nombre d'attaques réussies;Nombre de parades;Nombre de parades réussies\n".encode("utf-8"))
        for fightKey, fightValue in fights.iteritems():
            for persoKey, persoValue in fights[fightKey].iteritems():
                nbRounds = len(persoValue["Rounds"])
                nbAttaquesSuccess = 0
                nbParadesSuccess = 0
                degatsRecus = 0
                degatsInfliges = 0
                nbAttaques = 0
                nbParades = 0
                for roundKey, roundValue in fights[fightKey][persoKey]["Rounds"].iteritems():
                    # Only attempts with a non-zero chance are counted.
                    nbAttaques += len([ item for item in roundValue["Attaque"] if item["Chances"] > 0 ])
                    nbParades += len([ item for item in roundValue["Parade"] if item["Chances"] > 0 ])
                    nbAttaquesSuccess += len([ item for item in roundValue["Attaque"] if item["Résultat"] == True ])
                    nbParadesSuccess += len([ item for item in roundValue["Parade"] if item["Résultat"] == True ])
                    # NOTE(review): plain assignment (not +=) inside the round
                    # loop -- these are already per-character totals, so the
                    # value is identical on every iteration.
                    degatsInfliges = persoValue["DegatsInfliges"]
                    degatsRecus = persoValue["DegatsRecus"]
                row = fightKey + ";" + persoValue["Nom"] + ";" + str(nbRounds) + ";" + str(degatsInfliges) + ";" + str(degatsRecus) + ";" + str(nbAttaques) + ";" + str(nbAttaquesSuccess) + ";" + str(nbParades) + ";" + str(nbParadesSuccess)
                f.write(unicode(row).encode("utf-8") + "\n")
        f.close()
##
## Renvoie un rapport au format CSV par personnage avec un certain nombre d'informations
##
    def exportSummary(self, fights, outputFile):
        """Write a per-character CSV summary aggregated across all fights.

        Each character gets one row with total rounds, damage dealt and
        received, times targeted, and attack/parry attempts and successes.
        """
        personnages = {}
        for fightKey, fightValue in fights.iteritems():
            for persoKey, persoValue in fights[fightKey].iteritems():
                if persoKey not in personnages:
                    # Fresh accumulator for a character seen for the first time.
                    personnages[persoKey] = {
                        "Nombre de rounds": 0,
                        "Dégats Infligés": 0,
                        "Dégats Reçus": 0,
                        "Nombre d'attaques": 0,
                        "Nombre d'attaques réussies": 0,
                        "Nombre de parades": 0,
                        "Nombre de parades réussies": 0,
                        "Ciblé": 0
                    }
                personnages[persoKey]["Nombre de rounds"] += len(persoValue["Rounds"])
                personnages[persoKey]["Dégats Infligés"] += persoValue["DegatsInfliges"]
                personnages[persoKey]["Dégats Reçus"] += persoValue["DegatsRecus"]
                for roundKey, roundValue in fights[fightKey][persoKey]["Rounds"].iteritems():
                    personnages[persoKey]["Ciblé"] += roundValue["Cible"]
                    # Only attempts with a non-zero chance are counted.
                    personnages[persoKey]["Nombre d'attaques"] += len([ item for item in roundValue["Attaque"] if item["Chances"] > 0 ])
                    personnages[persoKey]["Nombre de parades"] += len([ item for item in roundValue["Parade"] if item["Chances"] > 0 ])
                    personnages[persoKey]["Nombre d'attaques réussies"] += len([ item for item in roundValue["Attaque"] if item["Résultat"] == True ])
                    personnages[persoKey]["Nombre de parades réussies"] += len([ item for item in roundValue["Parade"] if item["Résultat"] == True ])
        f = open(outputFile, "w")
        f.write("Personnage;Nombre de rounds;Dégâts infligés;Dégâts reçus;Ciblé;Nombre d'attaques;Nombre d'attaques réussies;Nombre de parades;Nombre de parades réussies\n")
        for key, value in personnages.iteritems():
            f.write(unicode(key + ";" + str(value["Nombre de rounds"]) + ";" +
                str(value["Dégats Infligés"]) + ";" +
                str(value["Dégats Reçus"]) + ";" +
                str(value["Ciblé"]) + ";" +
                str(value["Nombre d'attaques"]) + ";" +
                str(value["Nombre d'attaques réussies"]) + ";" +
                str(value["Nombre de parades"]) + ";" +
                str(value["Nombre de parades réussies"])).encode("utf-8") + "\n")
        f.close()
def is_ascii(s):
    """Return True when every character of *s* is in the 7-bit ASCII range."""
    return not any(ord(ch) > 127 for ch in s)
# Script entry point: forward the command-line arguments (minus argv[0]).
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"content_hash": "77f06583077e3912864028abc28b5451",
"timestamp": "",
"source": "github",
"line_count": 426,
"max_line_length": 227,
"avg_line_length": 33.859154929577464,
"alnum_prop": 0.6416389351081531,
"repo_name": "ph--/ki",
"id": "28c2bb5abe3f11663382d38179e1972eed1a2ed4",
"size": "14553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/stat_combat.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "31326"
},
{
"name": "Python",
"bytes": "14061"
}
],
"symlink_target": ""
} |
'''Utilities
=============
'''
from kivy.compat import PY2
from kivy.utils import get_color_from_hex
from kivy.properties import StringProperty, ObservableDict, ObservableList
from kivy.factory import Factory
from kivy.event import EventDispatcher
from kivy.weakproxy import WeakProxy
import json
from io import StringIO
from ruamel.yaml import YAML, SafeRepresenter
__all__ = ('pretty_time', 'pretty_space', 'byteify', 'json_dumps',
'json_loads', 'ColorTheme', 'apply_args_post')
# Teach ruamel's safe representer to serialize Kivy's observable containers
# as plain YAML sequences/mappings.
SafeRepresenter.add_representer(ObservableList, SafeRepresenter.represent_list)
SafeRepresenter.add_representer(ObservableDict, SafeRepresenter.represent_dict)
def pretty_time(seconds):
    '''Returns a nice representation of a time value.

    :Parameters:

        `seconds`: float, int
            The number, in seconds, to convert to a string.

    :returns:
        String representation of the time, as ``[h:][m:]s.tenths``.

    For example::

        >>> pretty_time(36574)
        '10:9:34.0'
    '''
    # Work in tenths of a second so the fractional digit survives int math.
    tenths = int(seconds * 10)
    whole, tenth = divmod(tenths, 10)
    minutes, secs = divmod(whole, 60)
    hours, minutes = divmod(minutes, 60)
    if hours:
        return '%d:%d:%d.%d' % (hours, minutes, secs, tenth)
    if minutes:
        return '%d:%d.%d' % (minutes, secs, tenth)
    return '%d.%d' % (secs, tenth)
def pretty_space(space, is_rate=False):
'''Returns a nice string representation of a number representing either
size, e.g. 10 MB, or rate, e.g. 10 MB/s.
:Parameters:
`space`: float, int
The number to convert.
`is_rate`: bool
Whether the number represents size or rate. Defaults to False.
:returns:
String representation of the space.
For example::
>>> pretty_space(10003045065)
'9.32 GB'
>>> tools.pretty_space(10003045065, is_rate=True)
'9.32 GB/s'
'''
t = '/s' if is_rate else ''
for x in ['bytes', 'KB', 'MB', 'GB']:
if space < 1024.0:
return "%3.2f %s%s" % (space, x, t)
space /= 1024.0
return "%3.2f %s%s" % (space, 'TB', t)
def byteify(val, py2_only=True):
    '''Returns a copy of the input with all strings in the input converted to
    bytes.

    :Parameters:

        `val`: object
            The object to convert. Dicts and lists are converted
            recursively, text is UTF-8 encoded, anything else is returned
            unchanged.
        `py2_only`: bool
            If the conversion should happen in Python 2.x only. If False,
            it's always converted. If True, the default, it's only converted
            to bytes when running in Python 2.

    For example in python 2::

        >>> obj = {u'cheese': u'crackers', 4: [u'four', u'apple', 5, \
'cheeses']}
        >>> obj
        {u'cheese': u'crackers', 4: [u'four', u'apple', 5, 'cheeses']}
        >>> byteify(obj)
        {'cheese': 'crackers', 4: ['four', 'apple', 5, 'cheeses']}
    '''
    if not PY2 and py2_only:
        return val
    # The text type is `unicode` on py2 and `str` on py3. The conditional is
    # ordered so the name `unicode` is never evaluated on Python 3, where it
    # does not exist -- the original raised NameError here when called with
    # py2_only=False on Python 3.
    text_type = str if not PY2 else unicode  # noqa: F821
    if isinstance(val, dict):
        # Propagate py2_only so nested values are actually converted; the
        # original recursion used the default and skipped children on py3.
        return {byteify(key, py2_only): byteify(value, py2_only)
                for key, value in val.items()}
    elif isinstance(val, list):
        return [byteify(element, py2_only) for element in val]
    elif isinstance(val, text_type):
        return val.encode('utf-8')
    else:
        return val
def unicodify(val, py3_only=False):
    '''Returns a copy of the input with every ``bytes`` value decoded to
    text as UTF-8, recursing into dicts and lists.

    If `py3_only` is True the input is returned untouched on Python 2.
    '''
    if PY2 and py3_only:
        return val
    if isinstance(val, bytes):
        return val.decode('utf-8')
    if isinstance(val, list):
        return [unicodify(item) for item in val]
    if isinstance(val, dict):
        return {unicodify(k): unicodify(v) for k, v in val.items()}
    return val
def json_dumps(value):
    '''Serialize *value* as pretty-printed JSON with sorted keys.'''
    options = {'sort_keys': True, 'indent': 4, 'separators': (',', ': ')}
    return json.dumps(value, **options)
def json_loads(value):
    '''Deserialize JSON text, coercing strings to bytes on Python 2 only.'''
    return byteify(json.loads(value), True)
def _get_yaml():
    '''Create a ruamel ``YAML`` instance configured for safe (un)loading.'''
    return YAML(typ='safe')
def yaml_dumps(value):
    '''Serialize *value* to a YAML string using the safe dumper.'''
    buffer = StringIO()
    dumper = _get_yaml()
    dumper.preserve_quotes = True
    dumper.dump(value, buffer)
    return buffer.getvalue()
def yaml_loads(value):
    '''Parse YAML text into Python objects using the safe loader.'''
    return _get_yaml().load(value)
class ColorTheme(EventDispatcher):
    '''Color palette exposed as Kivy properties so kv rules can bind to it.

    Default values from https://www.materialpalette.com/amber/indigo
    '''

    # Amber primary palette: dark / base / light, plus text color used on it.
    primary_dark = StringProperty(get_color_from_hex('FFA000FF'))

    primary = StringProperty(get_color_from_hex('FFC107FF'))

    primary_light = StringProperty(get_color_from_hex('FFECB3FF'))

    primary_text = StringProperty(get_color_from_hex('FFFFFFFF'))

    # "This is different." in the original -- presumably marking that the
    # accent comes from another (indigo) palette; TODO confirm.
    accent = StringProperty(get_color_from_hex('536DFEFF'))

    text_primary = StringProperty(get_color_from_hex('212121FF'))

    text_secondary = StringProperty(get_color_from_hex('757575FF'))

    divider = StringProperty(get_color_from_hex('BDBDBDFF'))

    @staticmethod
    def interpolate(color1, color2, fraction):
        """Linearly interpolate between two color channel sequences.

        Each channel is computed as ``(c2 - c1) * fraction + c1`` and
        clamped to the [0, 1] range; returns a new list.
        """
        color = []
        for c1, c2 in zip(color1, color2):
            c = min(max((c2 - c1) * fraction + c1, 0), 1)
            color.append(c)
        return color
class KVBehavior(object):
    '''Empty marker base class; registered with the kv Factory below so kv
    rules can reference it.
    '''
    pass
def apply_args_post(cls, **keywordargs):
    '''Return a factory callable that instantiates *cls* and then assigns
    each of ``keywordargs`` as an attribute on the new instance.

    Positional and keyword arguments passed to the factory are forwarded
    to the class constructor unchanged.
    '''
    def make_instance(*args, **kwargs):
        instance = cls(*args, **kwargs)
        for name, value in keywordargs.items():
            setattr(instance, name, value)
        return instance
    return make_instance
# Expose these classes to the kv language via the Kivy Factory.
Factory.register(classname='ColorTheme', cls=ColorTheme)
Factory.register(classname='KVBehavior', cls=KVBehavior)
| {
"content_hash": "6a4b4f62f4d3944ebdf88e6a1c12182d",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 79,
"avg_line_length": 27.555,
"alnum_prop": 0.5868263473053892,
"repo_name": "matham/cplcom",
"id": "4968ecf6220af8320cba76fbf3f64fcf4416f0cd",
"size": "5511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cplcom/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "173272"
}
],
"symlink_target": ""
} |
from flask import Blueprint, redirect, render_template, session, url_for
from app.utils import get_logger
# Blueprint serving the notification pages under the /notification URL prefix.
mod_notification = Blueprint('mod_notification', __name__, static_folder='../static',
                             url_prefix='/notification')

# Module-level logger from the project's logging helper.
logger = get_logger(__name__)
@mod_notification.route('/all', methods=['GET'])
def all():
    """Render the notification page; anonymous users are sent to sign-in.

    NOTE: the name shadows the builtin ``all``; it is kept because the
    Flask endpoint name (used by ``url_for``) derives from it.
    """
    if 'uid' not in session:
        return redirect(url_for('mod_auth.signin'))
    extra = {'uid': session['uid'], 'en_name': session['en_name']}
    logger.info('/notification/all', extra=extra)
    return render_template('notification.html')
| {
"content_hash": "fea739cff9a7b145b80a7dacc2d46acc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 85,
"avg_line_length": 32.21052631578947,
"alnum_prop": 0.6111111111111112,
"repo_name": "hellock/labman",
"id": "4b1b6dbb274647ca8903ae8fcc3a5ad30f9d9f97",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/notification/controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "696784"
},
{
"name": "HTML",
"bytes": "3468419"
},
{
"name": "JavaScript",
"bytes": "2908592"
},
{
"name": "PHP",
"bytes": "3916"
},
{
"name": "Python",
"bytes": "47590"
}
],
"symlink_target": ""
} |
import click
import webbrowser
import sys
import floyd
from floyd.client.auth import AuthClient
from floyd.manager.auth_config import AuthConfigManager
from floyd.model.access_token import AccessToken
from floyd.model.credentials import Credentials
from floyd.log import logger as floyd_logger
@click.command()
@click.option('--token', is_flag=True, default=False, help='Just enter token')
@click.option('--username', '-u', help='FloydHub username')
@click.option('--password', '-p', help='FloydHub password')
def login(token, username, password):
    """
    Log into Floyd via Auth0.
    """
    if username:
        # Username/password flow: prompt for the password when it was not
        # supplied on the command line.
        if not password:
            password = click.prompt('Please enter your password', type=str, hide_input=True)
        password = password.strip()
        if not password:
            floyd_logger.info('You entered an empty string. Please make sure you enter your password correctly.')
            sys.exit(1)
        credentials = Credentials(username=username,
                                  password=password)
        access_code = AuthClient().login(credentials)
        if not access_code:
            floyd_logger.info("Failed to login")
            return
    else:
        # Browser/token flow: optionally open the settings page, then read
        # the token the user pastes in.
        if not token:
            cli_info_url = "{}/settings/security".format(floyd.floyd_web_host)
            click.confirm('Authentication token page will now open in your browser. Continue?',
                          abort=True,
                          default=True)
            webbrowser.open(cli_info_url)
        floyd_logger.info("Please copy and paste the authentication token.")
        access_code = click.prompt('This is an invisible field. Paste token and press ENTER', type=str, hide_input=True)
        access_code = access_code.strip()
        if not access_code:
            floyd_logger.info("Empty token received. Make sure your shell is handling the token appropriately.")
            floyd_logger.info("See docs for help: http://docs.floydhub.com/faqs/authentication/")
            return

    access_code = access_code.strip(" ")
    user = AuthClient().get_user(access_code)
    AuthConfigManager.set_access_token(
        AccessToken(username=user.username,
                    token=access_code))
    floyd_logger.info("Login Successful as %s", user.username)
@click.command()
def logout():
    """
    Logout of Floyd.
    """
    # Only the locally stored access token is removed; no server call is made.
    AuthConfigManager.purge_access_token()
| {
"content_hash": "eef24aa58b3d2f0a3e84604e646c9f12",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 120,
"avg_line_length": 36.857142857142854,
"alnum_prop": 0.6387596899224807,
"repo_name": "mckayward/floyd-cli",
"id": "a33e89af76875eab7f35ba465d3dbb1f151b5b2b",
"size": "2580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "floyd/cli/auth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "107"
},
{
"name": "Python",
"bytes": "154529"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
"""Translation helper functions."""
from __future__ import unicode_literals
from collections import OrderedDict
import locale
import os
import re
import sys
import gettext as gettext_module
from importlib import import_module
from threading import local
import warnings
from django.utils.encoding import force_str, force_text
from django.utils.functional import memoize
from django.utils._os import upath
from django.utils.safestring import mark_safe, SafeData
from django.utils import six
from django.utils.six import StringIO
from django.utils.translation import TranslatorCommentWarning
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = local()
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
_checked_languages = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = "\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9
# and RFC 3066, section 2.1
accept_language_re = re.compile(r'''
([A-Za-z]{1,8}(?:-[A-Za-z0-9]{1,8})*|\*) # "en", "en-au", "x-y-z", "es-419", "*"
(?:\s*;\s*q=(0(?:\.\d{,3})?|1(?:.0{,3})?))? # Optional "q=1.00", "q=0.8"
(?:\s*,\s*|$) # Multiple accepts per header.
''', re.VERBOSE)
language_code_prefix_re = re.compile(r'^/([\w-]+)(/|$)')
def to_locale(language, to_lower=False):
    """
    Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
    True, the last component is lower-cased (en_us).
    """
    idx = language.find('-')
    if idx < 0:
        # No region subtag at all.
        return language.lower()
    prefix = language[:idx].lower()
    rest = language[idx + 1:]
    if to_lower:
        return prefix + '_' + rest.lower()
    if len(rest) > 2:
        # Title-case long subtags so e.g. sr-latn becomes sr_Latn.
        return prefix + '_' + rest[0].upper() + rest[1:].lower()
    return prefix + '_' + rest.upper()
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    This class sets up the GNUTranslations context with regard to output
    charset.
    """
    def __init__(self, *args, **kw):
        gettext_module.GNUTranslations.__init__(self, *args, **kw)
        # All translated strings are emitted as UTF-8.
        self.set_output_charset('utf-8')
        # Language code this catalog serves; '??' until set_language() runs.
        self.__language = '??'

    def merge(self, other):
        # Fold another catalog's messages into this one; on key clashes the
        # other catalog wins (dict.update semantics).
        self._catalog.update(other._catalog)

    def set_language(self, language):
        # Record both the raw code and its language-name form (en_US -> en-us).
        self.__language = language
        self.__to_language = to_language(language)

    def language(self):
        return self.__language

    def to_language(self):
        return self.__to_language

    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
    """
    Returns a translation object.

    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct a object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    global _translations

    # Serve from the per-process cache when possible.
    t = _translations.get(language, None)
    if t is not None:
        return t

    from django.conf import settings

    # Locale directory shipped next to the project's settings module.
    globalpath = os.path.join(os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')

    def _fetch(lang, fallback=None):
        global _translations

        res = _translations.get(lang, None)
        if res is not None:
            return res

        loc = to_locale(lang)

        def _translation(path):
            # Load a single 'django' domain catalog; missing files are fine.
            try:
                t = gettext_module.translation('django', path, [loc], DjangoTranslation)
                t.set_language(lang)
                return t
            except IOError:
                return None

        res = _translation(globalpath)

        # We want to ensure that, for example, "en-gb" and "en-us" don't share
        # the same translation object (thus, merging en-us with a local update
        # doesn't affect en-gb), even though they will both use the core "en"
        # translation. So we have to subvert Python's internal gettext caching.
        base_lang = lambda x: x.split('-', 1)[0]
        if base_lang(lang) in [base_lang(trans) for trans in list(_translations)]:
            res._info = res._info.copy()
            res._catalog = res._catalog.copy()

        def _merge(path):
            # Merge a catalog from `path` into `res` (or adopt it when `res`
            # is still None).
            t = _translation(path)
            if t is not None:
                if res is None:
                    return t
                else:
                    res.merge(t)
            return res

        # Reversed iteration makes earlier entries override later ones.
        for appname in reversed(settings.INSTALLED_APPS):
            app = import_module(appname)
            apppath = os.path.join(os.path.dirname(upath(app.__file__)), 'locale')
            if os.path.isdir(apppath):
                res = _merge(apppath)

        for localepath in reversed(settings.LOCALE_PATHS):
            if os.path.isdir(localepath):
                res = _merge(localepath)

        if res is None:
            if fallback is not None:
                res = fallback
            else:
                return gettext_module.NullTranslations()
        _translations[lang] = res
        return res

    default_translation = _fetch(settings.LANGUAGE_CODE)
    current_translation = _fetch(language, fallback=default_translation)

    return current_translation
def activate(language):
    """
    Fetches the translation object for a given language and installs it as
    the current translation object for the current thread.
    """
    _active.value = translation(language)
def deactivate():
    """
    Deinstalls the currently active translation object so that further _ calls
    will resolve against the default translation object, again.
    """
    # Only the thread-local slot is cleared; cached catalogs remain.
    if hasattr(_active, "value"):
        del _active.value
def deactivate_all():
    """
    Makes the active translation object a NullTranslations() instance. This is
    useful when we want delayed translations to appear as the original string
    for some reason.
    """
    # NullTranslations passes every message through unchanged.
    _active.value = gettext_module.NullTranslations()
def get_language():
    """Returns the currently selected language."""
    t = getattr(_active, "value", None)
    if t is not None:
        try:
            return t.to_language()
        except AttributeError:
            # e.g. a NullTranslations instance has no to_language().
            pass
    # If we don't have a real translation object, assume it's the default language.
    from django.conf import settings
    return settings.LANGUAGE_CODE
def get_language_bidi():
    """
    Returns selected language's BiDi layout.

    * False = left-to-right layout
    * True = right-to-left layout
    """
    from django.conf import settings
    # Compare the base language ('en' from 'en-us') against the RTL list.
    base_lang = get_language().split('-')[0]
    return base_lang in settings.LANGUAGES_BIDI
def catalog():
    """
    Returns the current active catalog for further processing.

    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default
    t = getattr(_active, "value", None)
    if t is not None:
        return t
    # Lazily build the default-language translation on first use.
    if _default is None:
        from django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return _default
def do_translate(message, translation_function):
    """
    Translates 'message' using the given 'translation_function' name -- which
    will be either gettext or ugettext. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default

    # str() is allowing a bytestring message to remain bytestring on Python 2
    # Normalize CRLF/CR to LF so catalog lookups match.
    eol_message = message.replace(str('\r\n'), str('\n')).replace(str('\r'), str('\n'))
    t = getattr(_active, "value", None)
    if t is not None:
        result = getattr(t, translation_function)(eol_message)
    else:
        if _default is None:
            from django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        result = getattr(_default, translation_function)(eol_message)
    # Preserve the safe-string marking of the input on the output.
    if isinstance(message, SafeData):
        return mark_safe(result)
    return result
def gettext(message):
    """
    Returns a string of the translation of the message.

    Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
    """
    return do_translate(message, 'gettext')

if six.PY3:
    # There is no separate unicode type on Python 3, so ugettext is an alias.
    ugettext = gettext
else:
    def ugettext(message):
        return do_translate(message, 'ugettext')
def pgettext(context, message):
    """
    Translates 'message' disambiguated by 'context' (gettext msgctxt).
    Falls back to the plain message when no contextual translation exists
    (the separator then survives the lookup).
    """
    msg_with_ctxt = "%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = ugettext(msg_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = message
    return result
def gettext_noop(message):
    """
    Marks strings for translation but doesn't translate them now. This can be
    used to store strings in global variables that should stay in the base
    language (because they might be used externally) and will be translated
    later.
    """
    # Deliberately the identity function.
    return message
def do_ntranslate(singular, plural, number, translation_function):
    """
    Translates the singular/plural pair using the given ngettext-style
    'translation_function' name on the active (or lazily-built default)
    translation object, selecting the form appropriate for 'number'.
    """
    global _default

    t = getattr(_active, "value", None)
    if t is not None:
        return getattr(t, translation_function)(singular, plural, number)
    if _default is None:
        from django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return getattr(_default, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
    """
    Returns a string of the translation of either the singular or plural,
    based on the number.

    Returns a string on Python 3 and an UTF-8-encoded bytestring on Python 2.
    """
    return do_ntranslate(singular, plural, number, 'ngettext')

if six.PY3:
    # No separate unicode type on Python 3, so ungettext is an alias.
    ungettext = ngettext
else:
    def ungettext(singular, plural, number):
        """
        Returns a unicode strings of the translation of either the singular or
        plural, based on the number.
        """
        return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
    """
    Plural variant of pgettext(): translates the singular/plural pair
    disambiguated by 'context', falling back to the context-less
    translation when no contextual one exists.
    """
    msgs_with_ctxt = ("%s%s%s" % (context, CONTEXT_SEPARATOR, singular),
                      "%s%s%s" % (context, CONTEXT_SEPARATOR, plural),
                      number)
    result = ungettext(*msgs_with_ctxt)
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = ungettext(singular, plural, number)
    return result
def all_locale_paths():
    """
    Returns a list of paths to user-provided language files, starting with
    the locale directory next to the settings module, followed by
    settings.LOCALE_PATHS.
    """
    from django.conf import settings
    globalpath = os.path.join(
        os.path.dirname(upath(sys.modules[settings.__module__].__file__)), 'locale')
    return [globalpath] + list(settings.LOCALE_PATHS)
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available. This is only used for language codes from either the cookies
    or session and during format localization.
    """
    for path in all_locale_paths():
        if gettext_module.find('django', path, [to_locale(lang_code)]) is not None:
            return True
    return False
# Results are memoized per lang_code (one hashed argument) in
# _checked_languages, since the filesystem probe is relatively expensive.
check_for_language = memoize(check_for_language, _checked_languages, 1)
def get_supported_language_variant(lang_code, supported=None, strict=False):
    """
    Returns the language-code that's listed in supported languages, possibly
    selecting a more generic variant. Raises LookupError if nothing found.

    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the currently checked is not found.
    """
    if supported is None:
        from django.conf import settings
        supported = OrderedDict(settings.LANGUAGES)
    if lang_code:
        # if fr-CA is not supported, try fr-ca; if that fails, fallback to fr.
        generic_lang_code = lang_code.split('-')[0]
        # Exact code first (as given, then lowercased), then the generic base.
        variants = (lang_code, lang_code.lower(), generic_lang_code,
                    generic_lang_code.lower())
        for code in variants:
            if code in supported and check_for_language(code):
                return code
        if not strict:
            # if fr-fr is not supported, try fr-ca.
            for supported_code in supported:
                if supported_code.startswith((generic_lang_code + '-',
                                              generic_lang_code.lower() + '-')):
                    return supported_code
    raise LookupError(lang_code)
def get_language_from_path(path, supported=None, strict=False):
    """
    Returns the language-code if there is a valid language-code
    found in the `path`, otherwise None.

    If `strict` is False (the default), the function will look for an alternative
    country-specific variant when the currently checked is not found.
    """
    if supported is None:
        from django.conf import settings
        supported = OrderedDict(settings.LANGUAGES)
    # The language code must be the first path segment, e.g. /en-us/....
    regex_match = language_code_prefix_re.match(path)
    if not regex_match:
        return None
    lang_code = regex_match.group(1)
    try:
        return get_supported_language_variant(lang_code, supported, strict=strict)
    except LookupError:
        return None
def get_language_from_request(request, check_path=False):
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.

    If check_path is True, the URL path prefix will be checked for a language
    code, otherwise this is skipped for backwards compatibility.

    Lookup order: URL path (optional), session, language cookie, the
    Accept-Language header, and finally settings.LANGUAGE_CODE.
    """
    global _accepted
    from django.conf import settings
    supported = OrderedDict(settings.LANGUAGES)
    if check_path:
        lang_code = get_language_from_path(request.path_info, supported)
        if lang_code is not None:
            return lang_code
    if hasattr(request, 'session'):
        lang_code = request.session.get('django_language', None)
        if lang_code in supported and lang_code is not None and check_for_language(lang_code):
            return lang_code
    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    try:
        return get_supported_language_variant(lang_code, supported)
    except LookupError:
        pass
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    # Entries come back sorted by descending q-value.
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break
        # 'normalized' is the root name of the locale in POSIX format (which is
        # the format used for the directories holding the MO files).
        normalized = locale.locale_alias.get(to_locale(accept_lang, True))
        if not normalized:
            continue
        # Remove the default encoding from locale_alias.
        normalized = normalized.split('.')[0]
        if normalized in _accepted:
            # We've seen this locale before and have an MO file for it, so no
            # need to check again.
            return _accepted[normalized]
        try:
            accept_lang = get_supported_language_variant(accept_lang, supported)
        except LookupError:
            continue
        else:
            _accepted[normalized] = accept_lang
            return accept_lang
    try:
        return get_supported_language_variant(settings.LANGUAGE_CODE, supported)
    except LookupError:
        return settings.LANGUAGE_CODE
# Matches any single non-whitespace character.
dot_re = re.compile(r'\S')

def blankout(src, char):
    """
    Changes every non-whitespace character to the given char.
    Used in the templatize function, to preserve line numbers and
    whitespace while hiding the original content from xgettext.
    """
    return dot_re.sub(char, src)
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*""")
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
one_percent_re = re.compile(r"""(?<!%)%(?!%)""")
def templatize(src, origin=None):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.

    The function is a token-stream state machine: `incomment` tracks
    {% comment %} blocks, `intrans` tracks {% trans %}/{% blocktrans %}
    blocks (with `inplural` for the plural branch), and everything else is
    blanked out so xgettext sees only the translatable pieces.
    """
    from django.conf import settings
    from django.template import (Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK,
            TOKEN_COMMENT, TRANSLATOR_COMMENT_MARK)
    src = force_text(src, settings.FILE_CHARSET)
    out = StringIO()
    message_context = None
    intrans = False
    inplural = False
    singular = []
    plural = []
    incomment = False
    comment = []
    lineno_comment_map = {}
    comment_lineno_cache = None

    for t in Lexer(src, origin).tokenize():
        if incomment:
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                # Find the last line that starts a translator comment.
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        if message_context:
                            out.write(' npgettext(%r, %r, %r,count) ' % (message_context, ''.join(singular), ''.join(plural)))
                        else:
                            out.write(' ngettext(%r, %r, count) ' % (''.join(singular), ''.join(plural)))
                        # Blank out the collected parts to keep line numbers.
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext(%r, %r) ' % (message_context, ''.join(singular)))
                        else:
                            out.write(' gettext(%r) ' % ''.join(singular))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
            elif t.token_type == TOKEN_VAR:
                # Template variables become named string-format placeholders.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = one_percent_re.sub('%%', t.contents)
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count('\n')
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TOKEN_COMMENT:
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ''
                            if origin:
                                filemsg = 'file %s, ' % origin
                            warn_msg = ("The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't the last item "
                                "on the line.") % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                comment_lineno_cache = None

            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = one_percent_re.sub('%%', g)
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext(%r, %r) ' % (message_context, g))
                        message_context = None
                    else:
                        out.write(' gettext(%r) ' % g)
                elif bmatch:
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':',1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    # Remember translator comments per line; they are only
                    # emitted if they end up last on their line (see above).
                    lineno_comment_map.setdefault(t.lineno,
                                                  []).append(t.contents)
                    comment_lineno_cache = t.lineno
                else:
                    out.write(blankout(t.contents, 'X'))
    return force_str(out.getvalue())
def parse_accept_lang_header(lang_string):
    """
    Parses the lang_string, which is the body of an HTTP Accept-Language
    header, and returns a list of (lang, q-value), ordered by 'q' values.

    Any format errors in lang_string results in an empty list being returned.
    """
    pieces = accept_language_re.split(lang_string)
    # A well-formed header leaves an empty trailing piece; anything else
    # means the string did not fully match the grammar.
    if pieces[-1]:
        return []
    parsed = []
    for index in range(0, len(pieces) - 1, 3):
        prefix, lang, quality = pieces[index:index + 3]
        if prefix:
            return []
        if quality:
            try:
                quality = float(quality)
            except ValueError:
                return []
        # A missing or zero q-value is treated as full preference.
        if not quality:
            quality = 1.0
        parsed.append((lang, quality))
    parsed.sort(key=lambda entry: entry[1], reverse=True)
    return parsed
| {
"content_hash": "c30f176d66682e052e2f8a913347185f",
"timestamp": "",
"source": "github",
"line_count": 679,
"max_line_length": 143,
"avg_line_length": 37.74521354933726,
"alnum_prop": 0.5730617659682391,
"repo_name": "ZhaoCJ/django",
"id": "c7b41fa3734bd83db37bfddccec4992d0edf60b0",
"size": "25629",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django/utils/translation/trans_real.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import main
import robocup
import constants
## Estimates the length of a path given a start and end point
# @param start The start point of the path
# @param end The end point of the path
# @param blocking_robots The list of robots to dodge
# @param dodge_dist The amount of distance to dodge left or right to miss a robot
# @return the estimated distance from start to end, including dodge detours
def estimate_path_length(start, end, blocking_robots, dodge_dist):
    total = 0
    next_pt = start
    prev_pt = start
    line = robocup.Segment(start, end)
    iterations = 0
    max_iterations = 10
    # While there is a robot in the way (capped so pathological layouts
    # cannot loop forever)
    blocking_robot = find_intersecting_robot(line, blocking_robots, dodge_dist)
    while (blocking_robot is not None) and (iterations < max_iterations):
        # The two candidate dodge waypoints sit dodge_dist to either side of
        # the blocking robot, perpendicular to our direction of approach.
        robot_vec = (blocking_robot.pos - next_pt)
        perp_vec = robot_vec.perp_cw().normalized()
        offset1 = perp_vec * dodge_dist + blocking_robot.pos - next_pt
        offset2 = perp_vec * -dodge_dist + blocking_robot.pos - next_pt
        # Take the nearer of the two candidate waypoints
        if (offset1.mag() < offset2.mag()):
            offset = offset1
        else:
            offset = offset2
        # BUGFIX: offset is a displacement *from* next_pt, not an absolute
        # point; the previous code assigned it directly to next_pt, which
        # collapsed every subsequent waypoint toward the origin.
        next_pt = next_pt + offset
        # Add dist to total
        total += (next_pt - prev_pt).mag()
        prev_pt = next_pt
        line = robocup.Segment(next_pt, end)
        blocking_robot = find_intersecting_robot(line, blocking_robots,
                                                 dodge_dist)
        iterations += 1
    total += (end - next_pt).mag()
    return total
## Whether any robot can collect the ball before the opponent
# @param our_robots_to_check List of our robots that can move to ball
# @param their_robots_to_check List of their robots that can move to ball
# @param our_robots_to_dodge List of our robots that can be considered obstacles to dodge
# @param their_robots_to_dodge List of their robots that can be considered obstacles to dodge
# @param valid_error_percent Wiggle room so if the path is slightly off, it still tries if it is close
# @return Tuple
#           Whether we can collect the ball before the opponent
#           The closest robot on our team
# @note If any inputs are None, their values are defaulted
def can_collect_ball_before_opponent(our_robots_to_check=None,
                                     their_robots_to_check=None,
                                     our_robots_to_dodge=None,
                                     their_robots_to_dodge=None,
                                     valid_error_percent=0.05):
    # Default any unsupplied lists to the full teams.  Done at call time so
    # the defaults are never stale or shared between calls.
    if our_robots_to_check is None:
        our_robots_to_check = main.our_robots()
    if their_robots_to_check is None:
        their_robots_to_check = main.their_robots()
    if our_robots_to_dodge is None:
        our_robots_to_dodge = main.our_robots()
    if their_robots_to_dodge is None:
        their_robots_to_dodge = main.their_robots()
    shortest_opp_time = float("inf")
    shortest_our_time = float("inf")
    dodge_dist = constants.Robot.Radius
    closest_robot = None
    # TODO: Do some sort of prediction as the ball moves
    target_pos = main.ball().pos
    # TODO: Take velocity and acceleration into account
    # Find closest opponent robot (shortest travel time to the ball, with
    # our robots treated as obstacles on their path)
    for bot in their_robots_to_check:
        dist = estimate_path_length(bot.pos, target_pos, our_robots_to_dodge,
                                    dodge_dist)
        target_dir = (target_pos - bot.pos).normalized()
        # Trapezoidal motion-profile time; the initial speed is the component
        # of the bot's velocity toward the target.
        # NOTE(review): target_dir is normalized, so dividing by
        # target_dir.mag() is presumably a no-op — confirm against
        # robocup.Point semantics.
        time = robocup.get_trapezoidal_time(dist, dist, 2.2, 1,
                                            target_dir.dot(bot.vel) /
                                            target_dir.mag(), 0)
        if (time < shortest_opp_time):
            shortest_opp_time = time
    # Find closest robot on our team (same estimate, dodging their robots)
    for bot in our_robots_to_check:
        dist = estimate_path_length(bot.pos, target_pos, their_robots_to_dodge,
                                    dodge_dist)
        target_dir = (target_pos - bot.pos).normalized()
        time = robocup.get_trapezoidal_time(dist, dist, 2.2, 1,
                                            target_dir.dot(bot.vel) /
                                            target_dir.mag(), 0)
        if (time < shortest_our_time):
            shortest_our_time = time
            closest_robot = bot
    # Give our side a valid_error_percent margin before conceding the race.
    return shortest_our_time < shortest_opp_time * (1 + valid_error_percent
                                                    ), closest_robot
## Finds the first robot that intersects the line within dodge_dist
# @param line The line to compare the robot locations to
# @param blocking_robots List of robots to check against the line
# @param dodge_dist Distance cutoff between the line and robot positions
# @return The robot (or None) that intersects the line within dodge_dist
def find_intersecting_robot(line, blocking_robots, dodge_dist):
    return next((bot for bot in blocking_robots
                 if line.dist_to(bot.pos) < dodge_dist), None)
| {
"content_hash": "faddd8e8bc7064bc47d3f249bbc8c74f",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 103,
"avg_line_length": 39.92741935483871,
"alnum_prop": 0.6021005857402545,
"repo_name": "JNeiger/robocup-software",
"id": "5662b56a099fcf8f6b77da3719b50b817ece78e0",
"size": "4951",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "soccer/gameplay/evaluation/path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2990"
},
{
"name": "C++",
"bytes": "1083792"
},
{
"name": "CMake",
"bytes": "112437"
},
{
"name": "Dockerfile",
"bytes": "2872"
},
{
"name": "MATLAB",
"bytes": "31229"
},
{
"name": "Makefile",
"bytes": "5816"
},
{
"name": "Python",
"bytes": "735005"
},
{
"name": "Shell",
"bytes": "21468"
}
],
"symlink_target": ""
} |
"""
tools.fileio.py contains helper utilities for file reading and writing.
"""
import csv
from collections import namedtuple
from contextlib import contextmanager
from io import BufferedReader
from zipfile import ZipFile
from foil.filters import create_indexer
class TextReader:
    """Iterate over the lines of a text file, with line endings stripped.

    Parameters
    ----------
    path : Absolute path to text file.
    encoding : File encoding.
    """

    def __init__(self, path: str, encoding: str):
        self.path = path
        self.encoding = encoding

    def __iter__(self):
        # The file is opened lazily on each iteration pass and closed when
        # the generator is exhausted or discarded.
        with open(self.path, 'r', encoding=self.encoding) as handle:
            for raw_line in handle:
                yield raw_line.strip('\r\n')
class DelimitedReader:
    """Read delimited text stream into namedtuple Records.

    Attributes
    ----------
    stream: stream of text.
    dialect: delimited file attributes.
    fields: Record field names.
    converters: casting functions to cast fields to Python objects.
        Utilize tools.parsers.make_converters for the general use case.

    Factory Methods
    ---------------
    See factory methods for alternative constructors.
    """

    def __init__(self, stream,
                 dialect: csv.Dialect, fields: list, converters: list):
        csv_reader = csv.reader(stream, dialect=dialect)
        # The first row is consumed immediately and exposed as the header.
        self.header = next(csv_reader)
        self.reader = csv_reader
        self.converters = converters
        self.Record = namedtuple('Record', fields)

    def __iter__(self):
        return self

    def __next__(self):
        row = next(self.reader)
        converted = [convert(cell)
                     for convert, cell in zip(self.converters, row)]
        return self.Record._make(converted)

    @property
    def file_line_number(self):
        """Current line number of the underlying csv reader."""
        return self.reader.line_num

    @classmethod
    def from_file(cls, path, encoding, dialect, fields, converters):
        """Read delimited text from a text file."""
        stream = open(path, 'r', encoding=encoding)
        return cls(stream, dialect, fields, converters)

    @classmethod
    def from_zipfile(cls, path, filename, encoding, dialect, fields, converters):
        """Read delimited text from zipfile."""
        stream = ZipReader(path, filename).readlines(encoding)
        return cls(stream, dialect, fields, converters)

    @staticmethod
    def discover_headers(stream, dialect):
        """Yield the unquoted header names of a delimited stream."""
        from foil.parsers import parse_quoted_string

        header_row = DelimitedReader(stream, dialect=dialect,
                                     fields=[], converters=[]).header
        return (parse_quoted_string(name) for name in header_row)

    @staticmethod
    def file_headers(path, encoding, dialect):
        """Header names of a delimited text file."""
        stream = open(path, 'r', encoding=encoding)
        return DelimitedReader.discover_headers(stream, dialect=dialect)

    @staticmethod
    def zipfile_headers(path, filename, encoding, dialect):
        """Header names of a delimited file inside a zip archive."""
        stream = ZipReader(path, filename).readlines(encoding)
        return DelimitedReader.discover_headers(stream, dialect)
class DelimitedSubsetReader(DelimitedReader):
    """Read delimited text into namedtuple Records ignoring certain fields."""

    def __init__(self, stream, dialect: csv.Dialect, fields: list,
                 converters: list, field_index: list):
        super().__init__(stream, dialect, fields, converters)
        # Callable that selects only the wanted columns from a raw row.
        self.indexer = create_indexer(field_index)

    def __next__(self):
        selected = self.indexer(next(self.reader))
        converted = [convert(cell)
                     for convert, cell in zip(self.converters, selected)]
        return self.Record._make(converted)

    @classmethod
    def from_file(cls, path, encoding, dialect, fields, converters, field_index):
        """Read delimited text from a text file."""
        stream = open(path, 'r', encoding=encoding)
        return cls(stream, dialect, fields, converters, field_index)

    @classmethod
    def from_zipfile(cls, path, filename, encoding, dialect, fields,
                     converters, field_index):
        """Read delimited text from zipfile."""
        stream = ZipReader(path, filename).readlines(encoding)
        return cls(stream, dialect, fields, converters, field_index)
class ZipReader:
    """Read one named member file out of a zip archive.

    Parameters
    ----------
    path : Absolute path to zip file archive.
    filename : File name in archive to read.
    """

    def __init__(self, path: str, filename: str):
        self.path = path
        self.filename = filename

    def read(self, encoding):
        """Read content into encoded str."""
        return self.read_bytes().decode(encoding)

    def read_bytes(self):
        """Read content into byte string."""
        with ZipFile(self.path, mode='r') as archive:
            return archive.read(self.filename)

    def readlines(self, encoding):
        """Read content into encoded str line generator."""
        return (raw.decode(encoding) for raw in self.readlines_bytes())

    def readlines_bytes(self):
        """Read content into byte str line iterator."""
        with open_zipfile_archive(self.path, self.filename) as member:
            for raw in member:
                # Strip only the trailing line ending, keep other whitespace.
                yield raw.rstrip(b'\r\n')
@contextmanager
def open_zipfile_archive(path, filename):
    """Yield a buffered binary reader over one member of a zip archive."""
    with ZipFile(path, mode='r') as archive, \
            BufferedReader(archive.open(filename, mode='r')) as member:
        yield member
def concatenate_streams(streams):
    """Chain a sequence of iterators into a single stream."""
    for stream in streams:
        for item in stream:
            yield item
| {
"content_hash": "359e60664b444d7584150c86308de858",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 96,
"avg_line_length": 29.838709677419356,
"alnum_prop": 0.6342342342342342,
"repo_name": "portfoliome/foil",
"id": "3df5b1afca7365803d2e177524883f3480e422ac",
"size": "5550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "foil/fileio.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76745"
},
{
"name": "Shell",
"bytes": "258"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import time
import re
import codecs
import os
import math
import argparse
import textwrap
import sys
from StringIO import StringIO
import iso3166
import pyxb.binding.datatypes as xsd
import pyxb as pyxb
from pyxb.utils.six.moves.urllib import request as urllib_request
from GeodesyMLToSiteLog import geodesymltositelog_bindings # Bindings generated by PyXB
import pyxb.utils.domutils as domutils
class SiteLog(object):
    """Wraps a parsed GeodesyML site log and provides the formatting helpers
    used to render the legacy fixed-width IGS text site-log format.
    """

    def __init__(self, xml):
        """Parse a GeodesyML document (XML string) into PyXB bindings.

        Raises Exception when the document does not contain exactly one
        site log.
        """
        doc = domutils.StringToDOM(xml)
        try:
            response = geodesymltositelog_bindings.CreateFromDOM(doc.documentElement)
        except (pyxb.MissingAttributeError, pyxb.UnrecognizedContentError, pyxb.IncompleteElementContentError) as e:
            # Report the validation problem, then retry with lenient parsing
            # so slightly non-conformant documents can still be read.
            print(e.details())
            pyxb.RequireValidWhenParsing(False)
            response = geodesymltositelog_bindings.CreateFromDOM(doc.documentElement)
        if len(response.AbstractSiteLog) == 1:
            self.siteLog = response.AbstractSiteLog[0]
        else:
            self.siteLog = None
            raise Exception('XML format probably is incorrect')

    def siteLogType(self):
        """Return the underlying PyXB site-log binding object."""
        return self.siteLog

    @classmethod
    def complexValue(cls, item):
        """Return item.value(), or "" when the element is absent."""
        return "" if item is None else item.value()

    @classmethod
    def simpleValue(cls, item, pattern=None):
        """Format a simple element as a stripped string ("" if absent/nil).

        pattern, when given, is a str.format template applied to the item.
        """
        if item is None or item._isNil():
            return ""
        else:
            return (pattern.format(item) if pattern else u'{}'.format(item)).strip()

    @classmethod
    def dateTime(cls, timePosition):
        """Format a time position as CCYY-MM-DDThh:mmZ (placeholders if empty)."""
        return cls.date(timePosition, True)

    @classmethod
    def date(cls, timePosition, withTime=False):
        """Format a gml time position as CCYY-MM-DD (optionally Thh:mmZ).

        A missing value renders the placeholder pattern in parentheses;
        text that cannot be parsed as xsd:dateTime is passed through as-is.
        """
        pattern = "{year:0>4}-{month:0>2}-{day:0>2}"
        dateFields = { "year": "CCYY", "month": "MM", "day": "DD" }
        if withTime:
            pattern += "T{hour:0>2}:{minute:0>2}Z"
            dateFields.update({ "hour": "hh", "minute": "mm" })
        if timePosition:
            timePositionUnion = timePosition._TypeDefinition
            text = timePositionUnion.XsdLiteral(timePosition.value())
        else:
            text = None
        if not text:
            # No value supplied: emit the placeholder, e.g. "(CCYY-MM-DD)".
            return "(" + pattern.format(**dateFields) + ")"
        # Note: To map 2017-02 to 2017-02-DD, add xsd.gYearMonth to dataTypes.
        dateTypes = [xsd.dateTime]
        date = None
        for dateType in dateTypes:
            try:
                date = dateType(text)
                break
            except (xsd.SimpleTypeValueError, ValueError):
                pass
        if date:
            # Fill in only the fields the parsed date actually carries,
            # leaving the remainder as placeholders.
            for dateField in date._ValidFields if hasattr(date, "_ValidFields") else dateFields:
                dateFields[dateField] = getattr(date, dateField)
            return pattern.format(**dateFields)
        else:
            return text

    @classmethod
    def toMultiple(cls, line):
        """Wrap a long field value at 48 columns, indenting continuation
        lines to the site-log value column (32)."""
        if not line:
            return "\n"
        padding = ' ' * 32
        # 48 = 80 - 32 padding
        length = 48  # NOTE(review): unused; the width is hard-coded below
        return ('\n'+padding).join(textwrap.wrap(line, 48)) + '\n'

    @classmethod
    def country(cls, code):
        """Map an ISO 3166 alpha-3 code to a country name.

        HKG is deliberately reported as "China"; unknown codes pass through.
        """
        try:
            return "China" if code == 'HKG' else iso3166.countries.get(code).name
        except KeyError:
            return code
class Introduction(object):
    """Fixed header block at the top of every rendered site log."""

    # Template header; XXXX is substituted with the site's four-character ID.
    Content = """     XXXX Site Information Form (site log)
     International GNSS Service
     See Instructions at:
       ftp://igs.org/pub/station/general/sitelog_instr.txt\n"""

    def __init__(self, fourCharacterID):
        template = type(self).Content
        # Only the first placeholder occurrence is substituted.
        self.default = template.replace("XXXX", fourCharacterID.upper(), 1)

    def output(self):
        """Return the rendered header text."""
        return self.default
class FormInformation(object):
    """Section 0 (Form) of the site log: who prepared it and when."""

    # Static trailer; the "If Update" sub-fields are always emitted blank.
    Extra = """     If Update:
      Previous Site Log       : (ssss_ccyymmdd.log)
      Modified/Added Sections : (n.n,n.n,...)\n"""

    def __init__(self, siteLog):
        """Extract the form fields from a PyXB site-log binding."""
        formInformation = siteLog.formInformation.FormInformation
        self.reportType = SiteLog.simpleValue(formInformation.reportType)
        self.preparedBy = SiteLog.simpleValue(formInformation.preparedBy)
        self.datePrepared = SiteLog.date(formInformation.datePrepared)

    def output(self):
        """Render section 0 as fixed-width site-log text."""
        io = StringIO()
        io.write("0.   Form\n")
        io.write("\n")
        io.write("     Prepared by (full name)  : " + SiteLog.toMultiple(self.preparedBy))
        io.write("     Date Prepared            : " + self.datePrepared + "\n")
        io.write("     Report Type              : " + self.reportType + "\n")
        io.write(type(self).Extra)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text
class SiteIdentification(object):
    """Section 1 (Site Identification of the GNSS Monument) of the site log.

    Pulls the monument description fields out of the GeodesyML binding and
    renders them in the fixed-width IGS text layout.
    """

    def __init__(self, siteLog):
        siteIdentification = siteLog.siteIdentification.SiteIdentification
        self.siteName = SiteLog.simpleValue(siteIdentification.siteName)
        self.fourCharacterID = SiteLog.simpleValue(siteIdentification.fourCharacterID)
        self.monumentInscription = SiteLog.simpleValue(siteIdentification.monumentInscription)
        self.iersDOMESNumber = SiteLog.simpleValue(siteIdentification.iersDOMESNumber)
        self.cdpNumber = SiteLog.simpleValue(siteIdentification.cdpNumber)
        self.monumentDescription = SiteLog.complexValue(siteIdentification.monumentDescription)
        # Append the metre unit only when a value is actually present.
        self.heightOfTheMonument = str(SiteLog.simpleValue(siteIdentification.heightOfTheMonument))
        if str(self.heightOfTheMonument):
            self.heightOfTheMonument += " m"
        self.monumentFoundation = SiteLog.simpleValue(siteIdentification.monumentFoundation)
        self.foundationDepth = str(SiteLog.simpleValue(siteIdentification.foundationDepth))
        if str(self.foundationDepth):
            self.foundationDepth += " m"
        self.markerDescription = SiteLog.simpleValue(siteIdentification.markerDescription)
        self.dateInstalled = SiteLog.dateTime(siteIdentification.dateInstalled)
        self.geologicCharacteristic = SiteLog.complexValue(siteIdentification.geologicCharacteristic)
        self.bedrockType = SiteLog.simpleValue(siteIdentification.bedrockType)
        self.bedrockCondition = SiteLog.simpleValue(siteIdentification.bedrockCondition)
        self.fractureSpacing = SiteLog.simpleValue(siteIdentification.fractureSpacing)
        self.faultZonesNearby = SiteLog.complexValue(siteIdentification.faultZonesNearby)
        self.distance_Activity = SiteLog.simpleValue(siteIdentification.distance_Activity)
        self.notes = SiteLog.simpleValue(siteIdentification.notes)

    def output(self):
        """Render section 1 as fixed-width site-log text."""
        io = StringIO()
        io.write("1.   Site Identification of the GNSS Monument\n")
        io.write("\n")
        io.write("     Site Name                : " + self.siteName + "\n")
        io.write("     Four Character ID        : " + self.fourCharacterID + "\n")
        io.write("     Monument Inscription     : " + SiteLog.toMultiple(self.monumentInscription))
        io.write("     IERS DOMES Number        : " + self.iersDOMESNumber + "\n")
        io.write("     CDP Number               : " + self.cdpNumber + "\n")
        io.write("     Monument Description     : " + SiteLog.toMultiple(self.monumentDescription))
        io.write("       Height of the Monument : " + self.heightOfTheMonument + "\n")
        io.write("       Monument Foundation    : " + SiteLog.toMultiple(self.monumentFoundation))
        io.write("       Foundation Depth       : " + self.foundationDepth + "\n")
        io.write("     Marker Description       : " + SiteLog.toMultiple(self.markerDescription))
        io.write("     Date Installed           : " + self.dateInstalled + "\n")
        io.write("     Geologic Characteristic  : " + SiteLog.toMultiple(self.geologicCharacteristic))
        io.write("       Bedrock Type           : " + SiteLog.toMultiple(self.bedrockType))
        io.write("       Bedrock Condition      : " + SiteLog.toMultiple(self.bedrockCondition))
        io.write("       Fracture Spacing       : " + SiteLog.toMultiple(self.fractureSpacing))
        io.write("     Fault zones nearby       : " + self.faultZonesNearby + "\n")
        io.write("       Distance/activity      : " + SiteLog.toMultiple(self.distance_Activity))
        io.write("     Additional Information   : " + SiteLog.toMultiple(self.notes))
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text
def dd2dms(dd):
    """Convert decimal degrees to a (degrees, minutes, seconds) triple.

    The sign of the input is carried on the degrees component only.
    """
    minutes, seconds = divmod(abs(dd) * 3600, 60)
    degrees, minutes = divmod(minutes, 60)
    if dd < 0:
        degrees = -degrees
    return (degrees, minutes, seconds)
class SiteLocation(object):
    """Section 2 (Site Location Information) of the site log.

    Extracts the city/country fields and the approximate ITRF position
    (cartesian and geodetic) from the GeodesyML binding.
    """

    def __init__(self, siteLog):
        siteLocation = siteLog.siteLocation.SiteLocation
        self.city = SiteLog.simpleValue(siteLocation.city)
        self.state = SiteLog.simpleValue(siteLocation.state)
        self.countryCodeISO = SiteLog.complexValue(siteLocation.countryCodeISO)
        self.tectonicPlate = SiteLog.complexValue(siteLocation.tectonicPlate)
        # Each position component is optional in the source document; a
        # missing piece anywhere in the binding chain raises and maps to "".
        # (Previously bare "except:" — narrowed to "except Exception:" so
        # KeyboardInterrupt/SystemExit are no longer swallowed.)
        try:
            self.x = str(siteLocation.approximatePositionITRF.cartesianPosition.Point.pos.value()[0])
        except Exception:
            self.x = ""
        try:
            self.y = str(siteLocation.approximatePositionITRF.cartesianPosition.Point.pos.value()[1])
        except Exception:
            self.y = ""
        try:
            self.z = str(siteLocation.approximatePositionITRF.cartesianPosition.Point.pos.value()[2])
        except Exception:
            self.z = ""
        # Latitude/longitude are rendered as packed signed DMS strings,
        # e.g. +DDMMSS.SS for latitude and +DDDMMSS.SS for longitude.
        try:
            latitude = siteLocation.approximatePositionITRF.geodeticPosition.Point.pos.value()[0]
            degrees, minutes, seconds = dd2dms(latitude)
            self.lat = '{:+03.0f}'.format(degrees) + '{:02.0f}'.format(minutes) + '{:05.2f}'.format(seconds)
        except Exception:
            self.lat = ""
        try:
            longitude = siteLocation.approximatePositionITRF.geodeticPosition.Point.pos.value()[1]
            degrees, minutes, seconds = dd2dms(longitude)
            self.lng = '{:+04.0f}'.format(degrees) + '{:02.0f}'.format(minutes) + '{:05.2f}'.format(seconds)
        except Exception:
            self.lng = ""
        try:
            self.hgt = '{:<7.1f}'.format(siteLocation.approximatePositionITRF.geodeticPosition.Point.pos.value()[2]).rstrip()
        except Exception:
            self.hgt = ""
        self.notes = SiteLog.simpleValue(siteLocation.notes)

    def output(self):
        """Render section 2 as fixed-width site-log text."""
        io = StringIO()
        io.write("2.   Site Location Information\n")
        io.write("\n")
        io.write("     City or Town             : " + self.city + "\n")
        io.write("     State or Province        : " + self.state + "\n")
        io.write("     Country                  : " + SiteLog.country(self.countryCodeISO) + "\n")
        io.write("     Tectonic Plate           : " + SiteLog.toMultiple(self.tectonicPlate))
        io.write("     Approximate Position (ITRF)" + "\n")
        io.write("       X coordinate (m)       : " + self.x + "\n")
        io.write("       Y coordinate (m)       : " + self.y + "\n")
        io.write("       Z coordinate (m)       : " + self.z + "\n")
        io.write("       Latitude (N is +)      : " + self.lat + "\n")
        io.write("       Longitude (E is +)     : " + self.lng + "\n")
        io.write("       Elevation (m,ellips.)  : " + self.hgt + "\n")
        io.write("     Additional Information   : " + SiteLog.toMultiple(self.notes))
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text
def isItemDeleted(item):
    """True when the site-log item carries either deletion marker."""
    return not (item.dateDeleted is None and item.deletedReason is None)
class GnssReceiverProperty(object):
    """Section 3 (GNSS Receiver Information) of the site log.

    Collects every non-deleted receiver record, numbers them 3.1, 3.2, ...,
    and appends the blank "3.x" instruction template at the end.
    """

    Title = "3.   GNSS Receiver Information\n"

    # Blank template emitted after the real entries, per the IGS format.
    Default = """3.x  Receiver Type            : (A20, from rcvr_ant.tab; see instructions)
     Satellite System         : (GPS+GLO+GAL+BDS+QZSS+SBAS)
     Serial Number            : (A20, but note the first A5 is used in SINEX)
     Firmware Version         : (A11)
     Elevation Cutoff Setting : (deg)
     Date Installed           : (CCYY-MM-DDThh:mmZ)
     Date Removed             : (CCYY-MM-DDThh:mmZ)
     Temperature Stabiliz.    : (none or tolerance in degrees C)
     Additional Information   : (multiple lines)\n"""

    def __init__(self, siteLog):
        self.allReceivers = []
        itemList = siteLog.gnssReceiver
        if itemList:
            i = 1
            for item in itemList:
                # Deleted records are skipped and do not consume an index.
                if isItemDeleted(item):
                    continue
                gnssReceiver = self.GnssReceiver(item.GnssReceiver)
                gnssReceiver.updateIndex(i)
                self.allReceivers.append(gnssReceiver)
                i += 1

    def output(self):
        """Render section 3 (all receivers plus the blank template)."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for receiver in self.allReceivers:
            io.write(receiver.output())
        io.write(type(self).Default)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text

    class GnssReceiver(object):
        """One receiver entry; the label constants fix the column layout."""

        ReceiverType = "3.x  Receiver Type            : "
        Satellite = "     Satellite System         : "
        SerialNumber = "     Serial Number            : "
        Firmware = "     Firmware Version         : "
        Cutoff = "     Elevation Cutoff Setting : "
        Installed = "     Date Installed           : "
        Removed = "     Date Removed             : "
        Stabilizer = "     Temperature Stabiliz.    : "
        Additional = "     Additional Information   : "

        def __init__(self, receiver):
            self.receiverModel = SiteLog.complexValue(receiver.igsModelCode)
            # Join the individual satellite systems with "+", e.g. GPS+GLO.
            gnss = ""
            for system in receiver.satelliteSystem:
                satellite = SiteLog.complexValue(system)
                if gnss:
                    gnss += "+" + str(satellite)
                else:
                    gnss += str(satellite)
            self.satelliteSystem = gnss
            self.manufacturerSerialNumber = SiteLog.simpleValue(receiver.manufacturerSerialNumber)
            self.firmwareVersion = SiteLog.simpleValue(receiver.firmwareVersion)
            self.elevationCutoffSetting = SiteLog.simpleValue(receiver.elevationCutoffSetting, "{:.0f} deg")
            self.dateInstalled = SiteLog.dateTime(receiver.dateInstalled)
            self.dateRemoved = SiteLog.dateTime(receiver.dateRemoved)
            # An absent stabilization tolerance is reported as "none".
            stabilizer = SiteLog.simpleValue(receiver.temperatureStabilization)
            self.temperatureStabilization = stabilizer or "none"
            self.notes = SiteLog.simpleValue(receiver.notes)
            self.typeOfReceiver = type(self).ReceiverType

        def output(self):
            """Render this receiver entry as fixed-width text."""
            io = StringIO()
            io.write(self.typeOfReceiver + self.receiverModel + "\n")
            io.write(type(self).Satellite + self.satelliteSystem + "\n")
            io.write(type(self).SerialNumber + self.manufacturerSerialNumber + "\n")
            io.write(type(self).Firmware + self.firmwareVersion + "\n")
            io.write(type(self).Cutoff + self.elevationCutoffSetting + "\n")
            io.write(type(self).Installed + self.dateInstalled + "\n")
            io.write(type(self).Removed + self.dateRemoved + "\n")
            io.write(type(self).Stabilizer + self.temperatureStabilization + "\n")
            io.write(type(self).Additional + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text

        def updateIndex(self, index):
            """Replace the "3.x" placeholder with the actual 1-based index."""
            text = "3." + str(index)
            self.typeOfReceiver = text + self.typeOfReceiver[len(text):]
class GnssAntennaProperty(object):
    """Section 4 (GNSS Antenna Information) of the site log.

    Collects every non-deleted antenna record, numbers them 4.1, 4.2, ...,
    and appends the blank "4.x" instruction template at the end.
    """

    Title = "4.   GNSS Antenna Information\n"

    # Blank template emitted after the real entries, per the IGS format.
    Default = """4.x  Antenna Type             : (A20, from rcvr_ant.tab; see instructions)
     Serial Number            : (A*, but note the first A5 is used in SINEX)
     Antenna Reference Point  : (BPA/BCR/XXX from "antenna.gra"; see instr.)
     Marker->ARP Up Ecc. (m)  : (F8.4)
     Marker->ARP North Ecc(m) : (F8.4)
     Marker->ARP East Ecc(m)  : (F8.4)
     Alignment from True N    : (deg; + is clockwise/east)
     Antenna Radome Type      : (A4 from rcvr_ant.tab; see instructions)
     Radome Serial Number     :
     Antenna Cable Type       : (vendor & type number)
     Antenna Cable Length     : (m)
     Date Installed           : (CCYY-MM-DDThh:mmZ)
     Date Removed             : (CCYY-MM-DDThh:mmZ)
     Additional Information   : (multiple lines)\n"""

    def __init__(self, siteLog):
        self.allAntennas = []
        itemList = siteLog.gnssAntenna
        if itemList:
            i = 1
            for item in itemList:
                # Deleted records are skipped and do not consume an index.
                if isItemDeleted(item):
                    continue
                gnssAntenna = self.GnssAntenna(item.GnssAntenna)
                gnssAntenna.updateIndex(i)
                self.allAntennas.append(gnssAntenna)
                i += 1

    def output(self):
        """Render section 4 (all antennas plus the blank template)."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for antenna in self.allAntennas:
            io.write(antenna.output())
        io.write(type(self).Default)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text

    class GnssAntenna(object):
        """One antenna entry; the label constants fix the column layout."""

        AntennaType = "4.x  Antenna Type             : "
        SerialNumber = "     Serial Number            : "
        ReferencePoint = "     Antenna Reference Point  : "
        Up = "     Marker->ARP Up Ecc. (m)  : "
        North = "     Marker->ARP North Ecc(m) : "
        East = "     Marker->ARP East Ecc(m)  : "
        Alignment = "     Alignment from True N    : "
        RadomeType = "     Antenna Radome Type      : "
        RadomeSerialNumber = "     Radome Serial Number     : "
        CableType = "     Antenna Cable Type       : "
        CableLength = "     Antenna Cable Length     : "
        Installed = "     Date Installed           : "
        Removed = "     Date Removed             : "
        Additional = "     Additional Information   : "

        def __init__(self, antenna):
            # The IGS model code may hold "ANTENNA DOME" in one field; split
            # it and fall back to the explicit radome element, then "NONE".
            antennaAndDomeTypes = SiteLog.complexValue(antenna.igsModelCode).split()
            if len(antennaAndDomeTypes) > 0:
                antennaType = antennaAndDomeTypes[0]
            else:
                antennaType = ''
            if len(antennaAndDomeTypes) > 1:
                domeType = antennaAndDomeTypes[1]
            else:
                domeType = SiteLog.complexValue(antenna.antennaRadomeType)
                if not domeType:
                    domeType = 'NONE'
            # Antenna name left-padded to 16 columns, dome to 4 (IGS A20).
            self.antennaModel = '{0: <16}{1: <4}'.format(antennaType, domeType)
            self.manufacturerSerialNumber = SiteLog.simpleValue(antenna.manufacturerSerialNumber)
            self.antennaReferencePoint = SiteLog.complexValue(antenna.antennaReferencePoint)
            self.marker_arpUpEcc = SiteLog.simpleValue(antenna.marker_arpUpEcc, "{:08.4f}")
            self.marker_arpNorthEcc = SiteLog.simpleValue(antenna.marker_arpNorthEcc, "{:08.4f}")
            self.marker_arpEastEcc = SiteLog.simpleValue(antenna.marker_arpEastEcc, "{:08.4f}")
            # Normalise "+0 deg" to "0 deg" for the true-north alignment.
            trueNorth = SiteLog.simpleValue(antenna.alignmentFromTrueNorth, "{:+.0f} deg")
            self.alignmentFromTrueNorth = "0 deg" if trueNorth == "+0 deg" else trueNorth
            self.antennaRadomeType = SiteLog.complexValue(antenna.antennaRadomeType)
            self.radomeSerialNumber = SiteLog.simpleValue(antenna.radomeSerialNumber)
            self.antennaCableType = SiteLog.simpleValue(antenna.antennaCableType)
            self.antennaCableLength = SiteLog.simpleValue(antenna.antennaCableLength, "{} m")
            self.dateInstalled = SiteLog.dateTime(antenna.dateInstalled)
            self.dateRemoved = SiteLog.dateTime(antenna.dateRemoved)
            self.notes = SiteLog.simpleValue(antenna.notes)
            self.typeOfAntenna = type(self).AntennaType

        def output(self):
            """Render this antenna entry as fixed-width text."""
            io = StringIO()
            io.write(self.typeOfAntenna + self.antennaModel + "\n")
            io.write(type(self).SerialNumber + self.manufacturerSerialNumber + "\n")
            io.write(type(self).ReferencePoint + self.antennaReferencePoint + "\n")
            io.write(type(self).Up + self.marker_arpUpEcc + "\n")
            io.write(type(self).North + self.marker_arpNorthEcc + "\n")
            io.write(type(self).East + self.marker_arpEastEcc + "\n")
            io.write(type(self).Alignment + self.alignmentFromTrueNorth + "\n")
            io.write(type(self).RadomeType + self.antennaRadomeType + "\n")
            io.write(type(self).RadomeSerialNumber + self.radomeSerialNumber + "\n")
            io.write(type(self).CableType + self.antennaCableType + "\n")
            io.write(type(self).CableLength + self.antennaCableLength + "\n")
            io.write(type(self).Installed + self.dateInstalled + "\n")
            io.write(type(self).Removed + self.dateRemoved + "\n")
            io.write(type(self).Additional + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text

        def updateIndex(self, index):
            """Replace the "4.x" placeholder with the actual 1-based index."""
            text = "4." + str(index)
            self.typeOfAntenna = text + self.typeOfAntenna[len(text):]
class SurveyedLocalTieProperty(object):
    """Section 5 (Surveyed Local Ties) of the site log.

    Collects every non-deleted local-tie record, numbers them 5.1, 5.2, ...,
    and appends the blank "5.x" instruction template at the end.
    """

    Title = "5.   Surveyed Local Ties\n"

    # Blank template emitted after the real entries, per the IGS format.
    Default = """5.x  Tied Marker Name         :
     Tied Marker Usage        : (SLR/VLBI/LOCAL CONTROL/FOOTPRINT/etc)
     Tied Marker CDP Number   : (A4)
     Tied Marker DOMES Number : (A9)
     Differential Components from GNSS Marker to the tied monument (ITRS)
       dx (m)                 : (m)
       dy (m)                 : (m)
       dz (m)                 : (m)
     Accuracy (mm)            : (mm)
     Survey method            : (GPS CAMPAIGN/TRILATERATION/TRIANGULATION/etc)
     Date Measured            : (CCYY-MM-DDThh:mmZ)
     Additional Information   : (multiple lines)\n"""

    def __init__(self, siteLog):
        self.allLocalTies = []
        itemList = siteLog.surveyedLocalTie
        if itemList:
            i = 1
            for item in itemList:
                # Deleted records are skipped and do not consume an index.
                if isItemDeleted(item):
                    continue
                localTie = self.SurveyedLocalTie(item.SurveyedLocalTie)
                localTie.updateIndex(i)
                self.allLocalTies.append(localTie)
                i += 1

    def output(self):
        """Render section 5 (all local ties plus the blank template)."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for localTie in self.allLocalTies:
            io.write(localTie.output())
        io.write(type(self).Default)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text

    class SurveyedLocalTie(object):
        """One local-tie entry; the label constants fix the column layout."""

        MarkerName = "5.x  Tied Marker Name         : "
        MarkerUsage = "     Tied Marker Usage        : "
        CDPNumber = "     Tied Marker CDP Number   : "
        DOMESNumber = "     Tied Marker DOMES Number : "
        ITRS = "     Differential Components from GNSS Marker to the tied monument (ITRS)"
        Dx = "       dx (m)                 : "
        Dy = "       dy (m)                 : "
        Dz = "       dz (m)                 : "
        Accuracy = "     Accuracy (mm)            : "
        SurveyMethod = "     Survey method            : "
        DateMeasured = "     Date Measured            : "
        Additional = "     Additional Information   : "

        def __init__(self, localTie):
            self.tiedMarkerName = SiteLog.simpleValue(localTie.tiedMarkerName)
            self.tiedMarkerUsage = SiteLog.simpleValue(localTie.tiedMarkerUsage)
            self.tiedMarkerCDPNumber = SiteLog.simpleValue(localTie.tiedMarkerCDPNumber)
            self.tiedMarkerDOMESNumber = SiteLog.simpleValue(localTie.tiedMarkerDOMESNumber)
            # The differential-components element is optional; any missing
            # piece of the binding chain raises and maps to "".
            # (Previously bare "except:" — narrowed to "except Exception:"
            # so KeyboardInterrupt/SystemExit are no longer swallowed.)
            try:
                self.dx = str(SiteLog.simpleValue(localTie.differentialComponentsGNSSMarkerToTiedMonumentITRS.dx))
            except Exception:
                self.dx = ""
            try:
                self.dy = str(SiteLog.simpleValue(localTie.differentialComponentsGNSSMarkerToTiedMonumentITRS.dy))
            except Exception:
                self.dy = ""
            try:
                self.dz = str(SiteLog.simpleValue(localTie.differentialComponentsGNSSMarkerToTiedMonumentITRS.dz))
            except Exception:
                self.dz = ""
            self.localSiteTiesAccuracy = SiteLog.simpleValue(localTie.localSiteTiesAccuracy)
            self.surveyMethod = SiteLog.simpleValue(localTie.surveyMethod)
            self.dateMeasured = SiteLog.dateTime(localTie.dateMeasured)
            self.notes = SiteLog.simpleValue(localTie.notes)
            self.nameOfMarker = type(self).MarkerName

        def output(self):
            """Render this local-tie entry as fixed-width text."""
            io = StringIO()
            io.write(self.nameOfMarker + self.tiedMarkerName + "\n")
            io.write(type(self).MarkerUsage + SiteLog.toMultiple(self.tiedMarkerUsage))
            io.write(type(self).CDPNumber + self.tiedMarkerCDPNumber + "\n")
            io.write(type(self).DOMESNumber + self.tiedMarkerDOMESNumber + "\n")
            io.write(type(self).ITRS + "\n")
            io.write(type(self).Dx + self.dx + "\n")
            io.write(type(self).Dy + self.dy + "\n")
            io.write(type(self).Dz + self.dz + "\n")
            io.write(type(self).Accuracy + self.localSiteTiesAccuracy + "\n")
            io.write(type(self).SurveyMethod + SiteLog.toMultiple(self.surveyMethod))
            io.write(type(self).DateMeasured + self.dateMeasured + "\n")
            io.write(type(self).Additional + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text

        def updateIndex(self, index):
            """Replace the "5.x" placeholder with the actual 1-based index."""
            text = "5." + str(index)
            self.nameOfMarker = text + self.nameOfMarker[len(text):]
class FrequencyStandardProperty(object):
    """Section 6 (Frequency Standard) of the site log.

    Collects every non-deleted frequency-standard record, numbers them
    6.1, 6.2, ..., and appends the blank "6.x" instruction template.
    """

    Title = "6.   Frequency Standard\n"

    # Blank template emitted after the real entries, per the IGS format.
    Default = """6.x  Standard Type            : (INTERNAL or EXTERNAL H-MASER/CESIUM/etc)
       Input Frequency        : (if external)
     Effective Dates          : (CCYY-MM-DD/CCYY-MM-DD)
     Notes                    : (multiple lines)\n"""

    def __init__(self, siteLog):
        self.allFrequencyStandards = []
        itemList = siteLog.frequencyStandard
        if itemList:
            i = 1
            for item in itemList:
                # Deleted records are skipped and do not consume an index.
                if isItemDeleted(item):
                    continue
                frequencyStandard = self.FrequencyStandard(item.FrequencyStandard)
                frequencyStandard.updateIndex(i)
                self.allFrequencyStandards.append(frequencyStandard)
                i += 1

    def output(self):
        """Render section 6 (all standards plus the blank template)."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for frequencyStandard in self.allFrequencyStandards:
            io.write(frequencyStandard.output())
        io.write(type(self).Default)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text

    class FrequencyStandard(object):
        """One frequency-standard entry; labels fix the column layout."""

        StandardType = "6.x  Standard Type            : "
        InputFrequency = "       Input Frequency        : "
        EffectiveDates = "     Effective Dates          : "
        Notes = "     Notes                    : "

        def __init__(self, frequencyStandard):
            self.standardType = SiteLog.complexValue(frequencyStandard.standardType)
            # (Removed an unused duplicate "frequency = ..." assignment.)
            self.inputFrequency = SiteLog.simpleValue(frequencyStandard.inputFrequency, "{:.0f} MHz")
            # The validity interval is optional; any missing piece of the
            # binding chain raises and maps to the "CCYY-MM-DD" placeholder.
            # (Previously bare "except:" — narrowed to "except Exception:"
            # so KeyboardInterrupt/SystemExit are no longer swallowed.)
            try:
                begin = SiteLog.date(frequencyStandard.validTime.AbstractTimePrimitive.beginPosition)
                if not str(begin):
                    begin = "CCYY-MM-DD"
            except Exception:
                begin = "CCYY-MM-DD"
            try:
                end = SiteLog.date(frequencyStandard.validTime.AbstractTimePrimitive.endPosition)
                if not str(end):
                    end = "CCYY-MM-DD"
            except Exception:
                end = "CCYY-MM-DD"
            self.validTime = begin + "/" + end
            self.notes = SiteLog.simpleValue(frequencyStandard.notes)
            self.typeOfFrequencyStandard = type(self).StandardType

        def output(self):
            """Render this frequency-standard entry as fixed-width text."""
            io = StringIO()
            io.write(self.typeOfFrequencyStandard + self.standardType + "\n")
            io.write(type(self).InputFrequency + self.inputFrequency + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Notes + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text

        def updateIndex(self, index):
            """Replace the "6.x" placeholder with the actual 1-based index."""
            text = "6." + str(index)
            self.typeOfFrequencyStandard = text + self.typeOfFrequencyStandard[len(text):]
class CollocationInformationProperty(object):
    """Section 7 (Collocation Information) of a text site log."""
    Title = "7. Collocation Information\n"
    Default = """7.x Instrumentation Type  : (GPS/GLONASS/DORIS/PRARE/SLR/VLBI/TIME/etc)
       Status              : (PERMANENT/MOBILE)
       Effective Dates     : (CCYY-MM-DD/CCYY-MM-DD)
       Notes               : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (7.1, 7.2, ...) list, skipping deleted items."""
        self.allCollocationInformations = []
        index = 1
        for item in (siteLog.collocationInformation or []):
            if isItemDeleted(item):
                continue
            entry = self.CollocationInformation(item.CollocationInformation)
            entry.updateIndex(index)
            self.allCollocationInformations.append(entry)
            index += 1
    def output(self):
        """Return the formatted section text: title, entries, empty template."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for collocationInformation in self.allCollocationInformations:
            io.write(collocationInformation.output())
        io.write(type(self).Default)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text
    class CollocationInformation(object):
        Instrumentation = "7.x Instrumentation Type  : "
        Status = "       Status              : "
        EffectiveDates = "       Effective Dates     : "
        Notes = "       Notes               : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, collocationInformation):
            """Flatten one GeodesyML collocationInformation element."""
            self.instrumentationType = SiteLog.complexValue(collocationInformation.instrumentationType)
            self.status = SiteLog.complexValue(collocationInformation.status)
            self.validTime = self._effectiveDates(collocationInformation)
            self.notes = SiteLog.simpleValue(collocationInformation.notes)
            self.typeOfCollocationInformation = type(self).Instrumentation
        def output(self):
            """Return this entry rendered as one 7.N block."""
            io = StringIO()
            io.write(self.typeOfCollocationInformation + self.instrumentationType + "\n")
            io.write(type(self).Status + self.status + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Notes + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '7.x' with '7.<index>' in the header label."""
            text = "7." + str(index)
            self.typeOfCollocationInformation = text + self.typeOfCollocationInformation[len(text):]
class HumiditySensorProperty(object):
    """Section 8.1 (humidity sensors); also emits the section 8 heading."""
    Title = "8. Meteorological Instrumentation\n"
    Default = """8.1.x Humidity Sensor Model   :
       Manufacturer           :
       Serial Number          :
       Data Sampling Interval : (sec)
       Accuracy (% rel h)     : (% rel h)
       Aspiration             : (UNASPIRATED/NATURAL/FAN/etc)
       Height Diff to Ant     : (m)
       Calibration date       : (CCYY-MM-DD)
       Effective Dates        : (CCYY-MM-DD/CCYY-MM-DD)
       Notes                  : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (8.1.1, 8.1.2, ...) list, skipping deleted items."""
        self.allHumiditySensors = []
        index = 1
        for item in (siteLog.humiditySensor or []):
            if isItemDeleted(item):
                continue
            entry = self.HumiditySensor(item.HumiditySensor)
            entry.updateIndex(index)
            self.allHumiditySensors.append(entry)
            index += 1
    def output(self):
        """Return the formatted subsection; no trailing blank line here because
        the pressure/temperature subsections follow immediately."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for humiditySensor in self.allHumiditySensors:
            io.write(humiditySensor.output())
        io.write(type(self).Default)
        text = io.getvalue()
        io.close()
        return text
    class HumiditySensor(object):
        SensorModel = "8.1.x Humidity Sensor Model   : "
        Manufacturer = "       Manufacturer           : "
        SerialNumber = "       Serial Number          : "
        SamplingInterval = "       Data Sampling Interval : "
        Accuracy = "       Accuracy (% rel h)     : "
        Aspiration = "       Aspiration             : "
        Diff = "       Height Diff to Ant     : "
        CalibrationDate = "       Calibration date       : "
        EffectiveDates = "       Effective Dates        : "
        Notes = "       Notes                  : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, humiditySensor):
            """Flatten one GeodesyML humiditySensor element into strings."""
            self.type = SiteLog.complexValue(humiditySensor.type)
            self.manufacturer = SiteLog.simpleValue(humiditySensor.manufacturer)
            self.serialNumber = SiteLog.simpleValue(humiditySensor.serialNumber)
            self.dataSamplingInterval = SiteLog.simpleValue(humiditySensor.dataSamplingInterval, "{:.0f} sec")
            self.accuracy_percentRelativeHumidity = SiteLog.simpleValue(humiditySensor.accuracy_percentRelativeHumidity, "{:.2f}")
            self.aspiration = SiteLog.simpleValue(humiditySensor.aspiration)
            self.heightDiffToAntenna = SiteLog.simpleValue(humiditySensor.heightDiffToAntenna, "{} m")
            self.calibrationDate = SiteLog.date(humiditySensor.calibrationDate)
            self.validTime = self._effectiveDates(humiditySensor)
            self.notes = SiteLog.simpleValue(humiditySensor.notes)
            self.modelOfHumiditySensor = type(self).SensorModel
        def output(self):
            """Return this entry rendered as one 8.1.N block."""
            io = StringIO()
            io.write(self.modelOfHumiditySensor + self.type + "\n")
            io.write(type(self).Manufacturer + self.manufacturer + "\n")
            io.write(type(self).SerialNumber + self.serialNumber + "\n")
            io.write(type(self).SamplingInterval + self.dataSamplingInterval + "\n")
            io.write(type(self).Accuracy + self.accuracy_percentRelativeHumidity + "\n")
            io.write(type(self).Aspiration + self.aspiration + "\n")
            io.write(type(self).Diff + self.heightDiffToAntenna + "\n")
            io.write(type(self).CalibrationDate + self.calibrationDate + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Notes + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '8.1.x' with '8.1.<index>' in the header label."""
            text = "8.1." + str(index)
            self.modelOfHumiditySensor = text + self.modelOfHumiditySensor[len(text):]
class PressureSensorProperty(object):
    """Section 8.2 (pressure sensors) of a text site log."""
    Title = ""
    Default = """8.2.x Pressure Sensor Model   :
       Manufacturer           :
       Serial Number          :
       Data Sampling Interval : (sec)
       Accuracy               : (hPa)
       Height Diff to Ant     : (m)
       Calibration date       : (CCYY-MM-DD)
       Effective Dates        : (CCYY-MM-DD/CCYY-MM-DD)
       Notes                  : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (8.2.1, 8.2.2, ...) list, skipping deleted items."""
        self.allPressureSensors = []
        index = 1
        for item in (siteLog.pressureSensor or []):
            if isItemDeleted(item):
                continue
            entry = self.PressureSensor(item.PressureSensor)
            entry.updateIndex(index)
            self.allPressureSensors.append(entry)
            index += 1
    def output(self):
        """Return the formatted subsection: entries then the empty template."""
        io = StringIO()
        for pressureSensor in self.allPressureSensors:
            io.write(pressureSensor.output())
        io.write(type(self).Default)
        text = io.getvalue()
        io.close()
        return text
    class PressureSensor(object):
        SensorModel = "8.2.x Pressure Sensor Model   : "
        Manufacturer = "       Manufacturer           : "
        SerialNumber = "       Serial Number          : "
        SamplingInterval = "       Data Sampling Interval : "
        Accuracy = "       Accuracy               : "
        # Aspiration is not part of the pressure-sensor site log block and is
        # never written by output(); kept only for interface compatibility.
        Aspiration = "       Aspiration             : "
        Diff = "       Height Diff to Ant     : "
        CalibrationDate = "       Calibration date       : "
        EffectiveDates = "       Effective Dates        : "
        Notes = "       Notes                  : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, pressureSensor):
            """Flatten one GeodesyML pressureSensor element into strings."""
            self.type = SiteLog.complexValue(pressureSensor.type)
            self.manufacturer = SiteLog.simpleValue(pressureSensor.manufacturer)
            self.serialNumber = SiteLog.simpleValue(pressureSensor.serialNumber)
            self.dataSamplingInterval = SiteLog.simpleValue(pressureSensor.dataSamplingInterval, "{:.0f} sec")
            self.accuracy_hPa = SiteLog.simpleValue(pressureSensor.accuracy_hPa, "{:.2f} hPa")
            self.heightDiffToAntenna = SiteLog.simpleValue(pressureSensor.heightDiffToAntenna, "{} m")
            self.calibrationDate = SiteLog.date(pressureSensor.calibrationDate)
            self.validTime = self._effectiveDates(pressureSensor)
            self.notes = SiteLog.simpleValue(pressureSensor.notes)
            self.modelOfPressureSensor = type(self).SensorModel
        def output(self):
            """Return this entry rendered as one 8.2.N block."""
            io = StringIO()
            io.write(self.modelOfPressureSensor + self.type + "\n")
            io.write(type(self).Manufacturer + self.manufacturer + "\n")
            io.write(type(self).SerialNumber + self.serialNumber + "\n")
            io.write(type(self).SamplingInterval + self.dataSamplingInterval + "\n")
            io.write(type(self).Accuracy + self.accuracy_hPa + "\n")
            io.write(type(self).Diff + self.heightDiffToAntenna + "\n")
            io.write(type(self).CalibrationDate + self.calibrationDate + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Notes + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '8.2.x' with '8.2.<index>' in the header label."""
            text = "8.2." + str(index)
            self.modelOfPressureSensor = text + self.modelOfPressureSensor[len(text):]
class TemperatureSensorProperty(object):
    """Section 8.3 (temperature sensors) of a text site log."""
    Title = ""
    Default = """8.3.x Temp. Sensor Model      :
       Manufacturer           :
       Serial Number          :
       Data Sampling Interval : (sec)
       Accuracy               : (deg C)
       Aspiration             : (UNASPIRATED/NATURAL/FAN/etc)
       Height Diff to Ant     : (m)
       Calibration date       : (CCYY-MM-DD)
       Effective Dates        : (CCYY-MM-DD/CCYY-MM-DD)
       Notes                  : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (8.3.1, 8.3.2, ...) list, skipping deleted items."""
        self.allTemperatureSensors = []
        index = 1
        for item in (siteLog.temperatureSensor or []):
            if isItemDeleted(item):
                continue
            entry = self.TemperatureSensor(item.TemperatureSensor)
            entry.updateIndex(index)
            self.allTemperatureSensors.append(entry)
            index += 1
    def output(self):
        """Return the formatted subsection: entries then the empty template."""
        io = StringIO()
        for temperatureSensor in self.allTemperatureSensors:
            io.write(temperatureSensor.output())
        io.write(type(self).Default)
        text = io.getvalue()
        io.close()
        return text
    class TemperatureSensor(object):
        SensorModel = "8.3.x Temp. Sensor Model      : "
        Manufacturer = "       Manufacturer           : "
        SerialNumber = "       Serial Number          : "
        SamplingInterval = "       Data Sampling Interval : "
        Accuracy = "       Accuracy               : "
        Aspiration = "       Aspiration             : "
        Diff = "       Height Diff to Ant     : "
        CalibrationDate = "       Calibration date       : "
        EffectiveDates = "       Effective Dates        : "
        Notes = "       Notes                  : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, temperatureSensor):
            """Flatten one GeodesyML temperatureSensor element into strings."""
            self.type = SiteLog.complexValue(temperatureSensor.type)
            self.manufacturer = SiteLog.simpleValue(temperatureSensor.manufacturer)
            self.serialNumber = SiteLog.simpleValue(temperatureSensor.serialNumber)
            self.dataSamplingInterval = SiteLog.simpleValue(temperatureSensor.dataSamplingInterval, "{:.0f} sec")
            self.accuracy_degreesCelcius = SiteLog.simpleValue(temperatureSensor.accuracy_degreesCelcius, "{:.2f} deg")
            self.aspiration = SiteLog.simpleValue(temperatureSensor.aspiration)
            self.heightDiffToAntenna = SiteLog.simpleValue(temperatureSensor.heightDiffToAntenna, "{} m")
            self.calibrationDate = SiteLog.date(temperatureSensor.calibrationDate)
            self.validTime = self._effectiveDates(temperatureSensor)
            self.notes = SiteLog.simpleValue(temperatureSensor.notes)
            self.modelOfTemperatureSensor = type(self).SensorModel
        def output(self):
            """Return this entry rendered as one 8.3.N block."""
            io = StringIO()
            io.write(self.modelOfTemperatureSensor + self.type + "\n")
            io.write(type(self).Manufacturer + self.manufacturer + "\n")
            io.write(type(self).SerialNumber + self.serialNumber + "\n")
            io.write(type(self).SamplingInterval + self.dataSamplingInterval + "\n")
            io.write(type(self).Accuracy + self.accuracy_degreesCelcius + "\n")
            io.write(type(self).Aspiration + self.aspiration + "\n")
            io.write(type(self).Diff + self.heightDiffToAntenna + "\n")
            io.write(type(self).CalibrationDate + self.calibrationDate + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Notes + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '8.3.x' with '8.3.<index>' in the header label."""
            text = "8.3." + str(index)
            self.modelOfTemperatureSensor = text + self.modelOfTemperatureSensor[len(text):]
class WaterVaporSensorProperty(object):
    """Section 8.4 (water vapor radiometers) of a text site log."""
    Title = ""
    Default = """8.4.x Water Vapor Radiometer  :
       Manufacturer           :
       Serial Number          :
       Distance to Antenna    : (m)
       Height Diff to Ant     : (m)
       Calibration date       : (CCYY-MM-DD)
       Effective Dates        : (CCYY-MM-DD/CCYY-MM-DD)
       Notes                  : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (8.4.1, 8.4.2, ...) list, skipping deleted items."""
        self.allWaterVaporSensors = []
        index = 1
        for item in (siteLog.waterVaporSensor or []):
            if isItemDeleted(item):
                continue
            entry = self.WaterVaporSensor(item.WaterVaporSensor)
            entry.updateIndex(index)
            self.allWaterVaporSensors.append(entry)
            index += 1
    def output(self):
        """Return the formatted subsection: entries then the empty template."""
        io = StringIO()
        for waterVaporSensor in self.allWaterVaporSensors:
            io.write(waterVaporSensor.output())
        io.write(type(self).Default)
        text = io.getvalue()
        io.close()
        return text
    class WaterVaporSensor(object):
        SensorModel = "8.4.x Water Vapor Radiometer  : "
        Manufacturer = "       Manufacturer           : "
        SerialNumber = "       Serial Number          : "
        Distance = "       Distance to Antenna    : "
        Diff = "       Height Diff to Ant     : "
        CalibrationDate = "       Calibration date       : "
        EffectiveDates = "       Effective Dates        : "
        Notes = "       Notes                  : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, waterVaporSensor):
            """Flatten one GeodesyML waterVaporSensor element into strings."""
            self.type = SiteLog.complexValue(waterVaporSensor.type)
            self.manufacturer = SiteLog.simpleValue(waterVaporSensor.manufacturer)
            self.serialNumber = SiteLog.simpleValue(waterVaporSensor.serialNumber)
            self.distanceToAntenna = SiteLog.simpleValue(waterVaporSensor.distanceToAntenna, "{} m")
            self.heightDiffToAntenna = SiteLog.simpleValue(waterVaporSensor.heightDiffToAntenna, "{} m")
            self.calibrationDate = SiteLog.date(waterVaporSensor.calibrationDate)
            self.validTime = self._effectiveDates(waterVaporSensor)
            self.notes = SiteLog.simpleValue(waterVaporSensor.notes)
            self.modelOfWaterVaporSensor = type(self).SensorModel
        def output(self):
            """Return this entry rendered as one 8.4.N block."""
            io = StringIO()
            io.write(self.modelOfWaterVaporSensor + self.type + "\n")
            io.write(type(self).Manufacturer + self.manufacturer + "\n")
            io.write(type(self).SerialNumber + self.serialNumber + "\n")
            # (was "self. distanceToAntenna" — stray space after the dot)
            io.write(type(self).Distance + self.distanceToAntenna + "\n")
            io.write(type(self).Diff + self.heightDiffToAntenna + "\n")
            io.write(type(self).CalibrationDate + self.calibrationDate + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Notes + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '8.4.x' with '8.4.<index>' in the header label."""
            text = "8.4." + str(index)
            self.modelOfWaterVaporSensor = text + self.modelOfWaterVaporSensor[len(text):]
class OtherInstrumentationProperty(object):
    """Section 8.5 (other meteorological instrumentation) of a text site log."""
    Title = ""
    Default = """8.5.x Other Instrumentation : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Collect non-deleted entries, renumbered 8.5.1, 8.5.2, ..."""
        self.allOtherInstrumentations = []
        entries = siteLog.otherInstrumentation
        if not entries:
            return
        index = 1
        for entry in entries:
            if isItemDeleted(entry):
                continue
            instrumentation = self.OtherInstrumentation(entry.OtherInstrumentation)
            instrumentation.updateIndex(index)
            self.allOtherInstrumentations.append(instrumentation)
            index += 1
    def output(self):
        """Return entries followed by the empty template and a blank line."""
        parts = [item.output() for item in self.allOtherInstrumentations]
        parts.append(type(self).Default)
        parts.append("\n")
        return "".join(parts)
    class OtherInstrumentation(object):
        Instrumentation = "8.5.x Other Instrumentation : "
        def __init__(self, otherInstrumentation):
            """Flatten one GeodesyML otherInstrumentation element."""
            self.instrumentation = SiteLog.simpleValue(otherInstrumentation.instrumentation)
            self.textOfInstrumentation = type(self).Instrumentation
        def output(self):
            """Return this entry rendered as one 8.5.N block."""
            return self.textOfInstrumentation + SiteLog.toMultiple(self.instrumentation) + "\n"
        def updateIndex(self, index):
            """Replace the leading '8.5.x' with '8.5.<index>' in the label."""
            prefix = "8.5." + str(index)
            self.textOfInstrumentation = prefix + self.textOfInstrumentation[len(prefix):]
class RadioInterferenceProperty(object):
    """Section 9.1 (radio interference); also emits the section 9 heading."""
    Title = "9. Local Ongoing Conditions Possibly Affecting Computed Position\n"
    Default = """9.1.x Radio Interferences     : (TV/CELL PHONE ANTENNA/RADAR/etc)
       Observed Degradations  : (SN RATIO/DATA GAPS/etc)
       Effective Dates        : (CCYY-MM-DD/CCYY-MM-DD)
       Additional Information : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (9.1.1, 9.1.2, ...) list, skipping deleted items."""
        self.allRadioInterferences = []
        index = 1
        for item in (siteLog.radioInterference or []):
            if isItemDeleted(item):
                continue
            entry = self.RadioInterference(item.RadioInterference)
            entry.updateIndex(index)
            self.allRadioInterferences.append(entry)
            index += 1
    def output(self):
        """Return the formatted subsection: title, entries, empty template."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for radioInterference in self.allRadioInterferences:
            io.write(radioInterference.output())
        io.write(type(self).Default)
        text = io.getvalue()
        io.close()
        return text
    class RadioInterference(object):
        ProblemSource = "9.1.x Radio Interferences     : "
        Degradation = "       Observed Degradations  : "
        EffectiveDates = "       Effective Dates        : "
        Additional = "       Additional Information : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, radioInterference):
            """Flatten one GeodesyML radioInterference element into strings."""
            self.possibleProblemSource = SiteLog.simpleValue(radioInterference.possibleProblemSource)
            self.observedDegradation = SiteLog.simpleValue(radioInterference.observedDegradation)
            self.validTime = self._effectiveDates(radioInterference)
            self.notes = SiteLog.simpleValue(radioInterference.notes)
            self.sourceOfRadioInterference = type(self).ProblemSource
        def output(self):
            """Return this entry rendered as one 9.1.N block."""
            io = StringIO()
            io.write(self.sourceOfRadioInterference + self.possibleProblemSource + "\n")
            io.write(type(self).Degradation + self.observedDegradation + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Additional + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '9.1.x' with '9.1.<index>' in the label."""
            text = "9.1." + str(index)
            self.sourceOfRadioInterference = text + self.sourceOfRadioInterference[len(text):]
class MultipathSourceProperty(object):
    """Section 9.2 (multipath sources) of a text site log."""
    Title = ""
    Default = """9.2.x Multipath Sources       : (METAL ROOF/DOME/VLBI ANTENNA/etc)
       Effective Dates        : (CCYY-MM-DD/CCYY-MM-DD)
       Additional Information : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (9.2.1, 9.2.2, ...) list, skipping deleted items."""
        self.allMultipathSources = []
        index = 1
        for item in (siteLog.multipathSource or []):
            if isItemDeleted(item):
                continue
            entry = self.MultipathSource(item.MultipathSource)
            entry.updateIndex(index)
            self.allMultipathSources.append(entry)
            index += 1
    def output(self):
        """Return the formatted subsection: entries then the empty template."""
        io = StringIO()
        for multipathSource in self.allMultipathSources:
            io.write(multipathSource.output())
        io.write(type(self).Default)
        text = io.getvalue()
        io.close()
        return text
    class MultipathSource(object):
        ProblemSource = "9.2.x Multipath Sources       : "
        EffectiveDates = "       Effective Dates        : "
        Additional = "       Additional Information : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, multipathSource):
            """Flatten one GeodesyML multipathSource element into strings."""
            self.possibleProblemSource = SiteLog.simpleValue(multipathSource.possibleProblemSource)
            self.validTime = self._effectiveDates(multipathSource)
            self.notes = SiteLog.simpleValue(multipathSource.notes)
            self.sourceOfMultipathSource = type(self).ProblemSource
        def output(self):
            """Return this entry rendered as one 9.2.N block."""
            io = StringIO()
            io.write(self.sourceOfMultipathSource + self.possibleProblemSource + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Additional + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '9.2.x' with '9.2.<index>' in the label."""
            text = "9.2." + str(index)
            self.sourceOfMultipathSource = text + self.sourceOfMultipathSource[len(text):]
class SignalObstructionProperty(object):
    """Section 9.3 (signal obstructions) of a text site log."""
    Title = ""
    Default = """9.3.x Signal Obstructions     : (TREES/BUILDINGS/etc)
       Effective Dates        : (CCYY-MM-DD/CCYY-MM-DD)
       Additional Information : (multiple lines)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (9.3.1, 9.3.2, ...) list, skipping deleted items."""
        self.allSignalObstructions = []
        index = 1
        for item in (siteLog.signalObstruction or []):
            if isItemDeleted(item):
                continue
            entry = self.SignalObstruction(item.SignalObstruction)
            entry.updateIndex(index)
            self.allSignalObstructions.append(entry)
            index += 1
    def output(self):
        """Return entries, the empty template, and the closing blank line."""
        io = StringIO()
        for signalObstruction in self.allSignalObstructions:
            io.write(signalObstruction.output())
        io.write(type(self).Default)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text
    class SignalObstruction(object):
        ProblemSource = "9.3.x Signal Obstructions     : "
        EffectiveDates = "       Effective Dates        : "
        Additional = "       Additional Information : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, signalObstruction):
            """Flatten one GeodesyML signalObstruction element into strings."""
            self.possibleProblemSource = SiteLog.simpleValue(signalObstruction.possibleProblemSource)
            self.validTime = self._effectiveDates(signalObstruction)
            self.notes = SiteLog.simpleValue(signalObstruction.notes)
            self.sourceOfSignalObstruction = type(self).ProblemSource
        def output(self):
            """Return this entry rendered as one 9.3.N block."""
            io = StringIO()
            io.write(self.sourceOfSignalObstruction + self.possibleProblemSource + "\n")
            io.write(type(self).EffectiveDates + self.validTime + "\n")
            io.write(type(self).Additional + SiteLog.toMultiple(self.notes))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '9.3.x' with '9.3.<index>' in the label."""
            text = "9.3." + str(index)
            self.sourceOfSignalObstruction = text + self.sourceOfSignalObstruction[len(text):]
class LocalEpisodicEffectProperty(object):
    """Section 10 (local episodic effects) of a text site log."""
    Title = "10. Local Episodic Effects Possibly Affecting Data Quality\n"
    Default = """10.x Date                     : (CCYY-MM-DD/CCYY-MM-DD)
       Event                  : (TREE CLEARING/CONSTRUCTION/etc)\n"""
    def __init__(self, siteLog):
        """Build the renumbered (10.1, 10.2, ...) list, skipping deleted items."""
        self.allLocalEpisodicEffects = []
        index = 1
        for item in (siteLog.localEpisodicEffect or []):
            if isItemDeleted(item):
                continue
            entry = self.LocalEpisodicEffect(item.LocalEpisodicEffect)
            entry.updateIndex(index)
            self.allLocalEpisodicEffects.append(entry)
            index += 1
    def output(self):
        """Return the formatted section: title, entries, empty template."""
        io = StringIO()
        io.write(type(self).Title)
        io.write("\n")
        for localEpisodicEffect in self.allLocalEpisodicEffects:
            io.write(localEpisodicEffect.output())
        io.write(type(self).Default)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text
    class LocalEpisodicEffect(object):
        Date = "10.x Date                     : "
        Event = "       Event                  : "
        @staticmethod
        def _effectiveDates(source):
            """Format source.validTime as 'begin/end' with CCYY-MM-DD fallbacks."""
            dates = []
            for attr in ("beginPosition", "endPosition"):
                try:
                    value = SiteLog.date(getattr(source.validTime.AbstractTimePrimitive, attr))
                    dates.append(value if str(value) else "CCYY-MM-DD")
                except Exception:
                    dates.append("CCYY-MM-DD")
            return dates[0] + "/" + dates[1]
        def __init__(self, localEpisodicEffect):
            """Flatten one GeodesyML localEpisodicEffect element."""
            self.validTime = self._effectiveDates(localEpisodicEffect)
            self.event = SiteLog.simpleValue(localEpisodicEffect.event)
            self.dateOfLocalEpisodicEffect = type(self).Date
        def output(self):
            """Return this entry rendered as one 10.N block."""
            io = StringIO()
            io.write(self.dateOfLocalEpisodicEffect + self.validTime + "\n")
            io.write(type(self).Event + SiteLog.toMultiple(self.event))
            io.write("\n")
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Replace the leading '10.x' with '10.<index>' in the label."""
            text = "10." + str(index)
            self.dateOfLocalEpisodicEffect = text + self.dateOfLocalEpisodicEffect[len(text):]
class AgencyProperty(object):
    """Agency / contact block (sections 11 and 12) of a text site log.

    Accepts either a single *agencyPropertyType* binding (siteOwner /
    siteMetadataCustodian) or a list of siteContact bindings, and always
    keeps at least two CI_ResponsibleParty entries (primary + secondary).
    """
    # For siteContact mapping only
    Agency = "     Agency                  : "
    Abbreviation = "     Preferred Abbreviation : \n"
    MailingAddress = "     Mailing Address        : "
    Additional = "     Additional Information  : (multiple lines)\n"
    Pattern = re.compile(r'agencyPropertyType', re.IGNORECASE)
    def _fillWithEmptyParties(self):
        """Append two placeholder (empty) parties indexed 0 and 1."""
        for i in range(2):
            party = self.CI_ResponsibleParty(None)
            party.updateIndex(i)
            self.allCI_ResponsibleParties.append(party)
    def __init__(self, agencyProperty, title):
        """agencyProperty: binding element or list (may be None); title: section header text."""
        self.allCI_ResponsibleParties = []
        self.title = title + '\n'
        if not agencyProperty:
            self._fillWithEmptyParties()
            return
        if re.match(type(self).Pattern, type(agencyProperty).__name__):
            # siteOwner / siteMetadataCustodian: one party, padded with an
            # empty secondary contact.
            primary = self.CI_ResponsibleParty(agencyProperty.CI_ResponsibleParty)
            primary.updateIndex(0)
            self.allCI_ResponsibleParties.append(primary)
            secondary = self.CI_ResponsibleParty(None)
            secondary.updateIndex(1)
            self.allCI_ResponsibleParties.append(secondary)
            return
        # siteContact mapping: agencyProperty is a (truthy) list of bindings.
        itemList = agencyProperty
        count = len(itemList)
        if count == 0:
            # Defensive: a truthy container that is nonetheless empty.
            self._fillWithEmptyParties()
        elif count == 1:
            primary = self.CI_ResponsibleParty(itemList[0].CI_ResponsibleParty)
            primary.updateIndex(0)
            self.allCI_ResponsibleParties.append(primary)
            secondary = self.CI_ResponsibleParty(None)
            secondary.updateIndex(1)
            self.allCI_ResponsibleParties.append(secondary)
        else:
            for i, item in enumerate(itemList):
                party = self.CI_ResponsibleParty(item.CI_ResponsibleParty)
                party.updateIndex(i)
                self.allCI_ResponsibleParties.append(party)
    def output(self):
        """Render the block: header, agency/address of the first party, then
        each contact subsection and the trailing 'Additional Information'."""
        first = self.allCI_ResponsibleParties[0]
        io = StringIO()
        io.write(self.title)
        io.write("\n")
        io.write(type(self).Agency + SiteLog.toMultiple(first.organisationName))
        io.write(type(self).Abbreviation)
        if first.deliveryPoint:
            io.write(type(self).MailingAddress + first.deliveryPoint[0] + "\n")
            # Continuation lines align under the value column (30 spaces).
            for line in first.deliveryPoint[1:]:
                io.write("                              " + line + "\n")
        else:
            io.write(type(self).MailingAddress + "\n")
        if first.city:
            io.write("                              " + first.city + "\n")
        if first.postalCode:
            io.write("                              " + first.postalCode + "\n")
        if first.country:
            io.write("                              " + first.country + "\n")
        for responsibleParty in self.allCI_ResponsibleParties:
            io.write(responsibleParty.output())
        io.write(type(self).Additional)
        io.write("\n")
        text = io.getvalue()
        io.close()
        return text
    class CI_ResponsibleParty(object):
        Chapters = ["     Primary Contact\n",
                    "     Secondary Contact\n"]
        ContactName = "     Contact Name           : "
        PrimaryPhone = "     Telephone (primary)    : "
        SecondPhone = "     Telephone (secondary)  : "
        Fax = "     Fax                    : "
        Email = "     E-mail                 : "
        @staticmethod
        def _contactValue(getter):
            """Return SiteLog.simpleValue(getter()), or '' when any element
            along the optional access chain is absent."""
            try:
                return SiteLog.simpleValue(getter())
            except Exception:
                return ""
        def __init__(self, responsibleParty):
            """Flatten one CI_ResponsibleParty binding; None yields an all-empty contact."""
            if not responsibleParty:
                self.individualName = ""
                self.organisationName = ""
                self.deliveryPoint = []
                self.city = ""
                self.postalCode = ""
                self.country = ""
                self.electronicMailAddress = ""
                self.primaryVoice = ""
                self.secondVoice = ""
                self.facsimile = ""
                self.chapter = ""
                return
            self.individualName = SiteLog.simpleValue(responsibleParty.individualName.CharacterString)
            self.organisationName = SiteLog.simpleValue(responsibleParty.organisationName.CharacterString)
            try:
                self.deliveryPoint = [SiteLog.simpleValue(point.CharacterString)
                                      for point in responsibleParty.contactInfo.CI_Contact.address.CI_Address.deliveryPoint]
            except Exception:
                self.deliveryPoint = []
            self.city = self._contactValue(lambda: responsibleParty.contactInfo.CI_Contact.address.CI_Address.city.CharacterString)
            self.postalCode = self._contactValue(lambda: responsibleParty.contactInfo.CI_Contact.address.CI_Address.postalCode.CharacterString)
            self.country = self._contactValue(lambda: responsibleParty.contactInfo.CI_Contact.address.CI_Address.country.CharacterString)
            self.electronicMailAddress = self._contactValue(lambda: responsibleParty.contactInfo.CI_Contact.address.CI_Address.electronicMailAddress[0].CharacterString)
            self.primaryVoice = self._contactValue(lambda: responsibleParty.contactInfo.CI_Contact.phone.CI_Telephone.voice[0].CharacterString)
            self.secondVoice = self._contactValue(lambda: responsibleParty.contactInfo.CI_Contact.phone.CI_Telephone.voice[1].CharacterString)
            self.facsimile = self._contactValue(lambda: responsibleParty.contactInfo.CI_Contact.phone.CI_Telephone.facsimile[0].CharacterString)
            self.chapter = ""
        def output(self):
            """Return this contact rendered as one Primary/Secondary subsection."""
            io = StringIO()
            io.write(self.chapter)
            io.write(type(self).ContactName + SiteLog.toMultiple(self.individualName))
            io.write(type(self).PrimaryPhone + SiteLog.toMultiple(self.primaryVoice))
            io.write(type(self).SecondPhone + SiteLog.toMultiple(self.secondVoice))
            io.write(type(self).Fax + SiteLog.toMultiple(self.facsimile))
            io.write(type(self).Email + SiteLog.toMultiple(self.electronicMailAddress))
            text = io.getvalue()
            io.close()
            return text
        def updateIndex(self, index):
            """Select the Primary (0) or Secondary (1) chapter heading."""
            if index < 2:
                self.chapter = type(self).Chapters[index]
class MoreInformation(object):
    """Section 13 (More Information) of a text site log."""
    Default = """13. More Information
     Primary Data Center      :
     Secondary Data Center    :
     URL for More Information :
     Hardcopy on File
       Site Map               : (Y or URL)
       Site Diagram           : (Y or URL)
       Horizon Mask           : (Y or URL)
       Monument Description   : (Y or URL)
       Site Pictures          : (Y or URL)
     Additional Information   : (multiple lines)
     Antenna Graphics with Dimensions
     (insert text graphic from file antenna.gra)\n"""
    def __init__(self, siteLog):
        """Flatten the optional moreInformation element; when absent only the
        empty template is emitted."""
        holder = siteLog.moreInformation
        if not holder:
            # MoreInformation can be a None type
            self.isEmpty = True
            return
        self.isEmpty = False
        info = holder.MoreInformation
        self.primary = ""
        self.secondary = ""
        centerCount = len(info.dataCenter)
        if centerCount == 1:
            self.primary = SiteLog.simpleValue(info.dataCenter[0])
        elif centerCount == 2:
            self.primary = SiteLog.simpleValue(info.dataCenter[0])
            self.secondary = SiteLog.simpleValue(info.dataCenter[1])
        # NOTE(review): more than two data centers leaves both fields blank —
        # preserved from the original; confirm whether extras should be ignored.
        self.urlForMoreInformation = SiteLog.simpleValue(info.urlForMoreInformation)
        self.siteMap = SiteLog.simpleValue(info.siteMap)
        self.siteDiagram = SiteLog.simpleValue(info.siteDiagram)
        self.horizonMask = SiteLog.simpleValue(info.horizonMask)
        self.monumentDescription = SiteLog.simpleValue(info.monumentDescription)
        self.sitePictures = SiteLog.simpleValue(info.sitePictures)
        self.notes = SiteLog.simpleValue(info.notes)
    def output(self):
        """Render section 13; the bare template when no element was present."""
        if self.isEmpty:
            return type(self).Default + "\n"
        lines = [
            "13. More Information\n",
            "\n",
            "     Primary Data Center      : " + self.primary + "\n",
            "     Secondary Data Center    : " + self.secondary + "\n",
            "     URL for More Information : " + self.urlForMoreInformation + "\n",
            "     Hardcopy on File\n",
            "       Site Map               : " + self.siteMap + "\n",
            "       Site Diagram           : " + self.siteDiagram + "\n",
            "       Horizon Mask           : " + self.horizonMask + "\n",
            "       Monument Description   : " + self.monumentDescription + "\n",
            "       Site Pictures          : " + self.sitePictures + "\n",
            "     Additional Information   : " + SiteLog.toMultiple(self.notes),
            "     Antenna Graphics with Dimensions\n",
            "\n",
            "     (insert text graphic from file antenna.gra)\n",
            "\n",
        ]
        return "".join(lines)
def options():
    """Parse command-line arguments for the GeodesyML-to-sitelog converter."""
    parser = argparse.ArgumentParser(description="Convert GeodesyML file to site log file")
    parser.add_argument('--version', action='version',
                        version='%(prog)s 1.0, Copyright (c) 2016 by Geodesy, Geoscience Australia')
    # Input GeodesyML document (mandatory).
    parser.add_argument("-x", "--geodesyML",
                        metavar='/fullpath/SSSS.xml',
                        required=True,
                        help='The geodesyML file for specific station')
    # Optional output path; stdout is used when omitted.
    parser.add_argument("-l", "--sitelog",
                        metavar='ssss_yyyydoy.log',
                        help='Output text site log file (default stdout)')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="log verbose information to file")
    return parser.parse_args()
def main():
    """Entry point: convert the GeodesyML file named on the command line.

    Writes the generated site log to the file given with -l/--sitelog,
    or to stdout when no output file was requested.
    """
    args = options()
    with open(args.geodesyML, 'r') as source:
        document = source.read()
    defaultOutputFileName, outputContent = parseXML(document)
    target = args.sitelog
    if not target:
        sys.stdout.write(outputContent)
        return
    # Site logs may contain non-ASCII agency names; write as UTF-8.
    with codecs.open(target, 'w', 'utf-8') as output:
        output.write(outputContent)
    print('\n\tSite log file \"' + target + '\" has been successfully generated')
def parseXML(xml):
    """Build the complete site-log text from a GeodesyML document.

    Returns (default_filename, content): the filename follows the
    ssss_yyyymmdd.log convention (4-char ID + DatePrepared), and content
    is every section's output() separated by blank lines.
    """
    siteLog = SiteLog(xml)
    logType = siteLog.siteLogType()
    form = FormInformation(logType)
    # DatePrepared arrives as yyyy-mm-dd; the filename wants yyyymmdd.
    prepared = form.datePrepared.replace('-', '')
    identification = SiteIdentification(logType)
    intro = Introduction(identification.fourCharacterID)
    location = SiteLocation(logType)
    siteLogFilename = identification.fourCharacterID.lower() + "_" + prepared + ".log"
    # Sections in the order mandated by the site log format (1 through 13).
    sections = [
        intro,
        form,
        identification,
        location,
        GnssReceiverProperty(logType),
        GnssAntennaProperty(logType),
        SurveyedLocalTieProperty(logType),
        FrequencyStandardProperty(logType),
        CollocationInformationProperty(logType),
        HumiditySensorProperty(logType),
        PressureSensorProperty(logType),
        TemperatureSensorProperty(logType),
        WaterVaporSensorProperty(logType),
        OtherInstrumentationProperty(logType),
        RadioInterferenceProperty(logType),
        MultipathSourceProperty(logType),
        SignalObstructionProperty(logType),
        LocalEpisodicEffectProperty(logType),
        AgencyProperty(logType.siteContact, '11. On-Site, Point of Contact Agency Information'),
        AgencyProperty(logType.siteMetadataCustodian, '12. Responsible Agency (if different from 11.)'),
        MoreInformation(logType),
    ]
    outputContent = u''.join(section.output() + u'\n' for section in sections)
    return siteLogFilename, outputContent
# Run the converter only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| {
"content_hash": "3e191505d8cf05b5e425366a35fff481",
"timestamp": "",
"source": "github",
"line_count": 1902,
"max_line_length": 165,
"avg_line_length": 40.34174553101998,
"alnum_prop": 0.5743776879968722,
"repo_name": "GeoscienceAustralia/GeodesyMLConverter",
"id": "7fb3f9284fd2ac110a421b1e10b34fe526066b63",
"size": "76730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GeodesyMLToSiteLog/geodesymltositelog.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HCL",
"bytes": "15617"
},
{
"name": "Nix",
"bytes": "785"
},
{
"name": "Python",
"bytes": "213522"
},
{
"name": "Shell",
"bytes": "5359"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Second ideascale migration: creates the Author, Campaign, Comment,
    Idea, Location and Vote models, then wires up their foreign keys and
    widens Initiative.name."""

    dependencies = [
        ('ideascale', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        migrations.CreateModel(
            name='Campaign',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('text', models.TextField()),
                ('datetime', models.CharField(max_length=50)),
                ('positive_votes', models.IntegerField()),
                ('negative_votes', models.IntegerField(null=True)),
                ('comments', models.IntegerField(null=True)),
                ('parent_type', models.CharField(max_length=50)),
                # Fix: max_length is a CharField option; it is not valid for
                # IntegerField and had no effect on the generated column.
                ('parent_id', models.IntegerField()),
                ('url', models.URLField()),
            ],
        ),
        migrations.CreateModel(
            name='Idea',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('title', models.CharField(max_length=100)),
                ('text', models.TextField()),
                ('datetime', models.CharField(max_length=50)),
                ('positive_votes', models.IntegerField()),
                ('negative_votes', models.IntegerField(null=True)),
                ('comments', models.IntegerField(null=True)),
                ('url', models.URLField()),
                ('campaign', models.ForeignKey(to='ideascale.Campaign')),
            ],
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('country', models.CharField(max_length=50)),
                ('city', models.CharField(max_length=50)),
                ('latitude', models.FloatField()),
                ('longitude', models.FloatField()),
            ],
        ),
        migrations.CreateModel(
            name='Vote',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('ideascale_id', models.IntegerField()),
                ('value', models.IntegerField()),
                ('datetime', models.CharField(max_length=50)),
                ('author', models.ForeignKey(to='ideascale.Author')),
                ('idea', models.ForeignKey(to='ideascale.Idea')),
            ],
        ),
        migrations.AlterField(
            model_name='initiative',
            name='name',
            field=models.CharField(max_length=100),
        ),
        # FK fields added after model creation to break circular dependencies.
        migrations.AddField(
            model_name='idea',
            name='location',
            field=models.ForeignKey(to='ideascale.Location'),
        ),
        migrations.AddField(
            model_name='idea',
            name='user',
            field=models.ForeignKey(to='ideascale.Author'),
        ),
        migrations.AddField(
            model_name='comment',
            name='location',
            field=models.ForeignKey(to='ideascale.Location'),
        ),
        migrations.AddField(
            model_name='comment',
            name='user',
            field=models.ForeignKey(to='ideascale.Author'),
        ),
    ]
| {
"content_hash": "268d37173d43177e94b1c15bd28337a2",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 114,
"avg_line_length": 39.93396226415094,
"alnum_prop": 0.5161823765650838,
"repo_name": "joausaga/social-ideation",
"id": "4187faffd686e9ee2f49b898b1ad11ba18742647",
"size": "4257",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ideascale/migrations/0002_auto_20150424_1736.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1411"
},
{
"name": "HTML",
"bytes": "26382"
},
{
"name": "JavaScript",
"bytes": "986"
},
{
"name": "Python",
"bytes": "389664"
},
{
"name": "Shell",
"bytes": "1284"
}
],
"symlink_target": ""
} |
def extractMengmengmeng100WordpressCom(item):
    """Feed parser for 'mengmengmeng100.wordpress.com'.

    Returns None for preview posts or items without a chapter/volume,
    a release message for the first recognised tag, and False when no
    tag matches.
    """
    title = item['title']
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
    if not (chp or vol) or "preview" in title.lower():
        return None
    known_tags = (
        ('mphcd', 'Meng Po’s Husband Chasing Diary', 'translated'),
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    )
    for tag, series, tl_type in known_tags:
        if tag in item['tags']:
            return buildReleaseMessageWithType(item, series, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
"content_hash": "0dcdfd6bcc9c23fca496f19763c2c7ac",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 31.238095238095237,
"alnum_prop": 0.6189024390243902,
"repo_name": "fake-name/ReadableWebProxy",
"id": "85bfe011c31b0c759ebf39966eb8d0760393f8e3",
"size": "658",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractMengmengmeng100WordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from datetime import date, datetime
from functools import partial
from flask import Blueprint
from flask.ext.restplus import fields, Api
from . import TestCase
class FieldTestCase(TestCase):
    """Base class for field schema tests: gives every test a fresh Api
    mounted on a blueprint so fields needing an app context can resolve."""
    field_class = None  # subclasses set the field type under test

    def setUp(self):
        super(FieldTestCase, self).setUp()
        blueprint = Blueprint('api', __name__)
        self.api = Api(blueprint)
        self.app.register_blueprint(blueprint)
class BaseFieldTestMixin(object):
    """Assertions shared by every field type: description, title,
    required and readOnly handling in the generated schema."""

    def test_description(self):
        field = self.field_class(description='A description')
        self.assertIn('description', field.__schema__)
        self.assertEqual(field.__schema__['description'], 'A description')

    def test_title(self):
        field = self.field_class(title='A title')
        self.assertIn('title', field.__schema__)
        self.assertEqual(field.__schema__['title'], 'A title')

    def test_required(self):
        field = self.field_class(required=True)
        self.assertTrue(field.required)

    def test_readonly(self):
        field = self.field_class(readonly=True)
        self.assertIn('readOnly', field.__schema__)
        self.assertTrue(field.__schema__['readOnly'])
class NumberTestMixin(object):
    """Assertions shared by numeric fields: min/max bounds, exclusive
    bounds and multipleOf in the generated schema."""

    def test_min(self):
        field = self.field_class(min=0)
        self.assertIn('minimum', field.__schema__)
        self.assertEqual(field.__schema__['minimum'], 0)
        self.assertNotIn('exclusiveMinimum', field.__schema__)

    # Fix: renamed from misspelled test_min_exlusive (still discovered by
    # unittest via the test_ prefix, so no behavioural change).
    def test_min_exclusive(self):
        field = self.field_class(min=0, exclusiveMin=True)
        self.assertIn('minimum', field.__schema__)
        self.assertEqual(field.__schema__['minimum'], 0)
        self.assertIn('exclusiveMinimum', field.__schema__)
        self.assertEqual(field.__schema__['exclusiveMinimum'], True)

    def test_max(self):
        field = self.field_class(max=42)
        self.assertIn('maximum', field.__schema__)
        self.assertEqual(field.__schema__['maximum'], 42)
        self.assertNotIn('exclusiveMaximum', field.__schema__)

    def test_max_exclusive(self):
        field = self.field_class(max=42, exclusiveMax=True)
        self.assertIn('maximum', field.__schema__)
        self.assertEqual(field.__schema__['maximum'], 42)
        self.assertIn('exclusiveMaximum', field.__schema__)
        self.assertEqual(field.__schema__['exclusiveMaximum'], True)

    # Fix: renamed from misspelled test_mulitple_of.
    def test_multiple_of(self):
        field = self.field_class(multiple=5)
        self.assertIn('multipleOf', field.__schema__)
        self.assertEqual(field.__schema__['multipleOf'], 5)
class StringTestMixin(object):
    """Assertions shared by string-like fields: minLength, maxLength
    and pattern in the generated schema."""

    def test_min_length(self):
        field = self.field_class(min_length=1)
        self.assertIn('minLength', field.__schema__)
        self.assertEqual(field.__schema__['minLength'], 1)

    def test_max_length(self):
        field = self.field_class(max_length=42)
        self.assertIn('maxLength', field.__schema__)
        self.assertEqual(field.__schema__['maxLength'], 42)

    def test_pattern(self):
        field = self.field_class(pattern='[a-z]')
        self.assertIn('pattern', field.__schema__)
        self.assertEqual(field.__schema__['pattern'], '[a-z]')
class RawFieldTest(BaseFieldTestMixin, FieldTestCase):
    """Schema tests for fields.Raw (maps to an untyped 'object')."""
    field_class = fields.Raw

    def test_type(self):
        field = fields.Raw()
        self.assertEqual(field.__schema__['type'], 'object')

    def test_default(self):
        field = fields.Raw(default='aaa')
        self.assertEqual(field.__schema__['default'], 'aaa')
class StringFieldTest(StringTestMixin, BaseFieldTestMixin, FieldTestCase):
    """Schema tests for fields.String, including enum and discriminator
    behaviour."""
    field_class = fields.String

    def test_defaults(self):
        field = fields.String()
        self.assertFalse(field.required)
        self.assertFalse(field.discriminator)
        self.assertEqual(field.__schema__, {'type': 'string'})

    def test_with_enum(self):
        # The first enum member is advertised as the schema example.
        enum = ['A', 'B', 'C']
        field = fields.String(enum=enum)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'enum': enum, 'example': enum[0]})

    def test_with_callable_enum(self):
        # A callable enum is resolved when the schema is produced.
        enum = lambda: ['A', 'B', 'C']  # noqa
        field = fields.String(enum=enum)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'enum': ['A', 'B', 'C'], 'example': 'A'})

    def test_with_default(self):
        field = fields.String(default='aaa')
        self.assertEqual(field.__schema__, {'type': 'string', 'default': 'aaa'})

    def test_string_field_with_discriminator(self):
        # A discriminator field is implicitly required.
        field = fields.String(discriminator=True)
        self.assertTrue(field.discriminator)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})

    def test_string_field_with_discriminator_override_require(self):
        # required=False must not override the implicit requirement.
        field = fields.String(discriminator=True, required=False)
        self.assertTrue(field.discriminator)
        self.assertTrue(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})

    def test_discriminator_output(self):
        # Marshalling fills the discriminator with the model name.
        model = self.api.model('Test', {
            'name': fields.String(discriminator=True),
        })
        data = self.api.marshal({}, model)
        self.assertEqual(data, {'name': 'Test'})

    def test_multiple_discriminator_field(self):
        # Only one discriminator per model is allowed.
        model = self.api.model('Test', {
            'name': fields.String(discriminator=True),
            'name2': fields.String(discriminator=True),
        })
        with self.assertRaises(ValueError):
            self.api.marshal(object(), model)
class IntegerFieldTest(BaseFieldTestMixin, NumberTestMixin, FieldTestCase):
    """Schema tests for fields.Integer."""
    field_class = fields.Integer

    def test_defaults(self):
        field = fields.Integer()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer'})

    def test_with_default(self):
        field = fields.Integer(default=42)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'integer', 'default': 42})
class BooleanFieldTest(BaseFieldTestMixin, FieldTestCase):
    """Schema tests for fields.Boolean."""
    field_class = fields.Boolean

    def test_defaults(self):
        field = fields.Boolean()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean'})

    def test_with_default(self):
        field = fields.Boolean(default=True)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'boolean', 'default': True})
class FloatFieldTest(BaseFieldTestMixin, NumberTestMixin, FieldTestCase):
    """Schema tests for fields.Float (maps to JSON-schema 'number')."""
    field_class = fields.Float

    def test_defaults(self):
        field = fields.Float()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})

    def test_with_default(self):
        field = fields.Float(default=0.5)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'default': 0.5})
class FixedFieldTest(BaseFieldTestMixin, NumberTestMixin, FieldTestCase):
    """Schema tests for fields.Fixed (fixed-precision 'number')."""
    field_class = fields.Fixed

    def test_defaults(self):
        field = fields.Fixed()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})

    def test_with_default(self):
        field = fields.Fixed(default=0.5)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'default': 0.5})
class ArbitraryFieldTest(BaseFieldTestMixin, NumberTestMixin, FieldTestCase):
    """Schema tests for fields.Arbitrary (arbitrary-precision 'number')."""
    field_class = fields.Arbitrary

    def test_defaults(self):
        field = fields.Arbitrary()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number'})

    def test_with_default(self):
        field = fields.Arbitrary(default=0.5)
        # Consistency fix: every sibling field test also asserts that
        # providing a default does not make the field required.
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'number', 'default': 0.5})
class DatetimeFieldTest(BaseFieldTestMixin, FieldTestCase):
    """Schema tests for fields.DateTime: bounds may be given as strings,
    dates or datetimes and are serialised to ISO format."""
    field_class = fields.DateTime

    def test_defaults(self):
        field = fields.DateTime()
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string', 'format': 'date-time'})

    def test_with_default(self):
        field = fields.DateTime(default='2014-08-25')
        self.assertEqual(field.__schema__, {'type': 'string', 'format': 'date-time', 'default': '2014-08-25'})

    def test_min(self):
        field = fields.DateTime(min='1984-06-07')
        self.assertIn('minimum', field.__schema__)
        self.assertEqual(field.__schema__['minimum'], '1984-06-07')
        self.assertNotIn('exclusiveMinimum', field.__schema__)

    def test_min_as_date(self):
        field = fields.DateTime(min=date(1984, 6, 7))
        self.assertIn('minimum', field.__schema__)
        self.assertEqual(field.__schema__['minimum'], '1984-06-07')
        self.assertNotIn('exclusiveMinimum', field.__schema__)

    def test_min_as_datetime(self):
        field = fields.DateTime(min=datetime(1984, 6, 7, 1, 2, 0))
        self.assertIn('minimum', field.__schema__)
        self.assertEqual(field.__schema__['minimum'], '1984-06-07T01:02:00')
        self.assertNotIn('exclusiveMinimum', field.__schema__)

    # Fix: renamed from misspelled test_min_exlusive (unittest still
    # discovers it via the test_ prefix, so no behavioural change).
    def test_min_exclusive(self):
        field = fields.DateTime(min='1984-06-07', exclusiveMin=True)
        self.assertIn('minimum', field.__schema__)
        self.assertEqual(field.__schema__['minimum'], '1984-06-07')
        self.assertIn('exclusiveMinimum', field.__schema__)
        self.assertEqual(field.__schema__['exclusiveMinimum'], True)

    def test_max(self):
        field = fields.DateTime(max='1984-06-07')
        self.assertIn('maximum', field.__schema__)
        self.assertEqual(field.__schema__['maximum'], '1984-06-07')
        self.assertNotIn('exclusiveMaximum', field.__schema__)

    def test_max_as_date(self):
        field = fields.DateTime(max=date(1984, 6, 7))
        self.assertIn('maximum', field.__schema__)
        self.assertEqual(field.__schema__['maximum'], '1984-06-07')
        self.assertNotIn('exclusiveMaximum', field.__schema__)

    def test_max_as_datetime(self):
        field = fields.DateTime(max=datetime(1984, 6, 7, 1, 2, 0))
        self.assertIn('maximum', field.__schema__)
        self.assertEqual(field.__schema__['maximum'], '1984-06-07T01:02:00')
        self.assertNotIn('exclusiveMaximum', field.__schema__)

    def test_max_exclusive(self):
        field = fields.DateTime(max='1984-06-07', exclusiveMax=True)
        self.assertIn('maximum', field.__schema__)
        self.assertEqual(field.__schema__['maximum'], '1984-06-07')
        self.assertIn('exclusiveMaximum', field.__schema__)
        self.assertEqual(field.__schema__['exclusiveMaximum'], True)
class FormatedStringFieldTest(StringTestMixin, BaseFieldTestMixin, FieldTestCase):
    """Schema tests for fields.FormattedString (renders as plain 'string')."""
    # partial() pre-binds the format template so the mixins can
    # instantiate the field with keyword options only.
    field_class = partial(fields.FormattedString, 'Hello {name}')

    def test_defaults(self):
        field = fields.FormattedString('Hello {name}')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
class UrlFieldTest(StringTestMixin, BaseFieldTestMixin, FieldTestCase):
    """Schema tests for fields.Url (renders as plain 'string')."""
    # partial() pre-binds the endpoint name for the mixin tests.
    field_class = partial(fields.Url, 'endpoint')

    def test_defaults(self):
        field = fields.Url('endpoint')
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'string'})
class NestedFieldTest(FieldTestCase):
    """Schema tests for fields.Nested: $ref emission, metadata passthrough,
    allow_null and as_list behaviour."""

    def test_defaults(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})

    def test_with_required(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, required=True)
        self.assertTrue(field.required)
        self.assertFalse(field.allow_null)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})

    def test_with_description(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, description='A description')
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel', 'description': 'A description'})

    def test_with_title(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, title='A title')
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel', 'title': 'A title'})

    def test_with_allow_null(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, allow_null=True)
        self.assertFalse(field.required)
        self.assertTrue(field.allow_null)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})

    def test_with_readonly(self):
        # Uses a second Api bound directly to the app rather than the
        # blueprint-mounted one created in setUp().
        api = Api(self.app)
        nested_fields = api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, readonly=True)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel', 'readOnly': True})

    def test_as_list(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, as_list=True)
        self.assertTrue(field.as_list)
        self.assertEqual(field.__schema__, {'type': 'array', 'items': {'$ref': '#/definitions/NestedModel'}})

    def test_as_list_is_reusable(self):
        # as_list=True on one instance must not leak into later instances
        # built from the same model.
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.Nested(nested_fields, as_list=True)
        self.assertEqual(field.__schema__, {'type': 'array', 'items': {'$ref': '#/definitions/NestedModel'}})
        field = fields.Nested(nested_fields)
        self.assertEqual(field.__schema__, {'$ref': '#/definitions/NestedModel'})
class ListFieldTest(BaseFieldTestMixin, FieldTestCase):
    """Schema tests for fields.List: item typing and array constraints."""
    field_class = partial(fields.List, fields.String)

    def test_defaults(self):
        field = fields.List(fields.String)
        self.assertFalse(field.required)
        self.assertEqual(field.__schema__, {'type': 'array', 'items': {'type': 'string'}})

    def test_with_nested_field(self):
        nested_fields = self.api.model('NestedModel', {'name': fields.String})
        field = fields.List(fields.Nested(nested_fields))
        self.assertEqual(field.__schema__, {'type': 'array', 'items': {'$ref': '#/definitions/NestedModel'}})

    def test_min_items(self):
        field = fields.List(fields.String, min_items=5)
        self.assertIn('minItems', field.__schema__)
        self.assertEqual(field.__schema__['minItems'], 5)

    def test_max_items(self):
        field = fields.List(fields.String, max_items=42)
        self.assertIn('maxItems', field.__schema__)
        self.assertEqual(field.__schema__['maxItems'], 42)

    def test_unique(self):
        field = fields.List(fields.String, unique=True)
        self.assertIn('uniqueItems', field.__schema__)
        self.assertEqual(field.__schema__['uniqueItems'], True)
class ClassNameFieldTest(StringTestMixin, BaseFieldTestMixin, FieldTestCase):
    """Schema and marshalling tests for fields.ClassName."""
    field_class = fields.ClassName

    def test_simple_string_field(self):
        field = fields.ClassName()
        self.assertFalse(field.required)
        self.assertFalse(field.discriminator)
        self.assertEqual(field.__schema__, {'type': 'string'})

    def test_default_output_classname(self):
        # Marshalling emits the marshalled object's class name.
        model = self.api.model('Test', {
            'name': fields.ClassName(),
        })

        class FakeClass(object):
            pass

        data = self.api.marshal(FakeClass(), model)
        self.assertEqual(data, {'name': 'FakeClass'})

    def test_output_dash(self):
        # With dash=True the CamelCase name is converted to snake_case
        # (per the library's expected output asserted below).
        model = self.api.model('Test', {
            'name': fields.ClassName(dash=True),
        })

        class FakeClass(object):
            pass

        data = self.api.marshal(FakeClass(), model)
        self.assertEqual(data, {'name': 'fake_class'})
class PolymorphTest(FieldTestCase):
    """Marshalling tests for fields.Polymorph: class-to-model mapping,
    validation of the mapping, defaults and discriminator interplay."""

    def test_polymorph_field(self):
        # Each concrete class marshals with its own inherited model.
        parent = self.api.model('Person', {
            'name': fields.String,
        })

        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })

        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })

        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'

        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'

        mapping = {
            Child1: child1,
            Child2: child2
        }

        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })

        def data(cls):
            return self.api.marshal({'owner': cls()}, thing)

        self.assertEqual(data(Child1), {'owner': {
            'name': 'child1',
            'extra1': 'extra1'
        }})

        self.assertEqual(data(Child2), {'owner': {
            'name': 'child2',
            'extra2': 'extra2'
        }})

    def test_polymorph_field_no_common_ancestor(self):
        # Mapped models must share a common ancestor model.
        child1 = self.api.model('Child1', {
            'extra1': fields.String,
        })

        child2 = self.api.model('Child2', {
            'extra2': fields.String,
        })

        class Child1(object):
            pass

        class Child2(object):
            pass

        mapping = {
            Child1: child1,
            Child2: child2
        }

        with self.assertRaises(ValueError):
            fields.Polymorph(mapping)

    def test_polymorph_field_unknown_class(self):
        # Marshalling an object of an unmapped class is an error.
        parent = self.api.model('Person', {
            'name': fields.String,
        })

        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })

        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })

        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'

        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'

        mapping = {
            Child1: child1,
            Child2: child2
        }

        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })

        with self.assertRaises(ValueError):
            self.api.marshal({'owner': object()}, thing)

    def test_polymorph_field_ambiguous_mapping(self):
        # An instance matching several mapped classes (here via Python
        # inheritance) cannot be resolved.
        parent = self.api.model('Parent', {
            'name': fields.String,
        })

        child = self.api.inherit('Child', parent, {
            'extra': fields.String,
        })

        class Parent(object):
            name = 'parent'

        class Child(Parent):
            extra = 'extra'

        mapping = {
            Parent: parent,
            Child: child
        }

        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })

        with self.assertRaises(ValueError):
            self.api.marshal({'owner': Child()}, thing)

    def test_polymorph_field_required_default(self):
        # A missing value falls back to the provided default dict.
        parent = self.api.model('Person', {
            'name': fields.String,
        })

        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })

        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })

        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'

        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'

        mapping = {
            Child1: child1,
            Child2: child2
        }

        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping, required=True, default={'name': 'default'}),
        })

        data = self.api.marshal({}, thing)

        self.assertEqual(data, {'owner': {
            'name': 'default'
        }})

    def test_polymorph_field_not_required(self):
        # Without required/default a missing value marshals to None.
        parent = self.api.model('Person', {
            'name': fields.String,
        })

        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })

        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })

        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'

        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'

        mapping = {
            Child1: child1,
            Child2: child2
        }

        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })

        data = self.api.marshal({}, thing)

        self.assertEqual(data, {'owner': None})

    def test_polymorph_with_discriminator(self):
        # The discriminator field is populated with the resolved model name.
        parent = self.api.model('Person', {
            'name': fields.String,
            'model': fields.String(discriminator=True),
        })

        child1 = self.api.inherit('Child1', parent, {
            'extra1': fields.String,
        })

        child2 = self.api.inherit('Child2', parent, {
            'extra2': fields.String,
        })

        class Child1(object):
            name = 'child1'
            extra1 = 'extra1'

        class Child2(object):
            name = 'child2'
            extra2 = 'extra2'

        mapping = {
            Child1: child1,
            Child2: child2
        }

        thing = self.api.model('Thing', {
            'owner': fields.Polymorph(mapping),
        })

        def data(cls):
            return self.api.marshal({'owner': cls()}, thing)

        self.assertEqual(data(Child1), {'owner': {
            'name': 'child1',
            'model': 'Child1',
            'extra1': 'extra1'
        }})

        self.assertEqual(data(Child2), {'owner': {
            'name': 'child2',
            'model': 'Child2',
            'extra2': 'extra2'
        }})
class CustomFieldTest(FieldTestCase):
    """A field subclass overriding __schema_format__ must surface that
    format in its generated schema."""

    def test_custom_field(self):
        class Int64Field(fields.Integer):
            __schema_format__ = 'int64'

        schema = Int64Field().__schema__
        self.assertEqual(schema, {'type': 'integer', 'format': 'int64'})
| {
"content_hash": "14cf9a9eb90a0fddef78aa9538fbe1d0",
"timestamp": "",
"source": "github",
"line_count": 675,
"max_line_length": 113,
"avg_line_length": 33.01037037037037,
"alnum_prop": 0.593752804954672,
"repo_name": "luminusnetworks/flask-restplus",
"id": "121c88295e24bceaf1830950a487fef15264e8e8",
"size": "22306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_swagger_fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3254"
},
{
"name": "Python",
"bytes": "276946"
}
],
"symlink_target": ""
} |
from model.contact import Contact
from random import randrange
def test_modify_first_contact(app):
    """Edit a randomly chosen contact and verify only that entry changed."""
    if app.contact.count() == 0:
        # Precondition: there must be something to edit.
        app.contact.create(Contact(firstname='test'))
    before = app.contact.get_contact_list()
    target = randrange(len(before))
    replacement = Contact(firstname="", lastname="", title="Test", company="Test",
                          mobilephone="00000000", email="test@test.ru")
    # Keep the edited record's id so the list comparison below lines up.
    replacement.id = before[target].id
    app.contact.edit_contact_by_index(target, replacement)
    after = app.contact.get_contact_list()
    assert len(before) == len(after)
    before[target] = replacement
    assert sorted(before, key=Contact.id_or_max) == sorted(after, key=Contact.id_or_max)
#def test_edit_first_contact_to_empty(app):
# if app.contact.count() == 0:
# app.contact.create(Contact(firstname='test'))
# old_contacts = app.contact.get_contact_list()
# contact = Contact(firstname="", lastname="", title="", company="", mobilephone="", email="")
# contact.id = old_contacts[0].id
# app.contact.edit_first(contact)
# new_contacts = app.contact.get_contact_list()
# assert len(old_contacts) == len(new_contacts)
# old_contacts[0] = contact
# assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max) | {
"content_hash": "42990e89a676a982f89a5c2ee4fac1be",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 124,
"avg_line_length": 45.724137931034484,
"alnum_prop": 0.6855203619909502,
"repo_name": "trrigger/python_training",
"id": "27cef9dc7af1a6e98309ea2ef43c480b5cfb7447",
"size": "1326",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_edit_contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "558172"
}
],
"symlink_target": ""
} |
from PyQt4 import QtCore, QtGui
from vistrails.core.system import get_vistrails_basic_pkg_id
from vistrails.gui.theme import CurrentTheme
from vistrails.gui.modules.utils import get_widget_class
from vistrails.gui.modules.constant_configuration import ConstantWidgetMixin, \
StandardConstantWidget
from vistrails.core.modules.module_registry import get_module_registry
class QAliasSliderWidget(QtGui.QWidget):
    """Labelled slider row for editing an aliased numeric pipeline parameter.

    Wraps a QSliderWidget configured from the alias component's
    min/max/step metadata and re-emits its change and focus notifications
    with this widget as the source.
    """
    def __init__(self, alias, vtparam, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.alias = alias
        self.vtparam = vtparam
        # NOTE(review): setColor() is applied to the palette copy returned
        # by palette(); without a following setPalette() call this may have
        # no visible effect -- confirm intended behaviour.
        self.palette().setColor(QtGui.QPalette.Window,
                                CurrentTheme.METHOD_SELECT_COLOR)
        label = QtGui.QLabel(alias.name)
        # NOTE(review): font() likewise returns a copy; setBold(True) here
        # probably does not change the rendered label -- confirm.
        label.font().setBold(True)
        self.value = QSliderWidget(param=vtparam, parent=self)
        # Slider bounds, step and initial value come from the alias metadata.
        self.value.setRange(alias.component.minVal, alias.component.maxVal)
        self.value.setSingleStep(alias.component.stepSize)
        self.value.setContents(self.alias.component.val)
        self.connect(self.value,
                     QtCore.SIGNAL("contentsChanged"),
                     self.contents_changed)
        hbox = QtGui.QHBoxLayout()
        hbox.setMargin(8)
        hbox.addWidget(label)
        hbox.addWidget(self.value)
        self.setLayout(hbox)

    def contents_changed(self, info):
        # Re-emit with this widget attached so listeners know which alias
        # produced the change.
        self.emit(QtCore.SIGNAL('contentsChanged'), (self, info))

    def focusInEvent(self, event):
        # Lets containers highlight the alias row that currently has focus.
        self.emit(QtCore.SIGNAL("receivedfocus"), self)

    def focusOutEvent(self, event):
        self.emit(QtCore.SIGNAL("removedfocus"), self)
###############################################################################
class QSliderWidget(ConstantWidgetMixin, QtGui.QSlider):
    """Horizontal slider editing an Integer or Float constant.

    QSlider only handles ints, so float values are encoded as scaled
    integer positions: position = (value - floatMinVal) / floatStepSize,
    and decoded back in contents()/change_val().
    """
    contentsChanged = QtCore.pyqtSignal(tuple)
    def __init__(self, param, parent=None):
        QtGui.QSlider.__init__(self, QtCore.Qt.Horizontal, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        assert param.type in['Integer', 'Float']
        # Used to cast the decoded float back to the parameter's type.
        self.sliderType = int if param.type == 'Integer' else float
        assert param.identifier == get_vistrails_basic_pkg_id()
        self.connect(self, QtCore.SIGNAL('valueChanged(int)'),self.change_val)
        QtGui.QSlider.setSingleStep(self, 1)
        QtGui.QSlider.setPageStep(self, 5)
        # Defaults; callers normally override via setRange()/setSingleStep().
        self.floatMinVal = 0.0
        self.floatMaxVal = 1.0
        self.floatStepSize = 1
        self.numSteps = 1
        self.setContents(param.strValue)
        self.setTickPosition(QtGui.QSlider.TicksAbove)
    def contents(self):
        """Decode the slider position back to the parameter's value."""
        floatVal = float(self.value()) * self.floatStepSize + self.floatMinVal
        return self.sliderType(floatVal)
    def setContents(self, strValue, silent=True):
        """ encodes a number to a scaled integer """
        if strValue:
            value = strValue
        else:
            value = "0.0"
        floatVal = float(value)
        # Truncating division: values between steps snap toward the minimum.
        value = int((floatVal-self.floatMinVal)/self.floatStepSize)
        self.setValue(int(value))
        self.setToolTip("%g" % floatVal)
        if not silent:
            self.update_parent()
    def change_val(self, newval):
        """ decodes a scaled integer to the correct number """
        floatVal = float(newval) * self.floatStepSize + self.floatMinVal
        self.setToolTip("%g" % floatVal)
        self.update_parent()
    def setRange(self, minVal, maxVal):
        """Set the float range; the integer range is derived in setSingleStep."""
        self.floatMinVal = float(minVal)
        self.floatMaxVal = float(maxVal)
        # Placeholder integer range; immediately recomputed below.
        QtGui.QSlider.setRange(self, 0, 1)
        self.setSingleStep(self.floatStepSize)
    def setSingleStep(self, stepSize):
        """ stepSize tells the step between values. We need to calculate the
        number of steps """
        self.floatStepSize = float(stepSize)
        self.numSteps = int((self.floatMaxVal - self.floatMinVal)/self.floatStepSize)
        QtGui.QSlider.setRange(self, 0, self.numSteps)
###############################################################################
class QAliasNumericStepperWidget(QtGui.QWidget):
    """Composite widget showing an alias name next to a spin-box editor.

    Chooses an integer or float spin box based on the alias component type
    and re-emits its change/focus events with this widget as the sender.
    """
    def __init__(self, alias, vtparam, parent=None):
        """alias: mashup alias (provides name and component min/max/step/val).
        vtparam: the underlying VisTrails parameter edited by the stepper.
        """
        QtGui.QWidget.__init__(self, parent)
        self.alias = alias
        self.vtparam = vtparam
        self.palette().setColor(QtGui.QPalette.Window,
                                CurrentTheme.METHOD_SELECT_COLOR)
        label = QtGui.QLabel(alias.name)
        label.font().setBold(True)
        # NOTE(review): if component.type is neither "Integer" nor "Float",
        # self.value is never created and the code below raises — presumably
        # callers only construct this widget for numeric aliases; verify.
        if self.alias.component.type == "Integer":
            self.value = QNumericStepperIntegerWidget(param=vtparam,
                                                      parent=self)
            self.value.setRange(int(alias.component.minVal),
                                int(alias.component.maxVal))
            self.value.setSingleStep(int(alias.component.stepSize))
            self.value.setContents(self.alias.component.val)
        elif self.alias.component.type == "Float":
            self.value = QNumericStepperFloatWidget(param=vtparam,
                                                    parent=self)
            self.value.setRange(float(alias.component.minVal),
                                float(alias.component.maxVal))
            self.value.setSingleStep(float(alias.component.stepSize))
            self.value.setContents(self.alias.component.val)
        # Connected after setContents, so the initial assignment does not
        # trigger contents_changed.
        self.connect(self.value,
                     QtCore.SIGNAL("contentsChanged"),
                     self.contents_changed)
        hbox = QtGui.QHBoxLayout()
        hbox.setMargin(8)
        hbox.addWidget(label)
        hbox.addWidget(self.value)
        self.setLayout(hbox)
    def contents_changed(self, info):
        """Re-emit the spin box's change with (this widget, info) as payload."""
        #print "drop down emitting"
        self.emit(QtCore.SIGNAL('contentsChanged'), (self, info))
    def focusInEvent(self, event):
        # Lets the containing mashup editor track which alias is active.
        self.emit(QtCore.SIGNAL("receivedfocus"), self)
    def focusOutEvent(self, event):
        self.emit(QtCore.SIGNAL("removedfocus"), self)
###############################################################################
class QNumericStepperIntegerWidget(ConstantWidgetMixin, QtGui.QSpinBox):
    """Spin-box editor for an Integer constant of the basic package."""
    contentsChanged = QtCore.pyqtSignal(object, object)

    def __init__(self, param, parent=None):
        QtGui.QSpinBox.__init__(self, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        assert param.type == 'Integer'
        assert param.identifier == get_vistrails_basic_pkg_id()
        self.connect(self, QtCore.SIGNAL('valueChanged(int)'),
                     self.change_val)
        self.setContents(param.strValue)

    def contents(self):
        """Return the current spin-box value as an int."""
        return self.value()

    def setContents(self, strValue, silent=True):
        """Set the value from a string; empty/None is treated as 0."""
        self.setValue(int(strValue or "0"))
        if not silent:
            self.update_parent()

    def change_val(self, newval):
        # Any GUI-driven change is pushed up to the parent widget.
        self.update_parent()
###############################################################################
class QNumericStepperFloatWidget(ConstantWidgetMixin, QtGui.QDoubleSpinBox):
    """Spin-box editor for a Float constant of the basic package."""
    contentsChanged = QtCore.pyqtSignal(tuple)

    def __init__(self, param, parent=None):
        QtGui.QDoubleSpinBox.__init__(self, parent)
        ConstantWidgetMixin.__init__(self, param.strValue)
        assert param.type == 'Float'
        assert param.identifier == get_vistrails_basic_pkg_id()
        self.connect(self, QtCore.SIGNAL('valueChanged(double)'),
                     self.change_val)
        self.setContents(param.strValue)

    def contents(self):
        """Return the current spin-box value as a float."""
        return self.value()

    def setContents(self, strValue, silent=True):
        """Set the value from a string; empty/None is treated as 0."""
        self.setValue(float(strValue or "0"))
        if not silent:
            self.update_parent()

    def change_val(self, newval):
        # Any GUI-driven change is pushed up to the parent widget.
        self.update_parent()
###############################################################################
class QDropDownWidget(QtGui.QWidget):
    """Alias editor offering a value widget plus a drop-down of preset values.

    The drop-down is a custom QMenu whose entries are read-only copies of
    the alias's constant widget, each paired with a radio button; picking
    an entry copies its contents into the editable value widget.
    """
    def __init__(self, alias, vtparam, parent=None):
        """alias: mashup alias (provides name, current value and valueList).
        vtparam: the underlying VisTrails parameter being edited.
        """
        QtGui.QWidget.__init__(self, parent)
        self.alias = alias
        self.vtparam = vtparam
        self.palette().setColor(QtGui.QPalette.Window,
                                CurrentTheme.METHOD_SELECT_COLOR)
        label = QtGui.QLabel(alias.name)
        label.font().setBold(True)
        self.value = self.createAliasWidget(val=self.alias.component.val,
                                            parent=self)
        self.connect(self.value,
                     QtCore.SIGNAL("contentsChanged"),
                     self.contents_changed)
        self.dropdownbtn = QtGui.QToolButton(self)
        self.dropdownbtn.setArrowType(QtCore.Qt.DownArrow)
        self.dropdownbtn.setAutoRaise(True)
        #menu button
        self.createMenu()
        self.dropdownbtn.setPopupMode(QtGui.QToolButton.InstantPopup)
        hbox = QtGui.QHBoxLayout()
        hbox.setMargin(8)
        hbox.addWidget(label)
        hbox.addWidget(self.value)
        hbox.addWidget(self.dropdownbtn)
        self.setLayout(hbox)
    def createMenu(self):
        """Build the drop-down menu: one (radio button, value widget) row per
        entry in the alias's valueList, wired so either control selects it."""
        self.menu = QMenuValue(self)
        self.menu.setSizePolicy(QtGui.QSizePolicy.Preferred,
                                QtGui.QSizePolicy.Maximum)
        mbox = QtGui.QVBoxLayout()
        mbox.setSpacing(1)
        mbox.setMargin(2)
        # Maps each radio button to its (read-only) value widget; consulted
        # in value_selected() to find the chosen entry.
        self.menu_widgets = {}
        valuelist = self.alias.component.valueList
        for v in valuelist:
            hbox = QtGui.QHBoxLayout()
            rb = QMenuRadioButton()
            rb.setChecked(False)
            vw = self.createMenuAliasWidget(val=v, parent=self)
            vw.setSizePolicy(QtGui.QSizePolicy.Preferred,
                             QtGui.QSizePolicy.Maximum)
            vw.setReadOnly(True)
            self.menu_widgets[rb] = vw
            hbox.addWidget(rb)
            hbox.addWidget(vw)
            mbox.addLayout(hbox)
            # Clicking the radio button closes the menu; clicking the value
            # widget checks its radio button (see QMenuValue.mousePressEvent).
            self.connect(rb,
                         QtCore.SIGNAL("clicked(bool)"),
                         self.menu.hide)
            self.connect(vw,
                         QtCore.SIGNAL("clicked(bool)"),
                         rb.setChecked)
        self.menu.setLayout(mbox)
        self.dropdownbtn.setMenu(self.menu)
        #there's a bug on a mac that causes the menu to be always displayed
        #where it was shown for the first time... We need to ensure
        #the right position.
        self.connect(self.menu,
                     QtCore.SIGNAL("aboutToShow()"),
                     self.ensure_menu_position)
        # The selection is applied when the menu closes, whatever closed it.
        self.connect(self.menu,
                     QtCore.SIGNAL("aboutToHide()"),
                     self.value_selected)
    def contents_changed(self, info):
        """Re-emit the value widget's change with (this widget, info)."""
        #print "drop down emitting"
        self.emit(QtCore.SIGNAL('contentsChanged'), (self, info))
    def ensure_menu_position(self):
        """Reposition the menu just below the drop-down button (mac workaround)."""
        #print self.dropdownbtn.pos(),
        newpos = QtCore.QPoint(self.dropdownbtn.pos().x(),
                               self.dropdownbtn.pos().y() + self.dropdownbtn.frameSize().height())
        self.menu.move(self.mapToGlobal(newpos))
        #print self.menu.pos()
    def createAliasWidget(self, val=None, parent=None):
        """Instantiate the registered constant widget for this parameter type,
        optionally pre-loaded with val (which overwrites vtparam.strValue)."""
        if self.vtparam.identifier == '':
            idn = get_vistrails_basic_pkg_id()
        else:
            idn = self.vtparam.identifier
        reg = get_module_registry()
        p_descriptor = reg.get_descriptor_by_name(idn, self.vtparam.type,
                                                  self.vtparam.namespace)
        widget_type = get_widget_class(p_descriptor)
        if val:
            self.vtparam.strValue = val
        return widget_type(self.vtparam, parent)
    def createMenuAliasWidget(self, val=None, parent=None):
        """Like createAliasWidget, but wrapped as a clickable menu entry."""
        widget = self.createAliasWidget(val)
        return QMenuValueItem(widget, parent)
    def value_selected(self):
        """Copy the checked menu entry's contents into the value widget,
        then reset the selection and close the menu."""
        #print "value_selected", self.menu.pos()
        for rb, vw in self.menu_widgets.iteritems():
            if rb.isChecked():
                self.value.setContents(vw.contents(), silent=False)
                vw.setFocus()
                rb.setChecked(False)
                self.menu.hide()
                break
    def focusInEvent(self, event):
        # Lets the containing mashup editor track which alias is active.
        self.emit(QtCore.SIGNAL("receivedfocus"), self)
    def focusOutEvent(self, event):
        self.emit(QtCore.SIGNAL("removedfocus"), self)
class QMenuRadioButton(QtGui.QRadioButton):
    """Radio button used inside QMenuValue entries; checks itself on focus."""
    def focusInEvent(self, event):
        # Receiving keyboard focus is enough to select this option.
        self.setChecked(True)
        QtGui.QRadioButton.focusInEvent(self, event)
class QMenuValue(QtGui.QMenu):
    """Menu whose rows embed QMenuValueItem widgets.

    A mouse press anywhere inside an entry is translated into that
    entry's clicked(bool) signal.
    """
    def mousePressEvent(self, e):
        hit = self.childAt(e.pos())
        # Walk up the parent chain from whatever child was actually hit
        # until we reach the enclosing QMenuValueItem (or run out).
        while not (hit is None or isinstance(hit, QMenuValueItem)):
            hit = hit.parent()
        if hit is not None:
            hit.emit(QtCore.SIGNAL("clicked(bool)"), True)
        QtGui.QMenu.mousePressEvent(self, e)
class QMenuValueItem(QtGui.QWidget):
    """Wraps a constant widget so it can serve as a selectable menu entry."""
    def __init__(self, widget, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.widget = widget
        layout = QtGui.QVBoxLayout()
        layout.setSpacing(0)
        layout.setMargin(0)
        layout.addWidget(self.widget)
        self.setLayout(layout)

    def setReadOnly(self, on):
        # "Read-only" is emulated by disabling the whole entry.
        self.setEnabled(not on)

    def contents(self):
        """Delegate to the wrapped widget's contents()."""
        return self.widget.contents()

    def mousePressEvent(self, e):
        self.emit(QtCore.SIGNAL("clicked(bool)"), True)
| {
"content_hash": "09c4925c6a9cbbbe41397dae1ebec633",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 98,
"avg_line_length": 38.82022471910113,
"alnum_prop": 0.5704052098408104,
"repo_name": "Nikea/VisTrails",
"id": "810f4c4efa2ae861d4c96f3a2d454c7a86d2857a",
"size": "15700",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vistrails/gui/mashups/mashups_widgets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1421"
},
{
"name": "Inno Setup",
"bytes": "19611"
},
{
"name": "Makefile",
"bytes": "768"
},
{
"name": "Mako",
"bytes": "66415"
},
{
"name": "PHP",
"bytes": "49038"
},
{
"name": "Python",
"bytes": "19674395"
},
{
"name": "R",
"bytes": "778864"
},
{
"name": "Rebol",
"bytes": "3972"
},
{
"name": "Shell",
"bytes": "34182"
},
{
"name": "TeX",
"bytes": "145219"
},
{
"name": "XSLT",
"bytes": "1090"
}
],
"symlink_target": ""
} |
from sys import maxsize
class Group:
    """Value object describing a contact group (id, name, header, footer)."""

    def __init__(self, name=None, header=None, footer=None, id=None):
        self.name = name
        self.header = header
        self.footer = footer
        self.id = id

    def __repr__(self):
        return "%s:%s %s %s" % (self.id, self.name, self.header, self.footer)

    def __eq__(self, other):
        # Groups match when the names agree; a missing id on either side
        # acts as a wildcard (lets DB rows match freshly created objects).
        ids_compatible = self.id is None or other.id is None or self.id == other.id
        return ids_compatible and self.name == other.name

    def id_or_max(self):
        """Sort key: numeric id, with id-less groups ordered last."""
        return int(self.id) if self.id else maxsize
| {
"content_hash": "44253144f41a58a32888e9f762261132",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 103,
"avg_line_length": 21.53846153846154,
"alnum_prop": 0.5678571428571428,
"repo_name": "wsszczecin/python_training",
"id": "642c3c8cfe745dec1f55552e7b3edeab6cde0f24",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/group.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "35825"
}
],
"symlink_target": ""
} |
# Prefer the maintained pydot_ng fork; fall back to classic pydot.
try:
    import pydot_ng as pydot
except ImportError:
    import pydot
# Fail fast at import time if the Graphviz binaries are not available,
# since every rendering call below would fail anyway.
if not pydot.find_graphviz():
    raise RuntimeError('Failed to import pydot. You must install pydot'
                       ' and graphviz for `pydotprint` to work.')
'''
Keras network-architecture visualization code, copied here and extended
with some essential per-layer information in the node labels.
'''
def model_to_dot(model, show_shapes=False, show_layer_names=True):
    """Build a pydot graph describing a Keras model's architecture.

    Compared to the stock Keras visualization, node labels are enriched
    with per-layer details: filter geometry and parameter counts for
    Convolution2D, activation names, pooling size/stride, dropout rate,
    and parameter counts for Dense layers.

    Args:
        model: a Keras model; Sequential models are unwrapped to their
            underlying functional model (building them first if needed).
        show_shapes: when True, append an input/output shape table to
            each node's record label.
        show_layer_names: when True, prefix each label with the layer name.

    Returns:
        A pydot.Dot graph laid out top-to-bottom.
    """
    dot = pydot.Dot()
    dot.set('rankdir', 'TB')
    dot.set('concentrate', True)
    dot.set_node_defaults(shape='record')

    if model.__class__.__name__ == 'Sequential':
        if not model.built:
            model.build()
        model = model.model
    layers = model.layers

    # First pass: one node per layer, keyed by the layer object's id().
    for layer in layers:
        layer_id = str(id(layer))
        if show_layer_names:
            label = str(layer.name) + ' (' + layer.__class__.__name__ + ')'
        else:
            label = layer.__class__.__name__
        # Enrich the label with layer-type-specific details.
        if layer.__class__.__name__ == "Convolution2D":
            label += "\nFilters: %d * (%d * %d) %s" % (layer.nb_filter, layer.nb_row, layer.nb_col, layer.border_mode)
            label += "\n|Params:\n%d" % (layer.count_params())
        elif layer.__class__.__name__ == "Activation":
            label += "\n%s" % (layer.get_config()['activation'].capitalize())
        elif layer.__class__.__name__ == "MaxPooling2D":
            label += "\nPooling: %s Stride: %s" % (str(layer.pool_size), str(layer.strides[0]) if layer.strides[0] == layer.strides[1] else str(layer.strides))
        elif layer.__class__.__name__ == "Dropout":
            label += "\nDropout: %f" % (layer.p, )
        elif layer.__class__.__name__ == "Dense":
            label += "\n|Params:\n%d" % (layer.count_params())
        if show_shapes:
            # Build the label that will actually contain a table with the
            # input/output shapes.
            # Was a bare `except:`; narrowed so KeyboardInterrupt and
            # friends are no longer swallowed.
            try:
                outputlabels = str(layer.output_shape)
            except Exception:
                # Layers shared across several nodes have no single shape.
                outputlabels = 'multiple'
            if hasattr(layer, 'input_shape'):
                inputlabels = str(layer.input_shape)
            elif hasattr(layer, 'input_shapes'):
                inputlabels = ', '.join(
                    [str(ishape) for ishape in layer.input_shapes])
            else:
                inputlabels = 'multiple'
            label = '%s\n|{input:|output:}|{{%s}|{%s}}' % (label, inputlabels, outputlabels)
        node = pydot.Node(layer_id, label=label)
        dot.add_node(node)

    # Second pass: add edges, restricted to nodes that are part of this
    # model's graph (container_nodes).
    for layer in layers:
        layer_id = str(id(layer))
        for i, node in enumerate(layer.inbound_nodes):
            node_key = layer.name + '_ib-' + str(i)
            if node_key in model.container_nodes:
                for inbound_layer in node.inbound_layers:
                    inbound_layer_id = str(id(inbound_layer))
                    dot.add_edge(pydot.Edge(inbound_layer_id, layer_id))
    return dot
def plot(model, to_file='model.png', show_shapes=False, show_layer_names=True):
    """Render the model's architecture graph as a PNG file at *to_file*."""
    model_to_dot(model, show_shapes, show_layer_names).write_png(to_file)
def output_architecture(model, out_path):
    """Write an architecture diagram (including shapes) of *model* to *out_path*."""
    plot(model, out_path, True)
| {
"content_hash": "f0700b694fc9be7baea7fc56385dd724",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 159,
"avg_line_length": 40.48192771084337,
"alnum_prop": 0.5577380952380953,
"repo_name": "sjtudesigner/NeuralSight",
"id": "3b7f6fb929c2a1ac26a2d97b58be0b5a485e31ca",
"size": "3360",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/architecture_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1575"
},
{
"name": "HTML",
"bytes": "10086"
},
{
"name": "Python",
"bytes": "17040"
}
],
"symlink_target": ""
} |
import time
from indy import anoncreds, crypto, did, ledger, pool, wallet, blob_storage
import json
import logging
import argparse
import sys
from ctypes import *
from os.path import dirname
from indy.error import ErrorCode, IndyError
from src.utils import get_pool_genesis_txn_path, run_coroutine, PROTOCOL_VERSION
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

# Command-line options for optionally loading a custom wallet-storage
# plug-in (e.g. the postgres backend) before the scenario runs.
# Fixed: the -c and -s help strings were copy-pasted from -e and wrongly
# read "entry point for dynamic library".
parser = argparse.ArgumentParser(description='Run python getting-started scenario (Alice/Faber)')
parser.add_argument('-t', '--storage_type', help='load custom wallet storage plug-in')
parser.add_argument('-l', '--library', help='dynamic library to load for plug-in')
parser.add_argument('-e', '--entrypoint', help='entry point for dynamic library')
parser.add_argument('-c', '--config', help='configuration for the storage plug-in')
parser.add_argument('-s', '--creds', help='credentials for the storage plug-in')
args = parser.parse_args()
# check if we need to dyna-load a custom wallet storage plug-in
if args.storage_type:
    # Both the shared library path and its registration entry point are
    # required; bail out with usage help otherwise.
    if not (args.library and args.entrypoint):
        parser.print_help()
        sys.exit(0)
    # Load the shared library and call its registration entry point; a
    # non-zero return code means the plug-in failed to register.
    stg_lib = CDLL(args.library)
    result = stg_lib[args.entrypoint]()
    if result != 0:
        print("Error unable to load wallet storage", result)
        parser.print_help()
        sys.exit(0)

    # for postgres storage, also call the storage init (non-standard)
    if args.storage_type == "postgres_storage":
        try:
            print("Calling init_storagetype() for postgres:", args.config, args.creds)
            init_storagetype = stg_lib["init_storagetype"]
            # ctypes needs the JSON config/creds as C strings.
            c_config = c_char_p(args.config.encode('utf-8'))
            c_credentials = c_char_p(args.creds.encode('utf-8'))
            result = init_storagetype(c_config, c_credentials)
            print(" ... returns ", result)
        except RuntimeError as e:
            # Init failure is deliberately non-fatal (best-effort).
            print("Error initializing storage, ignoring ...", e)

    print("Success, loaded wallet storage", args.storage_type)
async def run():
logger.info("Getting started -> started")
pool_ = {
'name': 'pool1'
}
logger.info("Open Pool Ledger: {}".format(pool_['name']))
pool_['genesis_txn_path'] = get_pool_genesis_txn_path(pool_['name'])
pool_['config'] = json.dumps({"genesis_txn": str(pool_['genesis_txn_path'])})
# Set protocol version 2 to work with Indy Node 1.4
await pool.set_protocol_version(PROTOCOL_VERSION)
try:
await pool.create_pool_ledger_config(pool_['name'], pool_['config'])
except IndyError as ex:
if ex.error_code == ErrorCode.PoolLedgerConfigAlreadyExistsError:
pass
pool_['handle'] = await pool.open_pool_ledger(pool_['name'], None)
logger.info("==============================")
logger.info("=== Getting Trust Anchor credentials for Faber, Acme, Thrift and Government ==")
logger.info("------------------------------")
logger.info("\"Sovrin Steward\" -> Create wallet")
steward = {
'name': "Sovrin Steward",
'wallet_config': json.dumps({'id': 'sovrin_steward_wallet'}),
'wallet_credentials': json.dumps({'key': 'steward_wallet_key'}),
'pool': pool_['handle'],
'seed': '000000000000000000000000Steward1'
}
try:
await wallet.create_wallet(steward['wallet_config'], steward['wallet_credentials'])
except IndyError as ex:
if ex.error_code == ErrorCode.WalletAlreadyExistsError:
pass
steward['wallet'] = await wallet.open_wallet(steward['wallet_config'], steward['wallet_credentials'])
logger.info("\"Sovrin Steward\" -> Create and store in Wallet DID from seed")
steward['did_info'] = json.dumps({'seed': steward['seed']})
steward['did'], steward['key'] = await did.create_and_store_my_did(steward['wallet'], steward['did_info'])
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Government Onboarding ==")
logger.info("------------------------------")
government = {
'name': 'Government',
'wallet_config': json.dumps({'id': 'government_wallet'}),
'wallet_credentials': json.dumps({'key': 'government_wallet_key'}),
'pool': pool_['handle'],
'role': 'TRUST_ANCHOR'
}
steward['did_for_government'], steward['key_for_government'], government['did_for_steward'], \
government['key_for_steward'], _ = await onboarding(steward, government)
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Government getting Verinym ==")
logger.info("------------------------------")
government['did'] = await get_verinym(steward, steward['did_for_government'], steward['key_for_government'],
government, government['did_for_steward'], government['key_for_steward'])
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Faber Onboarding ==")
logger.info("------------------------------")
faber = {
'name': 'Faber',
'wallet_config': json.dumps({'id': 'faber_wallet'}),
'wallet_credentials': json.dumps({'key': 'faber_wallet_key'}),
'pool': pool_['handle'],
'role': 'TRUST_ANCHOR'
}
steward['did_for_faber'], steward['key_for_faber'], faber['did_for_steward'], faber['key_for_steward'], _ = \
await onboarding(steward, faber)
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Faber getting Verinym ==")
logger.info("------------------------------")
faber['did'] = \
await get_verinym(steward, steward['did_for_faber'], steward['key_for_faber'],
faber, faber['did_for_steward'], faber['key_for_steward'])
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Acme Onboarding ==")
logger.info("------------------------------")
acme = {
'name': 'Acme',
'wallet_config': json.dumps({'id': 'acme_wallet'}),
'wallet_credentials': json.dumps({'key': 'acme_wallet_key'}),
'pool': pool_['handle'],
'role': 'TRUST_ANCHOR'
}
steward['did_for_acme'], steward['key_for_acme'], acme['did_for_steward'], acme['key_for_steward'], _ = \
await onboarding(steward, acme)
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Acme getting Verinym ==")
logger.info("------------------------------")
acme['did'] = await get_verinym(steward, steward['did_for_acme'], steward['key_for_acme'],
acme, acme['did_for_steward'], acme['key_for_steward'])
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Thrift Onboarding ==")
logger.info("------------------------------")
thrift = {
'name': 'Thrift',
'wallet_config': json.dumps({'id': 'thrift_wallet'}),
'wallet_credentials': json.dumps({'key': 'thrift_wallet_key'}),
'pool': pool_['handle'],
'role': 'TRUST_ANCHOR'
}
steward['did_for_thrift'], steward['key_for_thrift'], thrift['did_for_steward'], thrift['key_for_steward'], _ = \
await onboarding(steward, thrift)
logger.info("==============================")
logger.info("== Getting Trust Anchor credentials - Thrift getting Verinym ==")
logger.info("------------------------------")
thrift['did'] = await get_verinym(steward, steward['did_for_thrift'], steward['key_for_thrift'],
thrift, thrift['did_for_steward'], thrift['key_for_steward'])
logger.info("==============================")
logger.info("=== Credential Schemas Setup ==")
logger.info("------------------------------")
logger.info("\"Government\" -> Create \"Job-Certificate\" Schema")
job_certificate = {
'name': 'Job-Certificate',
'version': '0.2',
'attributes': ['first_name', 'last_name', 'salary', 'employee_status', 'experience']
}
(government['job_certificate_schema_id'], government['job_certificate_schema']) = \
await anoncreds.issuer_create_schema(government['did'], job_certificate['name'], job_certificate['version'],
json.dumps(job_certificate['attributes']))
job_certificate_schema_id = government['job_certificate_schema_id']
logger.info("\"Government\" -> Send \"Job-Certificate\" Schema to Ledger")
await send_schema(government['pool'], government['wallet'], government['did'], government['job_certificate_schema'])
logger.info("\"Government\" -> Create \"Transcript\" Schema")
transcript = {
'name': 'Transcript',
'version': '1.2',
'attributes': ['first_name', 'last_name', 'degree', 'status', 'year', 'average', 'ssn']
}
(government['transcript_schema_id'], government['transcript_schema']) = \
await anoncreds.issuer_create_schema(government['did'], transcript['name'], transcript['version'],
json.dumps(transcript['attributes']))
transcript_schema_id = government['transcript_schema_id']
logger.info("\"Government\" -> Send \"Transcript\" Schema to Ledger")
await send_schema(government['pool'], government['wallet'], government['did'], government['transcript_schema'])
time.sleep(1) # sleep 1 second before getting schema
logger.info("==============================")
logger.info("=== Faber Credential Definition Setup ==")
logger.info("------------------------------")
logger.info("\"Faber\" -> Get \"Transcript\" Schema from Ledger")
(faber['transcript_schema_id'], faber['transcript_schema']) = \
await get_schema(faber['pool'], faber['did'], transcript_schema_id)
logger.info("\"Faber\" -> Create and store in Wallet \"Faber Transcript\" Credential Definition")
transcript_cred_def = {
'tag': 'TAG1',
'type': 'CL',
'config': {"support_revocation": False}
}
(faber['transcript_cred_def_id'], faber['transcript_cred_def']) = \
await anoncreds.issuer_create_and_store_credential_def(faber['wallet'], faber['did'],
faber['transcript_schema'], transcript_cred_def['tag'],
transcript_cred_def['type'],
json.dumps(transcript_cred_def['config']))
logger.info("\"Faber\" -> Send \"Faber Transcript\" Credential Definition to Ledger")
await send_cred_def(faber['pool'], faber['wallet'], faber['did'], faber['transcript_cred_def'])
logger.info("==============================")
logger.info("=== Acme Credential Definition Setup ==")
logger.info("------------------------------")
logger.info("\"Acme\" -> Get from Ledger \"Job-Certificate\" Schema")
(acme['job_certificate_schema_id'], acme['job_certificate_schema']) = \
await get_schema(acme['pool'], acme['did'], job_certificate_schema_id)
logger.info("\"Acme\" -> Create and store in Wallet \"Acme Job-Certificate\" Credential Definition")
job_certificate_cred_def = {
'tag': 'TAG1',
'type': 'CL',
'config': {"support_revocation": True}
}
(acme['job_certificate_cred_def_id'], acme['job_certificate_cred_def']) = \
await anoncreds.issuer_create_and_store_credential_def(acme['wallet'], acme['did'],
acme['job_certificate_schema'],
job_certificate_cred_def['tag'],
job_certificate_cred_def['type'],
json.dumps(job_certificate_cred_def['config']))
logger.info("\"Acme\" -> Send \"Acme Job-Certificate\" Credential Definition to Ledger")
await send_cred_def(acme['pool'], acme['wallet'], acme['did'], acme['job_certificate_cred_def'])
logger.info("\"Acme\" -> Creates Revocation Registry")
acme['tails_writer_config'] = json.dumps({'base_dir': "/tmp/indy_acme_tails", 'uri_pattern': ''})
tails_writer = await blob_storage.open_writer('default', acme['tails_writer_config'])
(acme['revoc_reg_id'], acme['revoc_reg_def'], acme['revoc_reg_entry']) = \
await anoncreds.issuer_create_and_store_revoc_reg(acme['wallet'], acme['did'], 'CL_ACCUM', 'TAG1',
acme['job_certificate_cred_def_id'],
json.dumps({'max_cred_num': 5,
'issuance_type': 'ISSUANCE_ON_DEMAND'}),
tails_writer)
logger.info("\"Acme\" -> Post Revocation Registry Definition to Ledger")
acme['revoc_reg_def_request'] = await ledger.build_revoc_reg_def_request(acme['did'], acme['revoc_reg_def'])
await ledger.sign_and_submit_request(acme['pool'], acme['wallet'], acme['did'], acme['revoc_reg_def_request'])
logger.info("\"Acme\" -> Post Revocation Registry Entry to Ledger")
acme['revoc_reg_entry_request'] = \
await ledger.build_revoc_reg_entry_request(acme['did'], acme['revoc_reg_id'], 'CL_ACCUM',
acme['revoc_reg_entry'])
await ledger.sign_and_submit_request(acme['pool'], acme['wallet'], acme['did'], acme['revoc_reg_entry_request'])
logger.info("==============================")
logger.info("=== Getting Transcript with Faber ==")
logger.info("==============================")
logger.info("== Getting Transcript with Faber - Onboarding ==")
logger.info("------------------------------")
alice = {
'name': 'Alice',
'wallet_config': json.dumps({'id': 'alice_wallet'}),
'wallet_credentials': json.dumps({'key': 'alice_wallet_key'}),
'pool': pool_['handle'],
}
faber['did_for_alice'], faber['key_for_alice'], alice['did_for_faber'], alice['key_for_faber'], \
faber['alice_connection_response'] = await onboarding(faber, alice)
logger.info("==============================")
logger.info("== Getting Transcript with Faber - Getting Transcript Credential ==")
logger.info("------------------------------")
logger.info("\"Faber\" -> Create \"Transcript\" Credential Offer for Alice")
faber['transcript_cred_offer'] = \
await anoncreds.issuer_create_credential_offer(faber['wallet'], faber['transcript_cred_def_id'])
logger.info("\"Faber\" -> Get key for Alice did")
faber['alice_key_for_faber'] = \
await did.key_for_did(faber['pool'], faber['wallet'], faber['alice_connection_response']['did'])
logger.info("\"Faber\" -> Authcrypt \"Transcript\" Credential Offer for Alice")
faber['authcrypted_transcript_cred_offer'] = \
await crypto.auth_crypt(faber['wallet'], faber['key_for_alice'], faber['alice_key_for_faber'],
faber['transcript_cred_offer'].encode('utf-8'))
logger.info("\"Faber\" -> Send authcrypted \"Transcript\" Credential Offer to Alice")
alice['authcrypted_transcript_cred_offer'] = faber['authcrypted_transcript_cred_offer']
logger.info("\"Alice\" -> Authdecrypted \"Transcript\" Credential Offer from Faber")
alice['faber_key_for_alice'], alice['transcript_cred_offer'], authdecrypted_transcript_cred_offer = \
await auth_decrypt(alice['wallet'], alice['key_for_faber'], alice['authcrypted_transcript_cred_offer'])
alice['transcript_schema_id'] = authdecrypted_transcript_cred_offer['schema_id']
alice['transcript_cred_def_id'] = authdecrypted_transcript_cred_offer['cred_def_id']
logger.info("\"Alice\" -> Create and store \"Alice\" Master Secret in Wallet")
alice['master_secret_id'] = await anoncreds.prover_create_master_secret(alice['wallet'], None)
logger.info("\"Alice\" -> Get \"Faber Transcript\" Credential Definition from Ledger")
(alice['faber_transcript_cred_def_id'], alice['faber_transcript_cred_def']) = \
await get_cred_def(alice['pool'], alice['did_for_faber'], alice['transcript_cred_def_id'])
logger.info("\"Alice\" -> Create \"Transcript\" Credential Request for Faber")
(alice['transcript_cred_request'], alice['transcript_cred_request_metadata']) = \
await anoncreds.prover_create_credential_req(alice['wallet'], alice['did_for_faber'],
alice['transcript_cred_offer'], alice['faber_transcript_cred_def'],
alice['master_secret_id'])
logger.info("\"Alice\" -> Authcrypt \"Transcript\" Credential Request for Faber")
alice['authcrypted_transcript_cred_request'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_faber'], alice['faber_key_for_alice'],
alice['transcript_cred_request'].encode('utf-8'))
logger.info("\"Alice\" -> Send authcrypted \"Transcript\" Credential Request to Faber")
faber['authcrypted_transcript_cred_request'] = alice['authcrypted_transcript_cred_request']
logger.info("\"Faber\" -> Authdecrypt \"Transcript\" Credential Request from Alice")
faber['alice_key_for_faber'], faber['transcript_cred_request'], _ = \
await auth_decrypt(faber['wallet'], faber['key_for_alice'], faber['authcrypted_transcript_cred_request'])
logger.info("\"Faber\" -> Create \"Transcript\" Credential for Alice")
faber['alice_transcript_cred_values'] = json.dumps({
"first_name": {"raw": "Alice", "encoded": "1139481716457488690172217916278103335"},
"last_name": {"raw": "Garcia", "encoded": "5321642780241790123587902456789123452"},
"degree": {"raw": "Bachelor of Science, Marketing", "encoded": "12434523576212321"},
"status": {"raw": "graduated", "encoded": "2213454313412354"},
"ssn": {"raw": "123-45-6789", "encoded": "3124141231422543541"},
"year": {"raw": "2015", "encoded": "2015"},
"average": {"raw": "5", "encoded": "5"}
})
faber['transcript_cred'], _, _ = \
await anoncreds.issuer_create_credential(faber['wallet'], faber['transcript_cred_offer'],
faber['transcript_cred_request'],
faber['alice_transcript_cred_values'], None, None)
logger.info("\"Faber\" -> Authcrypt \"Transcript\" Credential for Alice")
faber['authcrypted_transcript_cred'] = \
await crypto.auth_crypt(faber['wallet'], faber['key_for_alice'], faber['alice_key_for_faber'],
faber['transcript_cred'].encode('utf-8'))
logger.info("\"Faber\" -> Send authcrypted \"Transcript\" Credential to Alice")
alice['authcrypted_transcript_cred'] = faber['authcrypted_transcript_cred']
logger.info("\"Alice\" -> Authdecrypted \"Transcript\" Credential from Faber")
_, alice['transcript_cred'], _ = \
await auth_decrypt(alice['wallet'], alice['key_for_faber'], alice['authcrypted_transcript_cred'])
logger.info("\"Alice\" -> Store \"Transcript\" Credential from Faber")
_, alice['transcript_cred_def'] = await get_cred_def(alice['pool'], alice['did_for_faber'],
alice['transcript_cred_def_id'])
await anoncreds.prover_store_credential(alice['wallet'], None, alice['transcript_cred_request_metadata'],
alice['transcript_cred'], alice['transcript_cred_def'], None)
logger.info("==============================")
logger.info("=== Apply for the job with Acme ==")
logger.info("==============================")
logger.info("== Apply for the job with Acme - Onboarding ==")
logger.info("------------------------------")
acme['did_for_alice'], acme['key_for_alice'], alice['did_for_acme'], alice['key_for_acme'], \
acme['alice_connection_response'] = await onboarding(acme, alice)
logger.info("==============================")
logger.info("== Apply for the job with Acme - Transcript proving ==")
logger.info("------------------------------")
logger.info("\"Acme\" -> Create \"Job-Application\" Proof Request")
nonce = await anoncreds.generate_nonce()
acme['job_application_proof_request'] = json.dumps({
'nonce': nonce,
'name': 'Job-Application',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {
'name': 'first_name'
},
'attr2_referent': {
'name': 'last_name'
},
'attr3_referent': {
'name': 'degree',
'restrictions': [{'cred_def_id': faber['transcript_cred_def_id']}]
},
'attr4_referent': {
'name': 'status',
'restrictions': [{'cred_def_id': faber['transcript_cred_def_id']}]
},
'attr5_referent': {
'name': 'ssn',
'restrictions': [{'cred_def_id': faber['transcript_cred_def_id']}]
},
'attr6_referent': {
'name': 'phone_number'
}
},
'requested_predicates': {
'predicate1_referent': {
'name': 'average',
'p_type': '>=',
'p_value': 4,
'restrictions': [{'cred_def_id': faber['transcript_cred_def_id']}]
}
}
})
logger.info("\"Acme\" -> Get key for Alice did")
acme['alice_key_for_acme'] = \
await did.key_for_did(acme['pool'], acme['wallet'], acme['alice_connection_response']['did'])
logger.info("\"Acme\" -> Authcrypt \"Job-Application\" Proof Request for Alice")
acme['authcrypted_job_application_proof_request'] = \
await crypto.auth_crypt(acme['wallet'], acme['key_for_alice'], acme['alice_key_for_acme'],
acme['job_application_proof_request'].encode('utf-8'))
logger.info("\"Acme\" -> Send authcrypted \"Job-Application\" Proof Request to Alice")
alice['authcrypted_job_application_proof_request'] = acme['authcrypted_job_application_proof_request']
logger.info("\"Alice\" -> Authdecrypt \"Job-Application\" Proof Request from Acme")
alice['acme_key_for_alice'], alice['job_application_proof_request'], _ = \
await auth_decrypt(alice['wallet'], alice['key_for_acme'], alice['authcrypted_job_application_proof_request'])
logger.info("\"Alice\" -> Get credentials for \"Job-Application\" Proof Request")
search_for_job_application_proof_request = \
await anoncreds.prover_search_credentials_for_proof_req(alice['wallet'],
alice['job_application_proof_request'], None)
cred_for_attr1 = await get_credential_for_referent(search_for_job_application_proof_request, 'attr1_referent')
cred_for_attr2 = await get_credential_for_referent(search_for_job_application_proof_request, 'attr2_referent')
cred_for_attr3 = await get_credential_for_referent(search_for_job_application_proof_request, 'attr3_referent')
cred_for_attr4 = await get_credential_for_referent(search_for_job_application_proof_request, 'attr4_referent')
cred_for_attr5 = await get_credential_for_referent(search_for_job_application_proof_request, 'attr5_referent')
cred_for_predicate1 = \
await get_credential_for_referent(search_for_job_application_proof_request, 'predicate1_referent')
await anoncreds.prover_close_credentials_search_for_proof_req(search_for_job_application_proof_request)
alice['creds_for_job_application_proof'] = {cred_for_attr1['referent']: cred_for_attr1,
cred_for_attr2['referent']: cred_for_attr2,
cred_for_attr3['referent']: cred_for_attr3,
cred_for_attr4['referent']: cred_for_attr4,
cred_for_attr5['referent']: cred_for_attr5,
cred_for_predicate1['referent']: cred_for_predicate1}
alice['schemas_for_job_application'], alice['cred_defs_for_job_application'], \
alice['revoc_states_for_job_application'] = \
await prover_get_entities_from_ledger(alice['pool'], alice['did_for_acme'],
alice['creds_for_job_application_proof'], alice['name'])
logger.info("\"Alice\" -> Create \"Job-Application\" Proof")
alice['job_application_requested_creds'] = json.dumps({
'self_attested_attributes': {
'attr1_referent': 'Alice',
'attr2_referent': 'Garcia',
'attr6_referent': '123-45-6789'
},
'requested_attributes': {
'attr3_referent': {'cred_id': cred_for_attr3['referent'], 'revealed': True},
'attr4_referent': {'cred_id': cred_for_attr4['referent'], 'revealed': True},
'attr5_referent': {'cred_id': cred_for_attr5['referent'], 'revealed': True},
},
'requested_predicates': {'predicate1_referent': {'cred_id': cred_for_predicate1['referent']}}
})
alice['job_application_proof'] = \
await anoncreds.prover_create_proof(alice['wallet'], alice['job_application_proof_request'],
alice['job_application_requested_creds'], alice['master_secret_id'],
alice['schemas_for_job_application'],
alice['cred_defs_for_job_application'],
alice['revoc_states_for_job_application'])
logger.info("\"Alice\" -> Authcrypt \"Job-Application\" Proof for Acme")
alice['authcrypted_job_application_proof'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_acme'], alice['acme_key_for_alice'],
alice['job_application_proof'].encode('utf-8'))
logger.info("\"Alice\" -> Send authcrypted \"Job-Application\" Proof to Acme")
acme['authcrypted_job_application_proof'] = alice['authcrypted_job_application_proof']
logger.info("\"Acme\" -> Authdecrypted \"Job-Application\" Proof from Alice")
_, acme['job_application_proof'], decrypted_job_application_proof = \
await auth_decrypt(acme['wallet'], acme['key_for_alice'], acme['authcrypted_job_application_proof'])
acme['schemas_for_job_application'], acme['cred_defs_for_job_application'], \
acme['revoc_ref_defs_for_job_application'], acme['revoc_regs_for_job_application'] = \
await verifier_get_entities_from_ledger(acme['pool'], acme['did'],
decrypted_job_application_proof['identifiers'], acme['name'])
logger.info("\"Acme\" -> Verify \"Job-Application\" Proof from Alice")
assert 'Bachelor of Science, Marketing' == \
decrypted_job_application_proof['requested_proof']['revealed_attrs']['attr3_referent']['raw']
assert 'graduated' == \
decrypted_job_application_proof['requested_proof']['revealed_attrs']['attr4_referent']['raw']
assert '123-45-6789' == \
decrypted_job_application_proof['requested_proof']['revealed_attrs']['attr5_referent']['raw']
assert 'Alice' == decrypted_job_application_proof['requested_proof']['self_attested_attrs']['attr1_referent']
assert 'Garcia' == decrypted_job_application_proof['requested_proof']['self_attested_attrs']['attr2_referent']
assert '123-45-6789' == decrypted_job_application_proof['requested_proof']['self_attested_attrs']['attr6_referent']
assert await anoncreds.verifier_verify_proof(acme['job_application_proof_request'], acme['job_application_proof'],
acme['schemas_for_job_application'],
acme['cred_defs_for_job_application'],
acme['revoc_ref_defs_for_job_application'],
acme['revoc_regs_for_job_application'])
logger.info("==============================")
logger.info("== Apply for the job with Acme - Getting Job-Certificate Credential ==")
logger.info("------------------------------")
logger.info("\"Acme\" -> Create \"Job-Certificate\" Credential Offer for Alice")
acme['job_certificate_cred_offer'] = \
await anoncreds.issuer_create_credential_offer(acme['wallet'], acme['job_certificate_cred_def_id'])
logger.info("\"Acme\" -> Get key for Alice did")
acme['alice_key_for_acme'] = \
await did.key_for_did(acme['pool'], acme['wallet'], acme['alice_connection_response']['did'])
logger.info("\"Acme\" -> Authcrypt \"Job-Certificate\" Credential Offer for Alice")
acme['authcrypted_job_certificate_cred_offer'] = \
await crypto.auth_crypt(acme['wallet'], acme['key_for_alice'], acme['alice_key_for_acme'],
acme['job_certificate_cred_offer'].encode('utf-8'))
logger.info("\"Acme\" -> Send authcrypted \"Job-Certificate\" Credential Offer to Alice")
alice['authcrypted_job_certificate_cred_offer'] = acme['authcrypted_job_certificate_cred_offer']
logger.info("\"Alice\" -> Authdecrypted \"Job-Certificate\" Credential Offer from Acme")
alice['acme_key_for_alice_alice'], alice['job_certificate_cred_offer'], job_certificate_cred_offer = \
await auth_decrypt(alice['wallet'], alice['key_for_acme'], alice['authcrypted_job_certificate_cred_offer'])
logger.info("\"Alice\" -> Get \"Acme Job-Certificate\" Credential Definition from Ledger")
(alice['acme_job_certificate_cred_def_id'], alice['acme_job_certificate_cred_def']) = \
await get_cred_def(alice['pool'], alice['did_for_acme'], job_certificate_cred_offer['cred_def_id'])
logger.info("\"Alice\" -> Create and store in Wallet \"Job-Certificate\" Credential Request for Acme")
(alice['job_certificate_cred_request'], alice['job_certificate_cred_request_metadata']) = \
await anoncreds.prover_create_credential_req(alice['wallet'], alice['did_for_acme'],
alice['job_certificate_cred_offer'],
alice['acme_job_certificate_cred_def'], alice['master_secret_id'])
logger.info("\"Alice\" -> Authcrypt \"Job-Certificate\" Credential Request for Acme")
alice['authcrypted_job_certificate_cred_request'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_acme'], alice['acme_key_for_alice'],
alice['job_certificate_cred_request'].encode('utf-8'))
logger.info("\"Alice\" -> Send authcrypted \"Job-Certificate\" Credential Request to Acme")
alice['job_certificate_cred_values'] = json.dumps({
"first_name": {"raw": "Alice", "encoded": "245712572474217942457235975012103335"},
"last_name": {"raw": "Garcia", "encoded": "312643218496194691632153761283356127"},
"employee_status": {"raw": "Permanent", "encoded": "2143135425425143112321314321"},
"salary": {"raw": "2400", "encoded": "2400"},
"experience": {"raw": "10", "encoded": "10"}
})
acme['authcrypted_job_certificate_cred_request'] = alice['authcrypted_job_certificate_cred_request']
acme['job_certificate_cred_values'] = alice['job_certificate_cred_values']
logger.info("\"Acme\" -> Authdecrypt \"Job-Certificate\" Credential Request from Alice")
acme['alice_key_for_acme'], acme['job_certificate_cred_request'], _ = \
await auth_decrypt(acme['wallet'], acme['key_for_alice'], acme['authcrypted_job_certificate_cred_request'])
logger.info("\"Acme\" -> Create \"Job-Certificate\" Credential for Alice")
acme['blob_storage_reader_cfg_handle'] = await blob_storage.open_reader('default', acme['tails_writer_config'])
acme['job_certificate_cred'], acme['job_certificate_cred_rev_id'], acme['alice_cert_rev_reg_delta'] = \
await anoncreds.issuer_create_credential(acme['wallet'], acme['job_certificate_cred_offer'],
acme['job_certificate_cred_request'],
acme['job_certificate_cred_values'],
acme['revoc_reg_id'],
acme['blob_storage_reader_cfg_handle'])
logger.info("\"Acme\" -> Post Revocation Registry Delta to Ledger")
acme['revoc_reg_entry_req'] = \
await ledger.build_revoc_reg_entry_request(acme['did'], acme['revoc_reg_id'], 'CL_ACCUM',
acme['alice_cert_rev_reg_delta'])
await ledger.sign_and_submit_request(acme['pool'], acme['wallet'], acme['did'], acme['revoc_reg_entry_req'])
logger.info("\"Acme\" -> Authcrypt \"Job-Certificate\" Credential for Alice")
acme['authcrypted_job_certificate_cred'] = \
await crypto.auth_crypt(acme['wallet'], acme['key_for_alice'], acme['alice_key_for_acme'],
acme['job_certificate_cred'].encode('utf-8'))
logger.info("\"Acme\" -> Send authcrypted \"Job-Certificate\" Credential to Alice")
alice['authcrypted_job_certificate_cred'] = acme['authcrypted_job_certificate_cred']
logger.info("\"Alice\" -> Authdecrypted \"Job-Certificate\" Credential from Acme")
_, alice['job_certificate_cred'], alice_job_certificate_cred = \
await auth_decrypt(alice['wallet'], alice['key_for_acme'], alice['authcrypted_job_certificate_cred'])
logger.info("\"Alice\" -> Gets RevocationRegistryDefinition for \"Job-Certificate\" Credential from Acme")
alice['acme_revoc_reg_des_req'] = \
await ledger.build_get_revoc_reg_def_request(alice['did_for_acme'],
alice_job_certificate_cred['rev_reg_id'])
alice['acme_revoc_reg_des_resp'] = await ledger.submit_request(alice['pool'], alice['acme_revoc_reg_des_req'])
(alice['acme_revoc_reg_def_id'], alice['acme_revoc_reg_def_json']) = \
await ledger.parse_get_revoc_reg_def_response(alice['acme_revoc_reg_des_resp'])
logger.info("\"Alice\" -> Store \"Job-Certificate\" Credential")
await anoncreds.prover_store_credential(alice['wallet'], None, alice['job_certificate_cred_request_metadata'],
alice['job_certificate_cred'],
alice['acme_job_certificate_cred_def'], alice['acme_revoc_reg_def_json'])
logger.info("==============================")
logger.info("=== Apply for the loan with Thrift ==")
logger.info("==============================")
logger.info("== Apply for the loan with Thrift - Onboarding ==")
logger.info("------------------------------")
thrift['did_for_alice'], thrift['key_for_alice'], alice['did_for_thrift'], alice['key_for_thrift'], \
thrift['alice_connection_response'] = await onboarding(thrift, alice)
async def apply_loan_basic():
    """Run the Thrift 'Loan-Application-Basic' proof exchange with Alice.

    Thrift builds a proof request (employee_status attribute plus
    salary >= 2000 and experience >= 1 predicates, all restricted to Acme's
    Job-Certificate cred def, with a non-revocation bound of 'now'),
    authcrypts it to Alice; Alice gathers matching credentials, builds the
    proof with revocation states, and returns it authcrypted; Thrift then
    fetches the verifier-side ledger entities. The actual
    verifier_verify_proof assertion is done by the caller.

    Side effects: mutates the closed-over ``thrift`` and ``alice`` agent
    dicts (proof request, proof, ledger entities, keys).
    """
    # This method will be called twice: once with a valid Job-Certificate and
    # the second time after the Job-Certificate has been revoked.
    logger.info("==============================")
    logger.info("== Apply for the loan with Thrift - Job-Certificate proving ==")
    logger.info("------------------------------")
    logger.info("\"Thrift\" -> Create \"Loan-Application-Basic\" Proof Request")
    nonce = await anoncreds.generate_nonce()
    thrift['apply_loan_proof_request'] = json.dumps({
        'nonce': nonce,
        'name': 'Loan-Application-Basic',
        'version': '0.1',
        'requested_attributes': {
            'attr1_referent': {
                'name': 'employee_status',
                'restrictions': [{'cred_def_id': acme['job_certificate_cred_def_id']}]
            }
        },
        'requested_predicates': {
            'predicate1_referent': {
                'name': 'salary',
                'p_type': '>=',
                'p_value': 2000,
                'restrictions': [{'cred_def_id': acme['job_certificate_cred_def_id']}]
            },
            'predicate2_referent': {
                'name': 'experience',
                'p_type': '>=',
                'p_value': 1,
                'restrictions': [{'cred_def_id': acme['job_certificate_cred_def_id']}]
            }
        },
        # Non-revocation interval: the credential must be unrevoked as of now.
        'non_revoked': {'to': int(time.time())}
    })

    logger.info("\"Thrift\" -> Get key for Alice did")
    thrift['alice_key_for_thrift'] = \
        await did.key_for_did(thrift['pool'], thrift['wallet'], thrift['alice_connection_response']['did'])

    logger.info("\"Thrift\" -> Authcrypt \"Loan-Application-Basic\" Proof Request for Alice")
    thrift['authcrypted_apply_loan_proof_request'] = \
        await crypto.auth_crypt(thrift['wallet'], thrift['key_for_alice'], thrift['alice_key_for_thrift'],
                                thrift['apply_loan_proof_request'].encode('utf-8'))

    logger.info("\"Thrift\" -> Send authcrypted \"Loan-Application-Basic\" Proof Request to Alice")
    alice['authcrypted_apply_loan_proof_request'] = thrift['authcrypted_apply_loan_proof_request']

    logger.info("\"Alice\" -> Authdecrypt \"Loan-Application-Basic\" Proof Request from Thrift")
    alice['thrift_key_for_alice'], alice['apply_loan_proof_request'], _ = \
        await auth_decrypt(alice['wallet'], alice['key_for_thrift'], alice['authcrypted_apply_loan_proof_request'])

    logger.info("\"Alice\" -> Get credentials for \"Loan-Application-Basic\" Proof Request")
    search_for_apply_loan_proof_request = \
        await anoncreds.prover_search_credentials_for_proof_req(alice['wallet'],
                                                                alice['apply_loan_proof_request'], None)
    cred_for_attr1 = await get_credential_for_referent(search_for_apply_loan_proof_request, 'attr1_referent')
    cred_for_predicate1 = await get_credential_for_referent(search_for_apply_loan_proof_request, 'predicate1_referent')
    cred_for_predicate2 = await get_credential_for_referent(search_for_apply_loan_proof_request, 'predicate2_referent')
    await anoncreds.prover_close_credentials_search_for_proof_req(search_for_apply_loan_proof_request)

    alice['creds_for_apply_loan_proof'] = {cred_for_attr1['referent']: cred_for_attr1,
                                           cred_for_predicate1['referent']: cred_for_predicate1,
                                           cred_for_predicate2['referent']: cred_for_predicate2}

    # Reuse the verifier's 'to' bound so prover and verifier agree on the
    # revocation-state timestamp.
    requested_timestamp = int(json.loads(thrift['apply_loan_proof_request'])['non_revoked']['to'])
    alice['schemas_for_loan_app'], alice['cred_defs_for_loan_app'], alice['revoc_states_for_loan_app'] = \
        await prover_get_entities_from_ledger(alice['pool'], alice['did_for_thrift'],
                                              alice['creds_for_apply_loan_proof'],
                                              alice['name'], None, requested_timestamp)

    logger.info("\"Alice\" -> Create \"Loan-Application-Basic\" Proof")
    revoc_states_for_loan_app = json.loads(alice['revoc_states_for_loan_app'])
    timestamp_for_attr1 = get_timestamp_for_attribute(cred_for_attr1, revoc_states_for_loan_app)
    timestamp_for_predicate1 = get_timestamp_for_attribute(cred_for_predicate1, revoc_states_for_loan_app)
    timestamp_for_predicate2 = get_timestamp_for_attribute(cred_for_predicate2, revoc_states_for_loan_app)
    alice['apply_loan_requested_creds'] = json.dumps({
        'self_attested_attributes': {},
        'requested_attributes': {
            'attr1_referent': {'cred_id': cred_for_attr1['referent'], 'revealed': True, 'timestamp': timestamp_for_attr1}
        },
        'requested_predicates': {
            'predicate1_referent': {'cred_id': cred_for_predicate1['referent'], 'timestamp': timestamp_for_predicate1},
            'predicate2_referent': {'cred_id': cred_for_predicate2['referent'], 'timestamp': timestamp_for_predicate2}
        }
    })
    alice['apply_loan_proof'] = \
        await anoncreds.prover_create_proof(alice['wallet'], alice['apply_loan_proof_request'],
                                            alice['apply_loan_requested_creds'], alice['master_secret_id'],
                                            alice['schemas_for_loan_app'], alice['cred_defs_for_loan_app'],
                                            alice['revoc_states_for_loan_app'])

    logger.info("\"Alice\" -> Authcrypt \"Loan-Application-Basic\" Proof for Thrift")
    alice['authcrypted_alice_apply_loan_proof'] = \
        await crypto.auth_crypt(alice['wallet'], alice['key_for_thrift'], alice['thrift_key_for_alice'],
                                alice['apply_loan_proof'].encode('utf-8'))

    logger.info("\"Alice\" -> Send authcrypted \"Loan-Application-Basic\" Proof to Thrift")
    thrift['authcrypted_alice_apply_loan_proof'] = alice['authcrypted_alice_apply_loan_proof']

    logger.info("\"Thrift\" -> Authdecrypted \"Loan-Application-Basic\" Proof from Alice")
    _, thrift['alice_apply_loan_proof'], authdecrypted_alice_apply_loan_proof = \
        await auth_decrypt(thrift['wallet'], thrift['key_for_alice'], thrift['authcrypted_alice_apply_loan_proof'])

    logger.info("\"Thrift\" -> Get Schemas, Credential Definitions and Revocation Registries from Ledger"
                " required for Proof verifying")

    thrift['schemas_for_loan_app'], thrift['cred_defs_for_loan_app'], thrift['revoc_defs_for_loan_app'], \
        thrift['revoc_regs_for_loan_app'] = \
        await verifier_get_entities_from_ledger(thrift['pool'], thrift['did'],
                                                authdecrypted_alice_apply_loan_proof['identifiers'],
                                                thrift['name'], requested_timestamp)

    logger.info("\"Thrift\" -> Verify \"Loan-Application-Basic\" Proof from Alice")
    assert 'Permanent' == \
           authdecrypted_alice_apply_loan_proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
await apply_loan_basic()
assert await anoncreds.verifier_verify_proof(thrift['apply_loan_proof_request'],
thrift['alice_apply_loan_proof'],
thrift['schemas_for_loan_app'],
thrift['cred_defs_for_loan_app'],
thrift['revoc_defs_for_loan_app'],
thrift['revoc_regs_for_loan_app'])
logger.info("==============================")
logger.info("== Apply for the loan with Thrift - Transcript and Job-Certificate proving ==")
logger.info("------------------------------")
logger.info("\"Thrift\" -> Create \"Loan-Application-KYC\" Proof Request")
nonce = await anoncreds.generate_nonce()
thrift['apply_loan_kyc_proof_request'] = json.dumps({
'nonce': nonce,
'name': 'Loan-Application-KYC',
'version': '0.1',
'requested_attributes': {
'attr1_referent': {'name': 'first_name'},
'attr2_referent': {'name': 'last_name'},
'attr3_referent': {'name': 'ssn'}
},
'requested_predicates': {}
})
logger.info("\"Thrift\" -> Get key for Alice did")
thrift['alice_key_for_thrift'] = await did.key_for_did(thrift['pool'], thrift['wallet'],
thrift['alice_connection_response']['did'])
logger.info("\"Thrift\" -> Authcrypt \"Loan-Application-KYC\" Proof Request for Alice")
thrift['authcrypted_apply_loan_kyc_proof_request'] = \
await crypto.auth_crypt(thrift['wallet'], thrift['key_for_alice'], thrift['alice_key_for_thrift'],
thrift['apply_loan_kyc_proof_request'].encode('utf-8'))
logger.info("\"Thrift\" -> Send authcrypted \"Loan-Application-KYC\" Proof Request to Alice")
alice['authcrypted_apply_loan_kyc_proof_request'] = thrift['authcrypted_apply_loan_kyc_proof_request']
logger.info("\"Alice\" -> Authdecrypt \"Loan-Application-KYC\" Proof Request from Thrift")
alice['thrift_key_for_alice'], alice['apply_loan_kyc_proof_request'], _ = \
await auth_decrypt(alice['wallet'], alice['key_for_thrift'], alice['authcrypted_apply_loan_kyc_proof_request'])
logger.info("\"Alice\" -> Get credentials for \"Loan-Application-KYC\" Proof Request")
search_for_apply_loan_kyc_proof_request = \
await anoncreds.prover_search_credentials_for_proof_req(alice['wallet'],
alice['apply_loan_kyc_proof_request'], None)
cred_for_attr1 = await get_credential_for_referent(search_for_apply_loan_kyc_proof_request, 'attr1_referent')
cred_for_attr2 = await get_credential_for_referent(search_for_apply_loan_kyc_proof_request, 'attr2_referent')
cred_for_attr3 = await get_credential_for_referent(search_for_apply_loan_kyc_proof_request, 'attr3_referent')
await anoncreds.prover_close_credentials_search_for_proof_req(search_for_apply_loan_kyc_proof_request)
alice['creds_for_apply_loan_kyc_proof'] = {cred_for_attr1['referent']: cred_for_attr1,
cred_for_attr2['referent']: cred_for_attr2,
cred_for_attr3['referent']: cred_for_attr3}
alice['schemas_for_loan_kyc_app'], alice['cred_defs_for_loan_kyc_app'], alice['revoc_states_for_loan_kyc_app'] = \
await prover_get_entities_from_ledger(alice['pool'], alice['did_for_thrift'],
alice['creds_for_apply_loan_kyc_proof'], alice['name'],)
logger.info("\"Alice\" -> Create \"Loan-Application-KYC\" Proof")
revoc_states_for_loan_app = json.loads(alice['revoc_states_for_loan_kyc_app'])
timestamp_for_attr1 = get_timestamp_for_attribute(cred_for_attr1, revoc_states_for_loan_app)
timestamp_for_attr2 = get_timestamp_for_attribute(cred_for_attr2, revoc_states_for_loan_app)
timestamp_for_attr3 = get_timestamp_for_attribute(cred_for_attr3, revoc_states_for_loan_app)
alice['apply_loan_kyc_requested_creds'] = json.dumps({
'self_attested_attributes': {},
'requested_attributes': {
'attr1_referent': {'cred_id': cred_for_attr1['referent'], 'revealed': True, 'timestamp': timestamp_for_attr1},
'attr2_referent': {'cred_id': cred_for_attr2['referent'], 'revealed': True, 'timestamp': timestamp_for_attr2},
'attr3_referent': {'cred_id': cred_for_attr3['referent'], 'revealed': True, 'timestamp': timestamp_for_attr3}
},
'requested_predicates': {}
})
alice['apply_loan_kyc_proof'] = \
await anoncreds.prover_create_proof(alice['wallet'], alice['apply_loan_kyc_proof_request'],
alice['apply_loan_kyc_requested_creds'], alice['master_secret_id'],
alice['schemas_for_loan_kyc_app'], alice['cred_defs_for_loan_kyc_app'],
alice['revoc_states_for_loan_kyc_app'])
logger.info("\"Alice\" -> Authcrypt \"Loan-Application-KYC\" Proof for Thrift")
alice['authcrypted_alice_apply_loan_kyc_proof'] = \
await crypto.auth_crypt(alice['wallet'], alice['key_for_thrift'], alice['thrift_key_for_alice'],
alice['apply_loan_kyc_proof'].encode('utf-8'))
logger.info("\"Alice\" -> Send authcrypted \"Loan-Application-KYC\" Proof to Thrift")
thrift['authcrypted_alice_apply_loan_kyc_proof'] = alice['authcrypted_alice_apply_loan_kyc_proof']
logger.info("\"Thrift\" -> Authdecrypted \"Loan-Application-KYC\" Proof from Alice")
_, thrift['alice_apply_loan_kyc_proof'], alice_apply_loan_kyc_proof = \
await auth_decrypt(thrift['wallet'], thrift['key_for_alice'], thrift['authcrypted_alice_apply_loan_kyc_proof'])
logger.info("\"Thrift\" -> Get Schemas, Credential Definitions and Revocation Registries from Ledger"
" required for Proof verifying")
thrift['schemas_for_loan_kyc_app'], thrift['cred_defs_for_loan_kyc_app'], thrift['revoc_defs_for_loan_kyc_app'], \
thrift['revoc_regs_for_loan_kyc_app'] = \
await verifier_get_entities_from_ledger(thrift['pool'], thrift['did'],
alice_apply_loan_kyc_proof['identifiers'], thrift['name'])
logger.info("\"Thrift\" -> Verify \"Loan-Application-KYC\" Proof from Alice")
assert 'Alice' == \
alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr1_referent']['raw']
assert 'Garcia' == \
alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr2_referent']['raw']
assert '123-45-6789' == \
alice_apply_loan_kyc_proof['requested_proof']['revealed_attrs']['attr3_referent']['raw']
assert await anoncreds.verifier_verify_proof(thrift['apply_loan_kyc_proof_request'],
thrift['alice_apply_loan_kyc_proof'],
thrift['schemas_for_loan_kyc_app'], thrift['cred_defs_for_loan_kyc_app'],
thrift['revoc_defs_for_loan_kyc_app'],
thrift['revoc_regs_for_loan_kyc_app'])
logger.info("==============================")
logger.info("==============================")
logger.info("== Credential revocation - Acme revokes Alice's Job-Certificate ==")
logger.info("------------------------------")
logger.info("\"Acme\" - Revoke credential")
acme['alice_cert_rev_reg_delta'] = \
await anoncreds.issuer_revoke_credential(acme['wallet'],
acme['blob_storage_reader_cfg_handle'],
acme['revoc_reg_id'],
acme['job_certificate_cred_rev_id'])
logger.info("\"Acme\" - Post RevocationRegistryDelta to Ledger")
acme['revoc_reg_entry_req'] = \
await ledger.build_revoc_reg_entry_request(acme['did'], acme['revoc_reg_id'], 'CL_ACCUM',
acme['alice_cert_rev_reg_delta'])
await ledger.sign_and_submit_request(acme['pool'], acme['wallet'], acme['did'], acme['revoc_reg_entry_req'])
logger.info("==============================")
logger.info("==============================")
logger.info("== Apply for the loan with Thrift again - Job-Certificate proving ==")
logger.info("------------------------------")
await apply_loan_basic()
assert not await anoncreds.verifier_verify_proof(thrift['apply_loan_proof_request'],
thrift['alice_apply_loan_proof'],
thrift['schemas_for_loan_app'],
thrift['cred_defs_for_loan_app'],
thrift['revoc_defs_for_loan_app'],
thrift['revoc_regs_for_loan_app'])
logger.info("==============================")
logger.info(" \"Sovrin Steward\" -> Close and Delete wallet")
await wallet.close_wallet(steward['wallet'])
await wallet.delete_wallet(steward['wallet_config'], steward['wallet_credentials'])
logger.info("\"Government\" -> Close and Delete wallet")
await wallet.close_wallet(government['wallet'])
await wallet.delete_wallet(wallet_config("delete", government['wallet_config']), wallet_credentials("delete", government['wallet_credentials']))
logger.info("\"Faber\" -> Close and Delete wallet")
await wallet.close_wallet(faber['wallet'])
await wallet.delete_wallet(wallet_config("delete", faber['wallet_config']), wallet_credentials("delete", faber['wallet_credentials']))
logger.info("\"Acme\" -> Close and Delete wallet")
await wallet.close_wallet(acme['wallet'])
await wallet.delete_wallet(wallet_config("delete", acme['wallet_config']), wallet_credentials("delete", acme['wallet_credentials']))
logger.info("\"Thrift\" -> Close and Delete wallet")
await wallet.close_wallet(thrift['wallet'])
await wallet.delete_wallet(wallet_config("delete", thrift['wallet_config']), wallet_credentials("delete", thrift['wallet_credentials']))
logger.info("\"Alice\" -> Close and Delete wallet")
await wallet.close_wallet(alice['wallet'])
await wallet.delete_wallet(wallet_config("delete", alice['wallet_config']), wallet_credentials("delete", alice['wallet_credentials']))
logger.info("Close and Delete pool")
await pool.close_pool_ledger(pool_['handle'])
await pool.delete_pool_ledger_config(pool_['name'])
logger.info("Getting started -> done")
async def onboarding(_from, to):
    """Establish a pairwise connection between two agents.

    Each side creates a fresh pairwise DID, ``_from`` publishes both NYMs on
    the ledger, and ``to`` returns an anoncrypted connection response which
    ``_from`` authenticates by nonce comparison. If ``to`` has no wallet yet,
    one is created and opened for it.

    :param _from: inviter agent dict; must contain 'pool', 'wallet', 'did', 'name'
    :param to: invitee agent dict; 'wallet' is created/opened here on first use
    :return: tuple (from_to_did, from_to_key, to_from_did, to_from_key,
        connection_response) describing the new pairwise relationship
    """
    logger.info("\"{}\" -> Create and store in Wallet \"{} {}\" DID".format(_from['name'], _from['name'], to['name']))
    (from_to_did, from_to_key) = await did.create_and_store_my_did(_from['wallet'], "{}")

    logger.info("\"{}\" -> Send Nym to Ledger for \"{} {}\" DID".format(_from['name'], _from['name'], to['name']))
    await send_nym(_from['pool'], _from['wallet'], _from['did'], from_to_did, from_to_key, None)

    logger.info("\"{}\" -> Send connection request to {} with \"{} {}\" DID and nonce"
                .format(_from['name'], to['name'], _from['name'], to['name']))
    # NOTE(review): a fixed nonce is acceptable for this demo only; real code
    # should use a random per-connection nonce.
    connection_request = {
        'did': from_to_did,
        'nonce': 123456789
    }

    if 'wallet' not in to:
        logger.info("\"{}\" -> Create wallet".format(to['name']))
        try:
            await wallet.create_wallet(wallet_config("create", to['wallet_config']),
                                       wallet_credentials("create", to['wallet_credentials']))
        except IndyError as ex:
            # A pre-existing wallet is fine (e.g. on re-runs); every other
            # error must surface instead of being silently swallowed.
            if ex.error_code != ErrorCode.WalletAlreadyExistsError:
                raise
        to['wallet'] = await wallet.open_wallet(wallet_config("open", to['wallet_config']),
                                                wallet_credentials("open", to['wallet_credentials']))

    logger.info("\"{}\" -> Create and store in Wallet \"{} {}\" DID".format(to['name'], to['name'], _from['name']))
    (to_from_did, to_from_key) = await did.create_and_store_my_did(to['wallet'], "{}")

    logger.info("\"{}\" -> Get key for did from \"{}\" connection request".format(to['name'], _from['name']))
    from_to_verkey = await did.key_for_did(_from['pool'], to['wallet'], connection_request['did'])

    logger.info("\"{}\" -> Anoncrypt connection response for \"{}\" with \"{} {}\" DID, verkey and nonce"
                .format(to['name'], _from['name'], to['name'], _from['name']))
    to['connection_response'] = json.dumps({
        'did': to_from_did,
        'verkey': to_from_key,
        'nonce': connection_request['nonce']
    })
    to['anoncrypted_connection_response'] = \
        await crypto.anon_crypt(from_to_verkey, to['connection_response'].encode('utf-8'))

    logger.info("\"{}\" -> Send anoncrypted connection response to \"{}\"".format(to['name'], _from['name']))
    _from['anoncrypted_connection_response'] = to['anoncrypted_connection_response']

    logger.info("\"{}\" -> Anondecrypt connection response from \"{}\"".format(_from['name'], to['name']))
    _from['connection_response'] = \
        json.loads((await crypto.anon_decrypt(_from['wallet'], from_to_key,
                                              _from['anoncrypted_connection_response'])).decode("utf-8"))

    logger.info("\"{}\" -> Authenticates \"{}\" by comparision of Nonce".format(_from['name'], to['name']))
    assert connection_request['nonce'] == _from['connection_response']['nonce']

    logger.info("\"{}\" -> Send Nym to Ledger for \"{} {}\" DID".format(_from['name'], to['name'], _from['name']))
    await send_nym(_from['pool'], _from['wallet'], _from['did'], to_from_did, to_from_key, None)

    return from_to_did, from_to_key, to_from_did, to_from_key, _from['connection_response']
def wallet_config(operation, wallet_config_str):
    """Inject the CLI-selected storage settings into a wallet config JSON string.

    When no ``--storage_type`` argument was given, *wallet_config_str* is
    returned unchanged. ``operation`` is retained only for debug tracing.
    """
    if not args.storage_type:
        return wallet_config_str
    parsed = json.loads(wallet_config_str)
    parsed['storage_type'] = args.storage_type
    if args.config:
        parsed['storage_config'] = json.loads(args.config)
    # print(operation, json.dumps(parsed))  # debug trace
    return json.dumps(parsed)
def wallet_credentials(operation, wallet_credentials_str):
    """Inject the CLI-supplied storage credentials into a wallet credentials JSON string.

    When no ``--storage_type`` argument was given, *wallet_credentials_str* is
    returned unchanged. ``operation`` is retained only for debug tracing.
    """
    if not args.storage_type:
        return wallet_credentials_str
    creds = json.loads(wallet_credentials_str)
    if args.creds:
        creds['storage_credentials'] = json.loads(args.creds)
    # print(operation, json.dumps(creds))  # debug trace
    return json.dumps(creds)
async def get_verinym(_from, from_to_did, from_to_key, to, to_from_did, to_from_key):
    """Provision a Verinym for *to* through the trust anchor *_from*.

    *to* creates a new DID, authcrypts its DID info over the existing
    pairwise channel, and *_from* — after authenticating the sender's
    verkey against the ledger — publishes the NYM with *to*'s role.

    :return: the newly created DID of *to*
    """
    logger.info("\"{}\" -> Create and store in Wallet \"{}\" new DID".format(to['name'], to['name']))
    new_did, new_key = await did.create_and_store_my_did(to['wallet'], "{}")

    logger.info("\"{}\" -> Authcrypt \"{} DID info\" for \"{}\"".format(to['name'], to['name'], _from['name']))
    to['did_info'] = json.dumps({'did': new_did, 'verkey': new_key})
    to['authcrypted_did_info'] = \
        await crypto.auth_crypt(to['wallet'], to_from_key, from_to_key, to['did_info'].encode('utf-8'))

    logger.info("\"{}\" -> Send authcrypted \"{} DID info\" to {}".format(to['name'], to['name'], _from['name']))

    logger.info("\"{}\" -> Authdecrypted \"{} DID info\" from {}".format(_from['name'], to['name'], to['name']))
    sender_key, _did_info_json, did_info = \
        await auth_decrypt(_from['wallet'], from_to_key, to['authcrypted_did_info'])

    logger.info("\"{}\" -> Authenticate {} by comparision of Verkeys".format(_from['name'], to['name']))
    # The decrypting sender key must match the pairwise key registered for to_from_did.
    assert sender_key == await did.key_for_did(_from['pool'], _from['wallet'], to_from_did)

    logger.info("\"{}\" -> Send Nym to Ledger for \"{} DID\" with {} Role"
                .format(_from['name'], to['name'], to['role']))
    await send_nym(_from['pool'], _from['wallet'], _from['did'], did_info['did'],
                   did_info['verkey'], to['role'])

    return new_did
async def send_nym(pool_handle, wallet_handle, _did, new_did, new_key, role):
    """Publish a NYM transaction for *new_did*/*new_key* with *role*, signed by *_did*."""
    request = await ledger.build_nym_request(_did, new_did, new_key, None, role)
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, _did, request)
async def send_schema(pool_handle, wallet_handle, _did, schema):
    """Publish a schema transaction on the ledger, signed by *_did*."""
    request = await ledger.build_schema_request(_did, schema)
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, _did, request)
async def send_cred_def(pool_handle, wallet_handle, _did, cred_def_json):
    """Publish a credential-definition transaction on the ledger, signed by *_did*."""
    request = await ledger.build_cred_def_request(_did, cred_def_json)
    await ledger.sign_and_submit_request(pool_handle, wallet_handle, _did, request)
async def get_schema(pool_handle, _did, schema_id):
    """Look up *schema_id* on the ledger; return the parsed (id, schema) pair."""
    request = await ledger.build_get_schema_request(_did, schema_id)
    response = await ledger.submit_request(pool_handle, request)
    return await ledger.parse_get_schema_response(response)
async def get_cred_def(pool_handle, _did, cred_def_id):
    """Look up *cred_def_id* on the ledger; return the parsed (id, cred_def) pair."""
    request = await ledger.build_get_cred_def_request(_did, cred_def_id)
    response = await ledger.submit_request(pool_handle, request)
    return await ledger.parse_get_cred_def_response(response)
async def get_credential_for_referent(search_handle, referent):
    """Fetch up to 10 credentials matching *referent* and return the first cred_info."""
    fetched = await anoncreds.prover_fetch_credentials_for_proof_req(search_handle, referent, 10)
    return json.loads(fetched)[0]['cred_info']
def get_timestamp_for_attribute(cred_for_attribute, revoc_states):
    """Return the timestamp of the revocation state backing a credential.

    Looks up the credential's ``rev_reg_id`` in *revoc_states* and returns
    its first timestamp key as an int, or ``None`` when the credential's
    registry has no stored state.
    """
    rev_reg_id = cred_for_attribute['rev_reg_id']
    if rev_reg_id not in revoc_states:
        return None
    return int(next(iter(revoc_states[rev_reg_id])))
async def prover_get_entities_from_ledger(pool_handle, _did, identifiers, actor, timestamp_from=None, timestamp_to=None):
    """Gather the ledger entities a prover needs to build a proof.

    For every identifier: fetch its schema and credential definition, and —
    when the credential is revocable — build a revocation state from the
    registry definition, delta, and tails file.

    :return: (schemas, cred_defs, rev_states) as JSON strings keyed by id.
    """
    schemas = {}
    cred_defs = {}
    rev_states = {}
    for item in identifiers.values():
        logger.info("\"{}\" -> Get Schema from Ledger".format(actor))
        (received_schema_id, received_schema) = await get_schema(pool_handle, _did, item['schema_id'])
        schemas[received_schema_id] = json.loads(received_schema)
        logger.info("\"{}\" -> Get Claim Definition from Ledger".format(actor))
        (received_cred_def_id, received_cred_def) = await get_cred_def(pool_handle, _did, item['cred_def_id'])
        cred_defs[received_cred_def_id] = json.loads(received_cred_def)
        if 'rev_reg_id' in item and item['rev_reg_id'] is not None:
            # Create Revocations States
            logger.info("\"{}\" -> Get Revocation Registry Definition from Ledger".format(actor))
            get_revoc_reg_def_request = await ledger.build_get_revoc_reg_def_request(_did, item['rev_reg_id'])
            get_revoc_reg_def_response = await ledger.submit_request(pool_handle, get_revoc_reg_def_request)
            (rev_reg_id, revoc_reg_def_json) = await ledger.parse_get_revoc_reg_def_response(get_revoc_reg_def_response)
            logger.info("\"{}\" -> Get Revocation Registry Delta from Ledger".format(actor))
            # Default the window's end to "now" when the caller gave none.
            if not timestamp_to: timestamp_to = int(time.time())
            get_revoc_reg_delta_request = \
                await ledger.build_get_revoc_reg_delta_request(_did, item['rev_reg_id'], timestamp_from, timestamp_to)
            get_revoc_reg_delta_response = \
                await ledger.submit_request(pool_handle, get_revoc_reg_delta_request)
            (rev_reg_id, revoc_reg_delta_json, t) = \
                await ledger.parse_get_revoc_reg_delta_response(get_revoc_reg_delta_response)
            # The tails file lives next to the location recorded in the
            # registry definition; the reader needs its directory.
            tails_reader_config = json.dumps(
                {'base_dir': dirname(json.loads(revoc_reg_def_json)['value']['tailsLocation']),
                 'uri_pattern': ''})
            blob_storage_reader_cfg_handle = await blob_storage.open_reader('default', tails_reader_config)
            logger.info('%s - Create Revocation State', actor)
            rev_state_json = \
                await anoncreds.create_revocation_state(blob_storage_reader_cfg_handle, revoc_reg_def_json,
                                                        revoc_reg_delta_json, t, item['cred_rev_id'])
            # Keyed by the timestamp the delta was parsed at.
            rev_states[rev_reg_id] = {t: json.loads(rev_state_json)}
    return json.dumps(schemas), json.dumps(cred_defs), json.dumps(rev_states)
async def verifier_get_entities_from_ledger(pool_handle, _did, identifiers, actor, timestamp=None):
    """Gather the ledger entities a verifier needs to check a proof.

    For every identifier: fetch its schema and credential definition, and —
    for revocable credentials — the revocation registry definition and the
    registry value at the relevant timestamp.

    :return: (schemas, cred_defs, rev_reg_defs, rev_regs) as JSON strings.
    """
    schemas = {}
    cred_defs = {}
    rev_reg_defs = {}
    rev_regs = {}
    for item in identifiers:
        logger.info("\"{}\" -> Get Schema from Ledger".format(actor))
        (received_schema_id, received_schema) = await get_schema(pool_handle, _did, item['schema_id'])
        schemas[received_schema_id] = json.loads(received_schema)
        logger.info("\"{}\" -> Get Claim Definition from Ledger".format(actor))
        (received_cred_def_id, received_cred_def) = await get_cred_def(pool_handle, _did, item['cred_def_id'])
        cred_defs[received_cred_def_id] = json.loads(received_cred_def)
        if 'rev_reg_id' in item and item['rev_reg_id'] is not None:
            # Get Revocation Definitions and Revocation Registries
            logger.info("\"{}\" -> Get Revocation Definition from Ledger".format(actor))
            get_revoc_reg_def_request = await ledger.build_get_revoc_reg_def_request(_did, item['rev_reg_id'])
            get_revoc_reg_def_response = await ledger.submit_request(pool_handle, get_revoc_reg_def_request)
            (rev_reg_id, revoc_reg_def_json) = await ledger.parse_get_revoc_reg_def_response(get_revoc_reg_def_response)
            logger.info("\"{}\" -> Get Revocation Registry from Ledger".format(actor))
            # Fall back to the timestamp recorded in the proof identifier.
            if not timestamp: timestamp = item['timestamp']
            get_revoc_reg_request = \
                await ledger.build_get_revoc_reg_request(_did, item['rev_reg_id'], timestamp)
            get_revoc_reg_response = await ledger.submit_request(pool_handle, get_revoc_reg_request)
            # timestamp2 is the timestamp the ledger actually answered for.
            (rev_reg_id, rev_reg_json, timestamp2) = await ledger.parse_get_revoc_reg_response(get_revoc_reg_response)
            rev_regs[rev_reg_id] = {timestamp2: json.loads(rev_reg_json)}
            rev_reg_defs[rev_reg_id] = json.loads(revoc_reg_def_json)
    return json.dumps(schemas), json.dumps(cred_defs), json.dumps(rev_reg_defs), json.dumps(rev_regs)
async def auth_decrypt(wallet_handle, key, message):
    """Auth-decrypt *message* with *key*.

    :return: (sender verkey, decrypted JSON string, parsed JSON object).
    """
    sender_verkey, raw_message = await crypto.auth_decrypt(wallet_handle, key, message)
    message_json = raw_message.decode("utf-8")
    return sender_verkey, message_json, json.loads(message_json)
# Script entry point: run the demo coroutine, then pause briefly so the
# libindy background thread can finish before the interpreter exits.
if __name__ == '__main__':
    run_coroutine(run)
    time.sleep(1)  # FIXME waiting for libindy thread complete
| {
"content_hash": "924697bb80dfb5357f556da1805ff24e",
"timestamp": "",
"source": "github",
"line_count": 1183,
"max_line_length": 148,
"avg_line_length": 55.92307692307692,
"alnum_prop": 0.5867104010157655,
"repo_name": "peacekeeper/indy-sdk",
"id": "a236946fc5faabf0a3961ccec396a10c586281c6",
"size": "66157",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "samples/python/src/getting_started.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "221376"
},
{
"name": "C#",
"bytes": "854385"
},
{
"name": "C++",
"bytes": "280810"
},
{
"name": "CSS",
"bytes": "137079"
},
{
"name": "Dockerfile",
"bytes": "24968"
},
{
"name": "Groovy",
"bytes": "115236"
},
{
"name": "HTML",
"bytes": "897750"
},
{
"name": "Java",
"bytes": "914219"
},
{
"name": "JavaScript",
"bytes": "202339"
},
{
"name": "Makefile",
"bytes": "328"
},
{
"name": "Objective-C",
"bytes": "591695"
},
{
"name": "Objective-C++",
"bytes": "747317"
},
{
"name": "Perl",
"bytes": "8271"
},
{
"name": "Python",
"bytes": "821636"
},
{
"name": "Ruby",
"bytes": "80522"
},
{
"name": "Rust",
"bytes": "6771861"
},
{
"name": "Shell",
"bytes": "267807"
},
{
"name": "Swift",
"bytes": "1114"
},
{
"name": "TypeScript",
"bytes": "236119"
}
],
"symlink_target": ""
} |
from __future__ import print_function, division, absolute_import
from contextlib import contextmanager
import pytest
pytest.importorskip('s3fs')
import boto3
import moto
from toolz import concat, valmap, partial
from s3fs import S3FileSystem
from dask import compute, get
from dask.bytes.s3 import read_bytes, open_files, getsize
# Force the synchronous scheduler so test computations run deterministically
# in-process.
compute = partial(compute, get=get)

test_bucket_name = 'test'

# Fixture data uploaded to the mock bucket: key -> newline-delimited JSON records.
files = {'test/accounts.1.json': (b'{"amount": 100, "name": "Alice"}\n'
                                  b'{"amount": 200, "name": "Bob"}\n'
                                  b'{"amount": 300, "name": "Charlie"}\n'
                                  b'{"amount": 400, "name": "Dennis"}\n'),
         'test/accounts.2.json': (b'{"amount": 500, "name": "Alice"}\n'
                                  b'{"amount": 600, "name": "Bob"}\n'
                                  b'{"amount": 700, "name": "Charlie"}\n'
                                  b'{"amount": 800, "name": "Dennis"}\n')}
@pytest.yield_fixture
def s3():
    # writable local S3 system
    # Mock S3 via moto, create the test bucket, seed it with the fixture
    # files, and hand the test an anonymous s3fs filesystem. The mock is
    # torn down when the `with` block exits after the test.
    with moto.mock_s3() as m:
        client = boto3.client('s3')
        client.create_bucket(Bucket=test_bucket_name, ACL='public-read-write')
        for f, data in files.items():
            client.put_object(Bucket=test_bucket_name, Key=f, Body=data)
        yield S3FileSystem(anon=True)
@contextmanager
def s3_context(bucket, files):
    """Yield an anonymous S3FileSystem backed by a moto-mocked bucket.

    Creates *bucket*, uploads *files* (key -> bytes), yields the filesystem,
    then best-effort deletes the uploaded keys and stops the mock — even when
    the caller's block raises (the original skipped cleanup on exceptions).
    """
    m = moto.mock_s3()
    m.start()
    client = boto3.client('s3')
    client.create_bucket(Bucket=bucket, ACL='public-read-write')
    for f, data in files.items():
        client.put_object(Bucket=bucket, Key=f, Body=data)
    try:
        yield S3FileSystem(anon=True)
    finally:
        for f in files:
            try:
                # Bug fix: delete_object takes no ``Body`` parameter — passing
                # one made every delete fail, silently skipped by the old
                # bare ``except:``.
                client.delete_object(Bucket=bucket, Key=f)
            except Exception:
                # Cleanup is best-effort; the mock is discarded anyway.
                pass
        m.stop()
def test_read_bytes(s3):
    """read_bytes returns a byte sample plus per-file lists of lazy blocks."""
    sample, values = read_bytes(test_bucket_name+'/test/accounts.*', s3=s3)
    assert isinstance(sample, bytes)
    # The sample is taken from the start of the first (sorted) file.
    assert sample[:5] == files[sorted(files)[0]][:5]
    assert isinstance(values, (list, tuple))
    assert isinstance(values[0], (list, tuple))
    # Blocks are lazy dask values, not raw bytes.
    assert hasattr(values[0][0], 'dask')
    assert sum(map(len, values)) >= len(files)
    results = compute(*concat(values))
    # Computing all blocks reproduces the original file contents.
    assert set(results) == set(files.values())
def test_read_bytes_blocksize_none(s3):
    """With blocksize=None every matched file becomes exactly one block."""
    _, blocks = read_bytes(test_bucket_name+'/test/accounts.*', blocksize=None,
                           s3=s3)
    assert sum(len(per_file) for per_file in blocks) == len(files)
@pytest.mark.slow
def test_read_bytes_blocksize_on_large_data():
    """blocksize=None yields one block per file, even for very large files.

    NOTE(review): reads the public 'dask-data' bucket, so this needs network
    access — hence the slow marker.
    """
    _, L = read_bytes('dask-data/nyc-taxi/2015/yellow_tripdata_2015-01.csv',
                      blocksize=None)
    assert len(L) == 1
    _, L = read_bytes('dask-data/nyc-taxi/2014/*.csv', blocksize=None)
    assert len(L) == 12
@pytest.mark.parametrize('blocksize', [5, 15, 45, 1500])
def test_read_bytes_block(s3, blocksize):
    """Fixed-size blocking produces the expected block count and loses no data."""
    _, vals = read_bytes(test_bucket_name+'/test/account*',
                         blocksize=blocksize, s3=s3)
    # Each file splits into len(file) // blocksize + 1 blocks.
    assert (list(map(len, vals)) ==
            [(len(v) // blocksize + 1) for v in files.values()])
    results = compute(*concat(vals))
    assert (sum(len(r) for r in results) ==
            sum(len(v) for v in files.values()))
    # Block boundaries may cut lines, but the set of full lines is preserved.
    ourlines = b"".join(results).split(b'\n')
    testlines = b"".join(files.values()).split(b'\n')
    assert set(ourlines) == set(testlines)
@pytest.mark.parametrize('blocksize', [5, 15, 45, 1500])
def test_read_bytes_delimited(s3, blocksize):
    """Delimited reads split blocks on the delimiter, whatever the blocksize."""
    _, values = read_bytes(test_bucket_name+'/test/accounts*',
                           blocksize=blocksize, delimiter=b'\n', s3=s3)
    _, values2 = read_bytes(test_bucket_name+'/test/accounts*',
                            blocksize=blocksize, delimiter=b'foo', s3=s3)
    # Different delimiters must produce different dask keys.
    assert ([a.key for a in concat(values)] !=
            [b.key for b in concat(values2)])
    results = compute(*concat(values))
    res = [r for r in results if r]
    # Every non-empty block ends with the delimiter.
    assert all(r.endswith(b'\n') for r in res)
    ourlines = b''.join(res).split(b'\n')
    testlines = b"".join(files[k] for k in sorted(files)).split(b'\n')
    assert ourlines == testlines
    # delimiter not at the end
    d = b'}'
    _, values = read_bytes(test_bucket_name+'/test/accounts*',
                           blocksize=blocksize, delimiter=d, s3=s3)
    results = compute(*concat(values))
    res = [r for r in results if r]
    # All should end in } except EOF
    # (two files -> two trailing fragments without the delimiter).
    assert sum(r.endswith(b'}') for r in res) == len(res) - 2
    ours = b"".join(res)
    test = b"".join(files[v] for v in sorted(files))
    assert ours == test
def test_registered(s3):
    """read_bytes is reachable via the protocol-dispatching core API ('s3://')."""
    from dask.bytes.core import read_bytes
    url = 's3://' + test_bucket_name + '/test/accounts.*.json'
    sample, values = read_bytes(url, s3=s3)
    results = compute(*concat(values))
    assert set(results) == set(files.values())
def test_registered_open_files(s3):
    """open_files is reachable via the protocol-dispatching core API ('s3://')."""
    from dask.bytes.core import open_files
    url = 's3://' + test_bucket_name + '/test/accounts.*.json'
    handles = open_files(url, s3=s3)
    assert len(handles) == len(files)
    contents = compute(*[handle.read() for handle in handles])
    assert list(contents) == [files[key] for key in sorted(files)]
def test_registered_open_text_files(s3):
    """open_text_files decodes file contents to str via the core API."""
    from dask.bytes.core import open_text_files
    url = 's3://' + test_bucket_name + '/test/accounts.*.json'
    handles = open_text_files(url, s3=s3)
    assert len(handles) == len(files)
    contents = compute(*[handle.read() for handle in handles])
    assert list(contents) == [files[key].decode() for key in sorted(files)]
from dask.bytes.compression import compress, files as cfiles, seekable_files
# (format, blocksize) pairs for parametrization: whole-file reads for every
# format, blocked reads only for formats listed as seekable — presumably the
# ones supporting random access (TODO confirm against dask.bytes.compression).
fmt_bs = [(fmt, None) for fmt in cfiles] + [(fmt, 10) for fmt in seekable_files]
@pytest.mark.parametrize('fmt,blocksize', fmt_bs)
def test_compression(s3, fmt, blocksize):
    """Compressed objects decompress transparently through read_bytes."""
    # NOTE(review): the context manager's filesystem shadows the `s3` fixture
    # argument for the rest of the test.
    with s3_context('compress', valmap(compress[fmt], files)) as s3:
        sample, values = read_bytes('compress/test/accounts.*', s3=s3,
                                    compression=fmt, blocksize=blocksize)
        # The sample is already decompressed.
        assert sample.startswith(files[sorted(files)[0]][:10])
        results = compute(*concat(values))
        assert b''.join(results) == b''.join([files[k] for k in sorted(files)])
def test_files(s3):
    """open_files yields one lazy file per matching key, in sorted-key order."""
    handles = open_files(test_bucket_name+'/test/accounts.*', s3=s3)
    assert len(handles) == len(files)
    contents = compute(*[handle.read() for handle in handles])
    assert list(contents) == [files[key] for key in sorted(files)]
@pytest.mark.parametrize('fmt', list(seekable_files))
def test_getsize(fmt):
    """getsize reports the uncompressed size for seekable compression formats."""
    with s3_context('compress', {'x': compress[fmt](b'1234567890')}) as s3:
        assert getsize('compress/x', fmt, s3=s3) == 10
def double(x):
    """Return *x* doubled (anything supporting ``* 2``, e.g. bytes for the
    modification-time tests below).

    Converted from a named lambda assignment (PEP 8 E731) to a ``def`` so the
    function has a proper name in tracebacks.
    """
    return x * 2
def test_modification_time_read_bytes():
    """Dask keys track content/mtime: identical uploads reuse keys, changed
    data produces new keys (so stale results are not reused)."""
    with s3_context('compress', files) as s3:
        _, a = read_bytes('compress/test/accounts.*', s3=s3)
        _, b = read_bytes('compress/test/accounts.*', s3=s3)

    assert [aa._key for aa in concat(a)] == [bb._key for bb in concat(b)]

    # Re-upload with doubled contents: the keys must differ.
    with s3_context('compress', valmap(double, files)) as s3:
        _, c = read_bytes('compress/test/accounts.*', s3=s3)

    assert [aa._key for aa in concat(a)] != [cc._key for cc in concat(c)]
def test_modification_time_open_files():
    """Same key-stability property as above, but for open_files handles."""
    with s3_context('compress', files) as s3:
        a = open_files('compress/test/accounts.*', s3=s3)
        b = open_files('compress/test/accounts.*', s3=s3)

    assert [aa._key for aa in a] == [bb._key for bb in b]

    # Re-upload with doubled contents: the keys must differ.
    with s3_context('compress', valmap(double, files)) as s3:
        c = open_files('compress/test/accounts.*', s3=s3)

    assert [aa._key for aa in a] != [cc._key for cc in c]
def test_read_csv_passes_through_options():
    """storage_options={'s3': ...} propagates the filesystem into dd.read_csv."""
    dd = pytest.importorskip('dask.dataframe')
    with s3_context('csv', {'a.csv': b'a,b\n1,2\n3,4'}) as s3:
        df = dd.read_csv('s3://csv/*.csv', storage_options={'s3': s3})
        assert df.a.sum().compute() == 1 + 3
| {
"content_hash": "d744f3f7207b8c2c8486103c75810cb3",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 83,
"avg_line_length": 35.25,
"alnum_prop": 0.5982776089159068,
"repo_name": "mikegraham/dask",
"id": "0741f417ed5125528987329fc9ddddb1ec3c8dc6",
"size": "7896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dask/bytes/tests/test_s3.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1187699"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import json
import os
import re
from twitter.common.collections import maybe_list
from pants.base.build_environment import get_buildroot
from pants.build_graph.intermediate_target_factory import hash_target
from pants.util.process_handler import subprocess
from pants_test.backend.project_info.tasks.resolve_jars_test_mixin import ResolveJarsTestMixin
from pants_test.pants_run_integration_test import PantsRunIntegrationTest, ensure_resolver
class ExportIntegrationTest(ResolveJarsTestMixin, PantsRunIntegrationTest):
    """Integration tests for the ``./pants export`` goal.

    Each test invokes a real pants process in a temporary workdir and
    inspects the JSON the export goal writes.
    """

    # Extra flags asking export to also resolve sources and javadoc jars.
    _confs_args = [
        '--export-libraries-sources',
        '--export-libraries-javadocs',
    ]

    def run_export(self, test_target, workdir, load_libs=False, only_default=False, extra_args=None):
        """Runs ./pants export ... and returns its json output.

        :param string|list test_target: spec of the targets to run on.
        :param string workdir: working directory to run pants with.
        :param bool load_libs: whether to load external libraries (of any conf).
        :param bool only_default: if loading libraries, whether to only resolve the default conf, or to
          additionally resolve sources and javadocs.
        :param list extra_args: list of extra arguments for the pants invocation.
        :return: the json output of the console task.
        :rtype: dict
        """
        export_out_file = os.path.join(workdir, 'export_out.txt')
        args = ['export',
                '--output-file={out_file}'.format(out_file=export_out_file)] + maybe_list(test_target)
        libs_args = ['--no-export-libraries'] if not load_libs else self._confs_args
        if load_libs and only_default:
            libs_args = []
        pants_run = self.run_pants_with_workdir(args + libs_args + (extra_args or []), workdir)
        self.assert_success(pants_run)
        self.assertTrue(os.path.exists(export_out_file),
                        msg='Could not find export output file in {out_file}'
                            .format(out_file=export_out_file))
        with open(export_out_file) as json_file:
            json_data = json.load(json_file)
        if not load_libs:
            # Sanity check: without library loading no 'libraries' key is emitted.
            self.assertIsNone(json_data.get('libraries'))
        return json_data

    def evaluate_subtask(self, targets, workdir, load_extra_confs, extra_args, expected_jars):
        """ResolveJarsTestMixin hook: assert the expected jars resolve to real files."""
        json_data = self.run_export(targets, workdir, load_libs=True, only_default=not load_extra_confs,
                                    extra_args=extra_args)
        for jar in expected_jars:
            self.assertIn(jar, json_data['libraries'])
            for path in json_data['libraries'][jar].values():
                self.assertTrue(os.path.exists(path), 'Expected jar at {} to actually exist.'.format(path))

    @ensure_resolver
    def test_export_code_gen(self):
        """Thrift codegen targets appear in the export under the workdir's gen root."""
        with self.temporary_workdir() as workdir:
            test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
            json_data = self.run_export(test_target, workdir, load_libs=True)
            thrift_target_name = ('examples.src.thrift.org.pantsbuild.example.precipitation'
                                  '.precipitation-java')
            # The generated target's path contains unpredictable hash segments,
            # so match it with a regex rather than an exact string.
            codegen_target_regex = os.path.join(os.path.relpath(workdir, get_buildroot()),
                                                'gen/thrift-java/[^/]*/[^/:]*/[^/:]*:{0}'.format(thrift_target_name))
            p = re.compile(codegen_target_regex)
            self.assertTrue(any(p.match(target) for target in json_data.get('targets').keys()))

    @ensure_resolver
    def test_export_json_transitive_jar(self):
        """Transitive third-party jars are listed on the exporting target."""
        with self.temporary_workdir() as workdir:
            test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
            json_data = self.run_export(test_target, workdir, load_libs=True)
            targets = json_data.get('targets')
            self.assertIn('org.hamcrest:hamcrest-core:1.3', targets[test_target]['libraries'])

    @ensure_resolver
    def test_export_jar_path_with_excludes(self):
        """Excluded jars are omitted from 'libraries' and recorded in 'excludes'."""
        with self.temporary_workdir() as workdir:
            test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:foo'
            json_data = self.run_export(test_target, workdir, load_libs=True)
            self.assertIsNone(json_data
                              .get('libraries')
                              .get('com.typesafe.sbt:incremental-compiler:0.13.7'))
            foo_target = (json_data
                          .get('targets')
                          .get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
            self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))

    @ensure_resolver
    def test_export_jar_path_with_excludes_soft(self):
        """--export-soft-excludes keeps excluded jars resolved while still listing the excludes."""
        with self.temporary_workdir() as workdir:
            test_target = 'testprojects/src/java/org/pantsbuild/testproject/exclude:'
            json_data = self.run_export(test_target,
                                        workdir,
                                        load_libs=True,
                                        extra_args=['--export-soft-excludes'])
            self.assertIsNotNone(json_data
                                 .get('libraries')
                                 .get('com.martiansoftware:nailgun-server:0.9.1'))
            self.assertIsNotNone(json_data.get('libraries').get('org.pantsbuild:jmake:1.3.8-10'))
            foo_target = (json_data
                          .get('targets')
                          .get('testprojects/src/java/org/pantsbuild/testproject/exclude:foo'))
            self.assertTrue('com.typesafe.sbt:incremental-compiler' in foo_target.get('excludes'))
            self.assertTrue('org.pantsbuild' in foo_target.get('excludes'))

    @ensure_resolver
    def test_export_jar_path(self):
        """Each library entry maps conf names to jar paths (default/javadoc/sources)."""
        with self.temporary_workdir() as workdir:
            test_target = 'examples/tests/java/org/pantsbuild/example/usethrift:usethrift'
            json_data = self.run_export(test_target, workdir, load_libs=True)
            common_lang_lib_info = json_data.get('libraries').get('junit:junit:4.12')
            self.assertIsNotNone(common_lang_lib_info)
            self.assertIn(
                'junit-4.12.jar',
                common_lang_lib_info.get('default')
            )
            self.assertIn(
                'junit-4.12-javadoc.jar',
                common_lang_lib_info.get('javadoc')
            )
            self.assertIn(
                'junit-4.12-sources.jar',
                common_lang_lib_info.get('sources')
            )

    @ensure_resolver
    def test_dep_map_for_java_sources(self):
        """java_sources of a scala target show up in the exported target map."""
        with self.temporary_workdir() as workdir:
            test_target = 'examples/src/scala/org/pantsbuild/example/scala_with_java_sources'
            json_data = self.run_export(test_target, workdir)
            targets = json_data.get('targets')
            self.assertIn('examples/src/java/org/pantsbuild/example/java_sources:java_sources', targets)

    @ensure_resolver
    def test_sources_and_javadocs(self):
        """Scala third-party deps resolve default, sources, and javadoc confs."""
        with self.temporary_workdir() as workdir:
            test_target = 'testprojects/src/scala/org/pantsbuild/testproject/unicode/shapeless'
            json_data = self.run_export(test_target, workdir, load_libs=True)
            shapeless_lib = json_data.get('libraries').get('com.chuusai:shapeless_2.11:2.2.5')
            self.assertIsNotNone(shapeless_lib)
            self.assertIsNotNone(shapeless_lib['default'])
            self.assertIsNotNone(shapeless_lib['sources'])
            self.assertIsNotNone(shapeless_lib['javadoc'])

    @ensure_resolver
    def test_classifiers(self):
        """Ivy classifiers (e.g. 'tests') are exported as separate conf entries."""
        with self.temporary_workdir() as workdir:
            test_target = 'testprojects/tests/java/org/pantsbuild/testproject/ivyclassifier:ivyclassifier'
            json_data = self.run_export(test_target, workdir, load_libs=True)
            avro_lib_info = json_data.get('libraries').get('org.apache.avro:avro:1.7.7')
            self.assertIsNotNone(avro_lib_info)
            self.assertIn(
                'avro-1.7.7.jar',
                avro_lib_info.get('default'),
            )
            self.assertIn(
                'avro-1.7.7-tests.jar',
                avro_lib_info.get('tests'),
            )
            self.assertIn(
                'avro-1.7.7-javadoc.jar',
                avro_lib_info.get('javadoc'),
            )
            self.assertIn(
                'avro-1.7.7-sources.jar',
                avro_lib_info.get('sources'),
            )

    @ensure_resolver
    def test_distributions_and_platforms(self):
        """JVM platform options are echoed into the export output."""
        with self.temporary_workdir() as workdir:
            test_target = 'examples/src/java/org/pantsbuild/example/hello/simple'
            json_data = self.run_export(test_target, workdir, load_libs=False, extra_args=[
                '--jvm-platform-default-platform=java7',
                '--jvm-platform-platforms={'
                '  "java7": {"source": "1.7", "target": "1.7", "args": [ "-X123" ]},'
                '  "java8": {"source": "1.8", "target": "1.8", "args": [ "-X456" ]}'
                '}',
                '--jvm-distributions-paths={'
                '  "macos": [ "/Library/JDK" ],'
                '  "linux": [ "/usr/lib/jdk7", "/usr/lib/jdk8"]'
                '}'
            ])
            # A pure-JVM export must not include the python_setup section.
            self.assertFalse('python_setup' in json_data)
            target_name = 'examples/src/java/org/pantsbuild/example/hello/simple:simple'
            targets = json_data.get('targets')
            self.assertEquals('java7', targets[target_name]['platform'])
            self.assertEquals(
                {
                    'default_platform' : 'java7',
                    'platforms': {
                        'java7': {
                            'source_level': '1.7',
                            'args': ['-X123'],
                            'target_level': '1.7'},
                        'java8': {
                            'source_level': '1.8',
                            'args': ['-X456'],
                            'target_level': '1.8'},
                    }
                },
                json_data['jvm_platforms'])

    @ensure_resolver
    def test_test_platform(self):
        """A target's test_platform is exported separately from its platform."""
        with self.temporary_workdir() as workdir:
            test_target = 'testprojects/tests/java/org/pantsbuild/testproject/testjvms:eight-test-platform'
            json_data = self.run_export(test_target, workdir)
            self.assertEquals('java7', json_data['targets'][test_target]['platform'])
            self.assertEquals('java8', json_data['targets'][test_target]['test_platform'])

    @ensure_resolver
    def test_intellij_integration(self):
        """The IntelliJ export script emits python interpreter/chroot info."""
        with self.temporary_workdir() as workdir:
            exported_file = os.path.join(workdir, "export_file.json")
            p = subprocess.Popen(['build-support/pants-intellij.sh', '--export-output-file=' + exported_file],
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p.communicate()
            self.assertEqual(p.returncode, 0)
            with open(exported_file) as data_file:
                json_data = json.load(data_file)
            python_setup = json_data['python_setup']
            self.assertIsNotNone(python_setup)
            self.assertIsNotNone(python_setup['interpreters'])
            default_interpreter = python_setup['default_interpreter']
            self.assertIsNotNone(default_interpreter)
            self.assertIsNotNone(python_setup['interpreters'][default_interpreter])
            self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['binary']))
            self.assertTrue(os.path.exists(python_setup['interpreters'][default_interpreter]['chroot']))
            python_target = json_data['targets']['src/python/pants/backend/python/targets:targets']
            self.assertIsNotNone(python_target)
            self.assertEquals(default_interpreter, python_target['python_interpreter'])

    @ensure_resolver
    def test_intransitive_and_scope(self):
        """Synthetic intermediate targets carry their transitivity and scope."""
        with self.temporary_workdir() as workdir:
            test_path = 'testprojects/maven_layout/provided_patching/one/src/main/java'
            test_target = '{}:common'.format(test_path)
            json_data = self.run_export(test_target, workdir)
            # The synthetic target's name embeds a hash of (spec, scope).
            h = hash_target('{}:shadow'.format(test_path), 'provided')
            synthetic_target = '{}:shadow-unstable-provided-{}'.format(test_path, h)
            self.assertEquals(False, json_data['targets'][synthetic_target]['transitive'])
            self.assertEquals('compile test', json_data['targets'][synthetic_target]['scope'])

    @ensure_resolver
    def test_export_is_target_roots(self):
        """is_target_root is true exactly for targets under the requested spec."""
        with self.temporary_workdir() as workdir:
            test_target = 'examples/tests/java/org/pantsbuild/example/::'
            json_data = self.run_export(test_target, workdir, load_libs=False)
            for target_address, attributes in json_data['targets'].items():
                # Make sure all targets under `test_target`'s directory are target roots.
                self.assertEqual(
                    attributes['is_target_root'],
                    target_address.startswith("examples/tests/java/org/pantsbuild/example")
                )
| {
"content_hash": "b875992c964b644ca248abe0329e7774",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 111,
"avg_line_length": 45.996212121212125,
"alnum_prop": 0.6502511735156057,
"repo_name": "foursquare/pants",
"id": "10d9411d8f76b25ee783faa05338f761f451da43",
"size": "12290",
"binary": false,
"copies": "3",
"ref": "refs/heads/1.7.0+fsX",
"path": "tests/python/pants_test/backend/project_info/tasks/test_export_integration.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "343"
},
{
"name": "C++",
"bytes": "1138"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "3034"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1922"
},
{
"name": "HTML",
"bytes": "49126"
},
{
"name": "Java",
"bytes": "490360"
},
{
"name": "JavaScript",
"bytes": "33289"
},
{
"name": "Python",
"bytes": "5461553"
},
{
"name": "Rust",
"bytes": "443987"
},
{
"name": "Scala",
"bytes": "76065"
},
{
"name": "Shell",
"bytes": "77142"
},
{
"name": "Starlark",
"bytes": "357125"
},
{
"name": "Thrift",
"bytes": "3365"
}
],
"symlink_target": ""
} |
"""
Commands for rendering various parts of the app stack.
"""
from glob import glob
import os
from fabric.api import local, task
import app
def _fake_context(path):
    """
    Create a fake request context for a given path, so views can be
    rendered outside of a live request.
    """
    return app.app.test_request_context(path=path)
def _view_from_name(name):
    """
    Resolve a view name to its callable. Accepts 'module.view' or a bare
    'view', which defaults to the 'app' module.
    """
    parts = name.split('.')
    if len(parts) == 1:
        module_name, view_name = 'app', name
    else:
        module_name, view_name = parts
    return globals()[module_name].__dict__[view_name]
@task
def less():
    """
    Render LESS files to CSS.
    """
    # One output file per less/*.less input, named www/css/<name>.less.css.
    for path in glob('less/*.less'):
        filename = os.path.split(path)[-1]
        name = os.path.splitext(filename)[0]
        out_path = 'www/css/%s.less.css' % name
        try:
            local('node_modules/less/bin/lessc %s %s' % (path, out_path))
        except:
            # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
            # it re-raises below, but narrowing the clause would be safer.
            print 'It looks like "lessc" isn\'t installed. Try running: "npm install"'
            raise
@task
def jst():
    """
    Render Underscore templates to a JST package.
    """
    try:
        local('node_modules/universal-jst/bin/jst.js --template underscore jst www/js/templates.js')
    except:
        # NOTE(review): unlike less(), this swallows the exception entirely
        # (no re-raise), hiding the real failure cause — confirm intended.
        print 'It looks like "jst" isn\'t installed. Try running: "npm install"'
@task
def app_config_js():
    """
    Render app_config.js to file.
    """
    from static import _app_config_js
    with _fake_context('/js/app_config.js'):
        payload = _app_config_js().data
    with open('www/js/app_config.js', 'w') as out:
        out.write(payload)
@task(default=True)
def render_all():
    """
    Render HTML templates and compile assets.
    """
    from flask import g
    # Compile assets first so rendered pages reference fresh CSS/JS.
    less()
    jst()
    app_config_js()
    # Shared across views so each compiled include is built only once.
    compiled_includes = {}
    # Loop over all views in the app
    for rule in app.app.url_map.iter_rules():
        rule_string = rule.rule
        name = rule.endpoint
        # Skip utility views
        if name == 'static' or name.startswith('_'):
            print 'Skipping %s' % name
            continue
        # Convert trailing slashes to index.html files
        if rule_string.endswith('/'):
            filename = 'www' + rule_string + 'index.html'
        elif rule_string.endswith('.html'):
            filename = 'www' + rule_string
        else:
            print 'Skipping %s' % name
            continue
        # Create the output path
        dirname = os.path.dirname(filename)
        if not (os.path.exists(dirname)):
            os.makedirs(dirname)
        print 'Rendering %s' % (filename)
        # Render views, reusing compiled assets
        with _fake_context(rule_string):
            g.compile_includes = True
            g.compiled_includes = compiled_includes
            view = _view_from_name(name)
            content = view().data
            compiled_includes = g.compiled_includes
        # Write rendered view
        # NB: Flask response object has utf-8 encoded the data
        with open(filename, 'w') as f:
            f.write(content)
| {
"content_hash": "6b0db20e2b7c86034cbe5878b3c3beaf",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 100,
"avg_line_length": 24.37007874015748,
"alnum_prop": 0.5764135702746365,
"repo_name": "sahilchinoy/public-trust",
"id": "0697cbc147a105cfe67fc28fb60938531fe472a8",
"size": "3118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/render.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "103681"
},
{
"name": "HTML",
"bytes": "29113"
},
{
"name": "JavaScript",
"bytes": "525942"
},
{
"name": "Nginx",
"bytes": "136"
},
{
"name": "Python",
"bytes": "58202"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import bpy
import os
# Absolute path to the sr_description package — hard-coded to a specific
# developer checkout; adjust for your machine before running.
sr_description_path = '/home/user/projects/shadow_robot/base_deps/src/sr_common/sr_description'
# Basenames of the hand/forearm meshes to convert from Collada (.dae) to STL.
file_names = ['forearm', 'forearm_muscle', 'forearm_muscle_disk', 'forearm_lite', 'wrist', 'palm', 'knuckle',
              'lfmetacarpal', 'F1', 'F2', 'F3', 'TH1_z', 'TH2_z', 'TH3_z']
# For each mesh: clear the Blender scene, import the Collada file, rotate
# about X by 4.71238898038 rad (3*pi/2, i.e. -90 degrees — presumably to
# reconcile axis conventions between the formats; confirm), export as STL.
for file_name in file_names:
    source_file_name = '{0}/meshes/hand/{1}.dae'.format(sr_description_path, file_name)
    dest_file_name = '{0}/mujoco_models/meshes/arm_and_hand_meshes/{1}.stl'.format(sr_description_path, file_name)
    print('Converting {0} to {1}...'.format(source_file_name, dest_file_name))
    # Wipe everything so each export contains only the freshly imported mesh.
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.object.delete()
    bpy.ops.wm.collada_import(filepath=source_file_name)  # change this line
    bpy.ops.object.select_all(action='SELECT')
    bpy.ops.transform.rotate(value=4.71238898038, axis=(1.0, 0, 0))
    bpy.ops.export_mesh.stl(filepath=dest_file_name)
| {
"content_hash": "ee67a9819f0d20f80ec5301ed222b5b5",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 114,
"avg_line_length": 40.833333333333336,
"alnum_prop": 0.6806122448979591,
"repo_name": "shadow-robot/sr_common",
"id": "3c11af0e3ce0850162822878e106ed112965a327",
"size": "2574",
"binary": false,
"copies": "1",
"ref": "refs/heads/noetic-devel",
"path": "sr_description/mujoco_models/meshes/arm_and_hand_meshes/conversion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "3210"
},
{
"name": "CMake",
"bytes": "2941"
},
{
"name": "Python",
"bytes": "11163"
},
{
"name": "Shell",
"bytes": "5470"
}
],
"symlink_target": ""
} |
"""Support for RainMachine devices."""
import asyncio
from datetime import timedelta
import logging
from regenmaschine import Client
from regenmaschine.errors import RainMachineError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
)
from homeassistant.core import callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.service import verify_domain_control
from .const import (
CONF_ZONE_RUN_TIME,
DATA_CLIENT,
DATA_PROGRAMS,
DATA_PROVISION_SETTINGS,
DATA_RESTRICTIONS_CURRENT,
DATA_RESTRICTIONS_UNIVERSAL,
DATA_ZONES,
DATA_ZONES_DETAILS,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DEFAULT_ZONE_RUN,
DOMAIN,
PROGRAM_UPDATE_TOPIC,
SENSOR_UPDATE_TOPIC,
ZONE_UPDATE_TOPIC,
)
_LOGGER = logging.getLogger(__name__)
CONF_CONTROLLERS = "controllers"
CONF_PROGRAM_ID = "program_id"
CONF_SECONDS = "seconds"
CONF_ZONE_ID = "zone_id"
DEFAULT_ATTRIBUTION = "Data provided by Green Electronics LLC"
DEFAULT_ICON = "mdi:water"
DEFAULT_SSL = True
SERVICE_ALTER_PROGRAM = vol.Schema({vol.Required(CONF_PROGRAM_ID): cv.positive_int})
SERVICE_ALTER_ZONE = vol.Schema({vol.Required(CONF_ZONE_ID): cv.positive_int})
SERVICE_PAUSE_WATERING = vol.Schema({vol.Required(CONF_SECONDS): cv.positive_int})
SERVICE_START_PROGRAM_SCHEMA = vol.Schema(
{vol.Required(CONF_PROGRAM_ID): cv.positive_int}
)
SERVICE_START_ZONE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE_ID): cv.positive_int,
vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN): cv.positive_int,
}
)
SERVICE_STOP_PROGRAM_SCHEMA = vol.Schema(
{vol.Required(CONF_PROGRAM_ID): cv.positive_int}
)
SERVICE_STOP_ZONE_SCHEMA = vol.Schema({vol.Required(CONF_ZONE_ID): cv.positive_int})
CONTROLLER_SCHEMA = vol.Schema(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): vol.All(
cv.time_period, lambda value: value.total_seconds()
),
vol.Optional(CONF_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN): cv.positive_int,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONTROLLERS): vol.All(
cv.ensure_list, [CONTROLLER_SCHEMA]
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
    """Set up the RainMachine component."""
    hass.data[DOMAIN] = {DATA_CLIENT: {}}

    if DOMAIN not in config:
        return True

    # Hand every YAML-configured controller off to the config-entry import
    # flow so it is managed like a UI-created entry.
    for controller_conf in config[DOMAIN][CONF_CONTROLLERS]:
        hass.async_create_task(
            hass.config_entries.flow.async_init(
                DOMAIN, context={"source": SOURCE_IMPORT}, data=controller_conf
            )
        )

    return True
async def async_setup_entry(hass, config_entry):
    """Set up RainMachine as config entry.

    Loads the local controller, primes program/zone data, forwards the
    entry to the entity platforms, and registers the domain services.
    Raises ConfigEntryNotReady (so HA retries later) if the controller
    cannot be reached.
    """
    # Backfill a unique ID (the controller's IP address) for entries created
    # before unique IDs were introduced:
    if not config_entry.unique_id:
        hass.config_entries.async_update_entry(
            config_entry, unique_id=config_entry.data[CONF_IP_ADDRESS]
        )

    _verify_domain_control = verify_domain_control(hass, DOMAIN)

    websession = aiohttp_client.async_get_clientsession(hass)
    client = Client(session=websession)

    try:
        await client.load_local(
            config_entry.data[CONF_IP_ADDRESS],
            config_entry.data[CONF_PASSWORD],
            port=config_entry.data[CONF_PORT],
            ssl=config_entry.data.get(CONF_SSL, DEFAULT_SSL),
        )
    except RainMachineError as err:
        _LOGGER.error("An error occurred: %s", err)
        # Chain the original exception so it is preserved for debugging:
        raise ConfigEntryNotReady from err
    else:
        # regenmaschine can load multiple controllers at once, but we only grab the one
        # we loaded above:
        controller = next(iter(client.controllers.values()))

    rainmachine = RainMachine(
        hass,
        controller,
        config_entry.data.get(CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN),
        config_entry.data.get(
            CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL.total_seconds()
        ),
    )

    # Update the data object, which at this point (prior to any sensors registering
    # "interest" in the API), will focus on grabbing the latest program and zone data:
    await rainmachine.async_update()
    hass.data[DOMAIN][DATA_CLIENT][config_entry.entry_id] = rainmachine

    for component in ("binary_sensor", "sensor", "switch"):
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(config_entry, component)
        )

    @_verify_domain_control
    async def disable_program(call):
        """Disable a program."""
        await rainmachine.controller.programs.disable(call.data[CONF_PROGRAM_ID])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def disable_zone(call):
        """Disable a zone."""
        await rainmachine.controller.zones.disable(call.data[CONF_ZONE_ID])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def enable_program(call):
        """Enable a program."""
        await rainmachine.controller.programs.enable(call.data[CONF_PROGRAM_ID])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def enable_zone(call):
        """Enable a zone."""
        await rainmachine.controller.zones.enable(call.data[CONF_ZONE_ID])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def pause_watering(call):
        """Pause watering for a set number of seconds."""
        await rainmachine.controller.watering.pause_all(call.data[CONF_SECONDS])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def start_program(call):
        """Start a particular program."""
        await rainmachine.controller.programs.start(call.data[CONF_PROGRAM_ID])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def start_zone(call):
        """Start a particular zone for a certain amount of time."""
        await rainmachine.controller.zones.start(
            call.data[CONF_ZONE_ID], call.data[CONF_ZONE_RUN_TIME]
        )
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def stop_all(call):
        """Stop all watering."""
        await rainmachine.controller.watering.stop_all()
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def stop_program(call):
        """Stop a program."""
        await rainmachine.controller.programs.stop(call.data[CONF_PROGRAM_ID])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def stop_zone(call):
        """Stop a zone."""
        await rainmachine.controller.zones.stop(call.data[CONF_ZONE_ID])
        await rainmachine.async_update_programs_and_zones()

    @_verify_domain_control
    async def unpause_watering(call):
        """Unpause watering."""
        await rainmachine.controller.watering.unpause_all()
        await rainmachine.async_update_programs_and_zones()

    # Register every service handler with its validation schema:
    for service, method, schema in [
        ("disable_program", disable_program, SERVICE_ALTER_PROGRAM),
        ("disable_zone", disable_zone, SERVICE_ALTER_ZONE),
        ("enable_program", enable_program, SERVICE_ALTER_PROGRAM),
        ("enable_zone", enable_zone, SERVICE_ALTER_ZONE),
        ("pause_watering", pause_watering, SERVICE_PAUSE_WATERING),
        ("start_program", start_program, SERVICE_START_PROGRAM_SCHEMA),
        ("start_zone", start_zone, SERVICE_START_ZONE_SCHEMA),
        ("stop_all", stop_all, {}),
        ("stop_program", stop_program, SERVICE_STOP_PROGRAM_SCHEMA),
        ("stop_zone", stop_zone, SERVICE_STOP_ZONE_SCHEMA),
        ("unpause_watering", unpause_watering, {}),
    ]:
        hass.services.async_register(DOMAIN, service, method, schema=schema)

    return True
async def async_unload_entry(hass, config_entry):
    """Unload a RainMachine config entry."""
    # Drop the shared RainMachine data object for this entry:
    hass.data[DOMAIN][DATA_CLIENT].pop(config_entry.entry_id)
    # Unload every platform that was forwarded during setup:
    tasks = [
        hass.config_entries.async_forward_entry_unload(config_entry, component)
        for component in ("binary_sensor", "sensor", "switch")
    ]
    await asyncio.gather(*tasks)
    return True
class RainMachine:
    """Define a generic RainMachine object.

    Holds the regenmaschine controller, caches fetched API data in
    ``self.data``, and manages a time-interval listener that only runs
    while at least one entity has registered interest in a sensor API
    category.
    """

    def __init__(self, hass, controller, default_zone_runtime, scan_interval):
        """Initialize.

        controller: a loaded regenmaschine controller.
        default_zone_runtime: default seconds for zone runs.
        scan_interval: polling interval in seconds.
        """
        self._async_cancel_time_interval_listener = None
        self._scan_interval_seconds = scan_interval
        self.controller = controller
        self.data = {}
        self.default_zone_runtime = default_zone_runtime
        self.device_mac = controller.mac
        self.hass = hass

        # Number of entities interested in each sensor API category; a
        # category is only fetched while its count is nonzero:
        self._api_category_count = {
            DATA_PROVISION_SETTINGS: 0,
            DATA_RESTRICTIONS_CURRENT: 0,
            DATA_RESTRICTIONS_UNIVERSAL: 0,
        }
        # Per-category locks so concurrent registrations trigger at most one
        # initial fetch:
        self._api_category_locks = {
            DATA_PROVISION_SETTINGS: asyncio.Lock(),
            DATA_RESTRICTIONS_CURRENT: asyncio.Lock(),
            DATA_RESTRICTIONS_UNIVERSAL: asyncio.Lock(),
        }

    async def _async_update_listener_action(self, now):
        """Define an async_track_time_interval action to update data."""
        await self.async_update()

    @callback
    def async_deregister_sensor_api_interest(self, api_category):
        """Decrement the number of entities with data needs from an API category."""
        self._api_category_count[api_category] -= 1

        # BUGFIX: decrement *before* checking for zero. Previously the zero
        # check ran first and returned early, so when the last interested
        # entity deregistered (sum going 1 -> 0) the time-interval listener
        # was never cancelled and kept polling forever.
        if sum(self._api_category_count.values()) == 0:
            if self._async_cancel_time_interval_listener:
                self._async_cancel_time_interval_listener()
                self._async_cancel_time_interval_listener = None

    async def async_fetch_from_api(self, api_category):
        """Execute the appropriate coroutine to fetch particular data from the API."""
        if api_category == DATA_PROGRAMS:
            data = await self.controller.programs.all(include_inactive=True)
        elif api_category == DATA_PROVISION_SETTINGS:
            data = await self.controller.provisioning.settings()
        elif api_category == DATA_RESTRICTIONS_CURRENT:
            data = await self.controller.restrictions.current()
        elif api_category == DATA_RESTRICTIONS_UNIVERSAL:
            data = await self.controller.restrictions.universal()
        elif api_category == DATA_ZONES:
            data = await self.controller.zones.all(include_inactive=True)
        elif api_category == DATA_ZONES_DETAILS:
            # This API call needs to be separate from the DATA_ZONES one above because,
            # maddeningly, the DATA_ZONES_DETAILS API call doesn't include the current
            # state of the zone:
            data = await self.controller.zones.all(details=True, include_inactive=True)
        self.data[api_category] = data

    async def async_register_sensor_api_interest(self, api_category):
        """Increment the number of entities with data needs from an API category."""
        # If this is the first registration we have, start a time interval:
        if not self._async_cancel_time_interval_listener:
            self._async_cancel_time_interval_listener = async_track_time_interval(
                self.hass,
                self._async_update_listener_action,
                timedelta(seconds=self._scan_interval_seconds),
            )

        self._api_category_count[api_category] += 1

        # If a sensor registers interest in a particular API call and the data doesn't
        # exist for it yet, make the API call and grab the data (the lock ensures
        # only one concurrent registrant performs the fetch):
        async with self._api_category_locks[api_category]:
            if api_category not in self.data:
                await self.async_fetch_from_api(api_category)

    async def async_update(self):
        """Update all RainMachine data (sensors plus programs/zones)."""
        tasks = [self.async_update_programs_and_zones(), self.async_update_sensors()]
        await asyncio.gather(*tasks)

    async def async_update_sensors(self):
        """Update sensor/binary sensor data."""
        _LOGGER.debug("Updating sensor data for RainMachine")

        # Fetch an API category if there is at least one interested entity:
        tasks = {}
        for category, count in self._api_category_count.items():
            if count == 0:
                continue
            tasks[category] = self.async_fetch_from_api(category)

        results = await asyncio.gather(*tasks.values(), return_exceptions=True)
        for api_category, result in zip(tasks, results):
            if isinstance(result, RainMachineError):
                _LOGGER.error(
                    "There was an error while updating %s: %s", api_category, result
                )
                continue

        async_dispatcher_send(self.hass, SENSOR_UPDATE_TOPIC)

    async def async_update_programs_and_zones(self):
        """Update program and zone data.

        Program and zone updates always go together because of how linked they are:
        programs affect zones and certain combinations of zones affect programs.
        Note that this call does not take into account interested entities when making
        the API calls; we make the reasonable assumption that switches will always be
        enabled.
        """
        _LOGGER.debug("Updating program and zone data for RainMachine")
        tasks = {
            DATA_PROGRAMS: self.async_fetch_from_api(DATA_PROGRAMS),
            DATA_ZONES: self.async_fetch_from_api(DATA_ZONES),
            DATA_ZONES_DETAILS: self.async_fetch_from_api(DATA_ZONES_DETAILS),
        }

        results = await asyncio.gather(*tasks.values(), return_exceptions=True)
        for api_category, result in zip(tasks, results):
            if isinstance(result, RainMachineError):
                _LOGGER.error(
                    "There was an error while updating %s: %s", api_category, result
                )

        async_dispatcher_send(self.hass, PROGRAM_UPDATE_TOPIC)
        async_dispatcher_send(self.hass, ZONE_UPDATE_TOPIC)
class RainMachineEntity(Entity):
    """Define a generic RainMachine entity."""

    def __init__(self, rainmachine):
        """Initialize with the shared RainMachine data object."""
        self.rainmachine = rainmachine
        self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
        self._device_class = None
        self._name = None

    @property
    def device_class(self):
        """Return the device class."""
        return self._device_class

    @property
    def device_info(self):
        """Return device registry information for this entity."""
        controller = self.rainmachine.controller
        model = (
            f"Version {controller.hardware_version} "
            f"(API: {controller.api_version})"
        )
        return {
            "identifiers": {(DOMAIN, controller.mac)},
            "name": controller.name,
            "manufacturer": "RainMachine",
            "model": model,
            "sw_version": controller.software_version,
        }

    @property
    def device_state_attributes(self) -> dict:
        """Return the state attributes."""
        return self._attrs

    @property
    def name(self) -> str:
        """Return the name of the entity."""
        return self._name

    @property
    def should_poll(self):
        """Disable polling; updates arrive via dispatcher signals."""
        return False

    @callback
    def _update_state(self):
        """Refresh from the latest data and push the new state to HA."""
        self.update_from_latest_data()
        self.async_write_ha_state()

    @callback
    def update_from_latest_data(self):
        """Update the entity; subclasses must implement this."""
        raise NotImplementedError
| {
"content_hash": "4b00480a7a93b0bbfda62b7ef416ba17",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 88,
"avg_line_length": 35.97821350762527,
"alnum_prop": 0.6453917887852731,
"repo_name": "titilambert/home-assistant",
"id": "239878d02190e3a54f72177a60bfb35c5bae8221",
"size": "16514",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/rainmachine/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "25849092"
},
{
"name": "Shell",
"bytes": "4410"
}
],
"symlink_target": ""
} |
"""
aggregator.py
Created by Thomas Mangin on 2012-07-14.
Copyright (c) 2009-2015 Exa Networks. All rights reserved.
"""
from exabgp.bgp.message.open.asn import ASN
from exabgp.protocol.ip import IPv4
from exabgp.bgp.message.update.attribute.attribute import Attribute
# =============================================================== AGGREGATOR (7)
#
@Attribute.register()
class Aggregator (Attribute):
    """BGP AGGREGATOR path attribute (type code 7): the ASN and router-id
    (IPv4 speaker) of the router that performed route aggregation."""

    ID = Attribute.CODE.AGGREGATOR
    FLAG = Attribute.Flag.TRANSITIVE | Attribute.Flag.OPTIONAL
    CACHING = True

    __slots__ = ['asn','speaker','_str']

    def __init__ (self, asn, speaker):
        # asn: the aggregating AS number; speaker: IPv4 router-id.
        self.asn = asn
        self.speaker = speaker
        self._str = None  # lazily built string representation

    def __eq__ (self, other):
        return \
            self.ID == other.ID and \
            self.FLAG == other.FLAG and \
            self.asn == other.asn and \
            self.speaker == other.speaker

    def __ne__ (self, other):
        return not self.__eq__(other)

    def pack (self, negotiated):
        """Wire-encode the attribute, honouring 4-byte ASN negotiation.

        If 4-byte ASNs were not negotiated but ours requires them, emit the
        2-byte transition value plus a companion AS4_AGGREGATOR attribute.
        """
        if negotiated.asn4:
            return self._attribute(self.asn.pack(True)+self.speaker.pack())
        elif self.asn.asn4():
            return self._attribute(self.asn.trans()+self.speaker.pack()) + Aggregator4(self.asn,self.speaker).pack(negotiated)
        else:
            return self._attribute(self.asn.pack()+self.speaker.pack())

    def __len__ (self):
        raise RuntimeError('size can be 6 or 8 - we can not say - or can we ?')

    def __repr__ (self):
        if not self._str:
            self._str = '%s:%s' % (self.asn,self.speaker)
        return self._str

    def json (self):
        # BUGFIX: the speaker is an IPv4 address and must be rendered with %s;
        # the previous "%d" would not produce a dotted-quad address.
        return '{ "asn" : %d, "speaker" : "%s" }' % (self.asn,self.speaker)

    @classmethod
    def unpack (cls, data, negotiated):
        """Decode an AGGREGATOR attribute: 4- or 2-byte ASN (per negotiation)
        followed by the 4-byte IPv4 speaker."""
        if negotiated.asn4:
            return cls(ASN.unpack(data[:4]),IPv4.unpack(data[-4:]))
        return cls(ASN.unpack(data[:2]),IPv4.unpack(data[-4:]))
# ============================================================== AGGREGATOR (18)
#
@Attribute.register()
class Aggregator4 (Aggregator):
    """BGP AS4_AGGREGATOR path attribute (type code 18): the 4-byte-ASN
    companion to AGGREGATOR, always encoded with a 4-byte ASN."""

    ID = Attribute.CODE.AS4_AGGREGATOR

    # BUGFIX: __slots__ previously listed 'pack', which conflicts with the
    # `pack` method defined below and makes CPython raise
    # "ValueError: 'pack' in __slots__ conflicts with class variable"
    # at class-creation time. This subclass adds no instance attributes.
    __slots__ = []

    def pack (self, negotiated):
        # Always encode with a 4-byte ASN, regardless of negotiation.
        return self._attribute(self.asn.pack(True)+self.speaker.pack())
| {
"content_hash": "59a5cfc88995d3500a8aa49720c82464",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 117,
"avg_line_length": 26.6,
"alnum_prop": 0.6300751879699248,
"repo_name": "benagricola/exabgp",
"id": "88137bde0ffb7a1fd547b15fe18f75834ab32450",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/exabgp/bgp/message/update/attribute/aggregator.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "1225011"
},
{
"name": "Shell",
"bytes": "18795"
}
],
"symlink_target": ""
} |
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the ``connection_only`` boolean flag to
    the ``tickets.Team`` model."""

    def forwards(self, orm):
        """Apply: create the new column, defaulting existing rows to False."""
        # Adding field 'Team.connection_only'
        db.add_column(u'tickets_team', 'connection_only',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)

    def backwards(self, orm):
        """Revert: drop the column created by forwards()."""
        # Deleting field 'Team.connection_only'
        db.delete_column(u'tickets_team', 'connection_only')

    # Frozen snapshot of the app's ORM state at the time this migration was
    # auto-generated by South. Do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'tickets.changelog': {
            'Meta': {'object_name': 'ChangeLog'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ticket': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Ticket']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'tickets.department': {
            'Meta': {'object_name': 'Department'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.reason': {
            'Meta': {'object_name': 'Reason'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.subscribertype': {
            'Meta': {'object_name': 'SubscriberType'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.team': {
            'Meta': {'object_name': 'Team'},
            'connection_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'days_off': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'department': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Department']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.ticket': {
            'Meta': {'object_name': 'Ticket'},
            'account': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'date_assigned': ('django.db.models.fields.DateTimeField', [], {}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'phone': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'price': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'reason': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Reason']", 'null': 'True', 'blank': 'True'}),
            'solution': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'subscriber_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.SubscriberType']"}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Team']"}),
            'technical_data': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'time': ('django.db.models.fields.TimeField', [], {'default': 'datetime.timedelta(0, 3600)'}),
            'type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Type']"}),
            'urgence': ('django.db.models.fields.related.ForeignKey', [], {'default': '2', 'to': u"orm['tickets.Urgence']"}),
            'user_created': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticket_created'", 'to': u"orm['auth.User']"}),
            'user_modified': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'ticket_modified'", 'to': u"orm['auth.User']"})
        },
        u'tickets.type': {
            'Meta': {'object_name': 'Type'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.urgence': {
            'Meta': {'object_name': 'Urgence'},
            'color': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'tickets.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'department': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['tickets.Department']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        }
    }
complete_apps = ['tickets'] | {
"content_hash": "4c80b6c64093d3e1e6c2cef304c3c407",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 187,
"avg_line_length": 69.81203007518798,
"alnum_prop": 0.5460420032310178,
"repo_name": "desecho/tickets",
"id": "ec41bc4631861f2ab3a7ad56ba6f6bf486184eb4",
"size": "9309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tickets_project/tickets/migrations/0002_auto__add_field_team_connection_only.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2774"
},
{
"name": "HTML",
"bytes": "14662"
},
{
"name": "JavaScript",
"bytes": "10558"
},
{
"name": "Python",
"bytes": "79097"
},
{
"name": "Shell",
"bytes": "112"
}
],
"symlink_target": ""
} |
"""Create interactive Leaflet web maps of graphs and routes via folium."""
import json
from . import utils_graph
# folium is an optional dependency for the folium plotting functions
try:
import folium
except ImportError: # pragma: no cover
folium = None
def plot_graph_folium(
    G,
    graph_map=None,
    popup_attribute=None,
    tiles="cartodbpositron",
    zoom=1,
    fit_bounds=True,
    **kwargs,
):
    """
    Plot a graph as an interactive Leaflet web map.

    Note that anything larger than a small city can produce a large web map
    file that is slow to render in your browser.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    graph_map : folium.folium.Map
        if not None, plot the graph on this preexisting folium map object
    popup_attribute : string
        edge attribute to display in a pop-up when an edge is clicked
    tiles : string
        name of a folium tileset
    zoom : int
        initial zoom level for the map
    fit_bounds : bool
        if True, fit the map to the boundaries of the graph's edges
    kwargs
        keyword arguments to pass to folium.PolyLine(), see folium docs for
        options (for example `color="#333333", weight=5, opacity=0.7`)

    Returns
    -------
    folium.folium.Map
    """
    # convert all the graph's edges to a GeoDataFrame, then delegate the
    # actual map construction to the shared plotting helper
    edges = utils_graph.graph_to_gdfs(G, nodes=False)
    return _plot_folium(edges, graph_map, popup_attribute, tiles, zoom, fit_bounds, **kwargs)
def plot_route_folium(
    G,
    route,
    route_map=None,
    popup_attribute=None,
    tiles="cartodbpositron",
    zoom=1,
    fit_bounds=True,
    **kwargs,
):
    """
    Plot a route as an interactive Leaflet web map.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    route : list
        the route as a list of nodes
    route_map : folium.folium.Map
        if not None, plot the route on this preexisting folium map object
    popup_attribute : string
        edge attribute to display in a pop-up when an edge is clicked
    tiles : string
        name of a folium tileset
    zoom : int
        initial zoom level for the map
    fit_bounds : bool
        if True, fit the map to the boundaries of the route's edges
    kwargs
        keyword arguments to pass to folium.PolyLine(), see folium docs for
        options (for example `color="#cc0000", weight=5, opacity=0.7`)

    Returns
    -------
    folium.folium.Map
    """
    # build the (u, v, key) index of the route's edges in traversal order,
    # picking the shortest parallel edge between each consecutive node pair
    edge_index = []
    for u, v in zip(route[:-1], route[1:]):
        key = min(G[u][v].items(), key=lambda item: item[1]["length"])[0]
        edge_index.append((u, v, key))

    route_edges = utils_graph.graph_to_gdfs(G.subgraph(route), nodes=False).loc[edge_index]
    return _plot_folium(route_edges, route_map, popup_attribute, tiles, zoom, fit_bounds, **kwargs)
def _plot_folium(gdf, m, popup_attribute, tiles, zoom, fit_bounds, **kwargs):
    """
    Plot a GeoDataFrame of LineStrings on a folium map object.

    Parameters
    ----------
    gdf : geopandas.GeoDataFrame
        a GeoDataFrame of LineString geometries and attributes
    m : folium.folium.Map or folium.FeatureGroup
        if not None, plot on this preexisting folium map object
    popup_attribute : string
        attribute to display in pop-up on-click, if None, no popup
    tiles : string
        name of a folium tileset
    zoom : int
        initial zoom level for the map
    fit_bounds : bool
        if True, fit the map to gdf's boundaries

    kwargs
        keyword arguments to pass to folium.PolyLine()

    Returns
    -------
    m : folium.folium.Map
    """
    # folium is optional, so bail out loudly if it was never imported
    if folium is None:  # pragma: no cover
        raise ImportError("folium must be installed to use this optional feature")

    # center a newly created map on the centroid of all the geometries
    if m is None:
        centroid = gdf.unary_union.centroid
        m = folium.Map(location=(centroid.y, centroid.x), zoom_start=zoom, tiles=tiles)

    # pull the geometry column, plus the popup column when one was requested
    if popup_attribute is None:
        columns = ["geometry"]
    else:
        columns = ["geometry", popup_attribute]

    # draw one polyline per row
    for row in gdf[columns].values:
        geom = row[0]
        popup_val = row[1] if popup_attribute is not None else None
        _make_folium_polyline(geom=geom, popup_val=popup_val, **kwargs).add_to(m)

    # optionally zoom the map to the [southwest, northeast] corners of the
    # data's total bounds
    if fit_bounds and isinstance(m, folium.Map):
        minx, miny, maxx, maxy = gdf.total_bounds
        m.fit_bounds([(miny, minx), (maxy, maxx)])

    return m
def _make_folium_polyline(geom, popup_val=None, **kwargs):
    """
    Turn LineString geometry into a folium PolyLine with attributes.

    Parameters
    ----------
    geom : shapely LineString
        geometry of the line
    popup_val : string
        text to display in pop-up when a line is clicked, if None, no popup
    kwargs
        keyword arguments to pass to folium.PolyLine()

    Returns
    -------
    pl : folium.PolyLine
    """
    # geopandas yields (lng, lat) coordinate pairs but folium expects
    # (lat, lng), so flip each pair
    coords = [(lat, lng) for lng, lat in geom.coords]

    # folium doesn't interpret html, so can't do newlines without iframe;
    # JSON-encode the popup text when one was supplied
    if popup_val is None:
        popup = None
    else:
        popup = folium.Popup(html=json.dumps(popup_val))

    return folium.PolyLine(locations=coords, popup=popup, **kwargs)
| {
"content_hash": "6cd460a591b9b151a7ce2f7607f7b0d9",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 97,
"avg_line_length": 30.45212765957447,
"alnum_prop": 0.639825327510917,
"repo_name": "gboeing/osmnx",
"id": "d33da4db642c9ee57fd97a43bef6e13508684ebe",
"size": "5725",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "osmnx/folium.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "738"
},
{
"name": "Dockerfile",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "344015"
},
{
"name": "Shell",
"bytes": "5583"
}
],
"symlink_target": ""
} |
import logging
import regex
from fuzzywuzzy import fuzz
from fuzzywuzzy import process as fuzz_process
from will import settings
from will.decorators import require_settings
from will.utils import Bunch
from .base import GenerationBackend, GeneratedOption
class FuzzyAllMatchesBackend(GenerationBackend):
    """Generation backend that fuzzy-matches an incoming message against
    every registered listener's regex pattern, scoring candidates with
    fuzzywuzzy and extracting capture groups with the `regex` module's
    fuzzy-matching syntax ({e<=N})."""

    def _generate_compiled_regex(self, method_meta):
        """Return (and memoize) the fuzzy-compiled pattern for a listener.

        Patterns are cached per plugin method path. If the listener's raw
        pattern is not valid regex syntax, it is escaped and compiled as a
        literal string instead.
        """
        if not hasattr(self, "cached_regex"):
            self.cached_regex = {}
        method_path = method_meta["plugin_info"]["parent_path"]
        if method_path not in self.cached_regex:
            regex_string = method_meta["regex_pattern"]
            if "case_sensitive" in method_meta and not method_meta["case_sensitive"]:
                regex_string = "(?i)%s" % regex_string

            # Multiline listeners also match across newlines.
            flags = regex.ENHANCEMATCH
            if method_meta["multiline"]:
                flags |= regex.MULTILINE | regex.DOTALL

            # Allow up to FUZZY_REGEX_ALLOWABLE_ERRORS edit-distance errors.
            try:
                compiled = regex.compile(
                    "%s{e<=%s}" % (regex_string, settings.FUZZY_REGEX_ALLOWABLE_ERRORS),
                    flags,
                )
            except regex.error:
                # Not valid regex syntax - fall back to a literal match.
                # (Previously a bare `except:` which hid unrelated errors.)
                compiled = regex.compile(
                    "%s{e<=%s}" % (regex.escape(regex_string), settings.FUZZY_REGEX_ALLOWABLE_ERRORS),
                    flags,
                )
            self.cached_regex[method_path] = compiled
        return self.cached_regex[method_path]

    def do_generate(self, event):
        """Return a list of GeneratedOption objects for an incoming message
        event, one per listener whose pattern fuzzily matches with at least
        FUZZY_MINIMUM_MATCH_CONFIDENCE confidence."""
        exclude_list = ["fn", ]
        matches = []
        message = event.data
        # TODO: add token_sort_ratio
        if message.content:
            # Lazily build the pattern list and pattern -> listener lookup once:
            if not hasattr(self, "match_choices"):
                self.match_choices = []
                self.match_methods = {}
                for name, l in self.bot.message_listeners.items():
                    if l["regex_pattern"] not in self.match_methods:
                        self.match_methods[l["regex_pattern"]] = l
                        self.match_choices.append(l["regex_pattern"])

            search_matches = fuzz_process.extract(message.content, self.match_choices)
            for match_str, confidence in search_matches:
                logging.debug("  Potential (%s) - %s" % (confidence, match_str))
                l = self.match_methods[match_str]
                if (
                    # The search regex matches and
                    # regex_matches
                    # We're confident enough
                    (confidence >= settings.FUZZY_MINIMUM_MATCH_CONFIDENCE)
                    # It's not from me, or this search includes me, and
                    and (
                        message.will_said_it is False
                        or ("include_me" in l and l["include_me"])
                    )
                    # I'm mentioned, or this is an overheard, or we're in a 1-1
                    and (
                        message.is_private_chat
                        or ("direct_mentions_only" not in l or not l["direct_mentions_only"])
                        or message.is_direct
                    )
                ):
                    logging.info("  Match (%s) - %s" % (confidence, match_str))
                    # Re-run the fuzzy regex to extract named capture groups:
                    fuzzy_regex = self._generate_compiled_regex(l)
                    regex_matches = fuzzy_regex.search(message.content)
                    context = Bunch()
                    for k, v in l.items():
                        if k not in exclude_list:
                            context[k] = v
                    if regex_matches and hasattr(regex_matches, "groupdict"):
                        context.search_matches = regex_matches.groupdict()
                    else:
                        context.search_matches = {}

                    o = GeneratedOption(context=context, backend="regex", score=confidence)
                    matches.append(o)
        return matches
| {
"content_hash": "e85915a78f11db5565f9fbd89bc83d1c",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 97,
"avg_line_length": 41.91509433962264,
"alnum_prop": 0.49966239027683995,
"repo_name": "wontonst/will",
"id": "b7773ebf1ab1d77239b00f20bde9b5c0f2e4aeb4",
"size": "4443",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "will/backends/generation/fuzzy_all_matches.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1409"
},
{
"name": "HTML",
"bytes": "2008"
},
{
"name": "Python",
"bytes": "316183"
},
{
"name": "Shell",
"bytes": "1940"
}
],
"symlink_target": ""
} |
import re
import optparse
from lib.networkutils import netcat
# Extracts the VAL="..." attribute from a ganglia gmond XML metric element.
VALUE_PARSING_RE = r'VAL=\"(.*?)\"'
def _get_ganglia_metrics(hostname, port, file_):
"""
Returns string (xml) representation of ganglia metrics. If file_ is passed
it will read the metrics from the file; otherwise, will ask ganglia running
on hostname:port for these metric.
@param hostname: str, nullable
@param port: int, nullable
@param file_: str, nullable
@return ganglia_metrics: str
"""
if file_:
f = open(file_, 'r')
return "".join(f.readlines())
else:
return netcat(hostname, port, '')
def _get_error_code(ganglia_metrics, hostname, metric, warning, critical):
    """
    Extracts the value for metric on hostname from ganglia_metrics by regex,
    and returns a nagios-style error code based on the warning/critical
    thresholds passed in (0=OK, 1=WARNING, 2=CRITICAL).
    @params ganglia_metrics: str
    @params hostname: str
    @params metric: str
    @params warning: float, nullable
    @params critical: float, nullable
    @return error_code: int
    """
    lines = ganglia_metrics.split('\n')
    for i, line in enumerate(lines):
        if hostname in line:
            # NOTE(review): this inner scan runs to the end of the document,
            # so if this host lacks the metric, a matching metric from a
            # *later* host section would be picked up -- confirm against the
            # gmond XML layout before relying on multi-host output.
            for j in range(i, len(lines)):
                if metric in lines[j]:
                    m = re.search(VALUE_PARSING_RE, lines[j])
                    val = float(m.group(1))
                    # Check critical first so it wins when both apply.
                    if critical is not None and val > critical:
                        print ("ERROR - hostname %s, metric %s, val %s, critical %s" %
                               (hostname, metric, val, critical,))
                        return 2
                    if warning is not None and val > warning:
                        print ("WARNING - hostname %s, metric %s, val %s, warning %s" %
                               (hostname, metric, val, warning,))
                        return 1
                    print ("OK - hostname %s, metric %s, val %s, warning %s" %
                           (hostname, metric, val, warning,))
                    return 0
    # Metric (or host) never seen: warn rather than report OK.
    print ("WARNING - no value for hostname %s, metric %s" %
           (hostname, metric))
    return 1
def main():
    # Parses command-line options, fetches the ganglia XML (from a file when
    # -F is given, otherwise over the network) and returns the nagios-style
    # status code computed by _get_error_code().
    p = optparse.OptionParser()
    p.add_option('-F', '--file',
                 help="only used for debugging. will load the ganglia\
                 values from file instead of getting it from ganglia.",
                 )
    p.add_option('-G', '--ganglia_host', default='localhost',
                 help="host that ganglia is running on.",
                 )
    p.add_option('-P', '--ganglia_port', type=int, default=8649,
                 help="port that ganglia is running on.",
                 )
    p.add_option('-H', '--hostname',
                 help="hostname to check the metric for.",
                 )
    p.add_option('-M', '--metric',
                 help="metric to check the value of.",
                 )
    p.add_option('-W', '--warning', type=float, default=None,
                 help="warning value",
                 )
    p.add_option('-C', '--critical', type=float, default=None,
                 help="critical value",
                 )
    options, arguments = p.parse_args()
    # At least one threshold is required, otherwise the check can never fire.
    if not options.critical and not options.warning:
        print 'USER_ERROR - must provide either a critical or warning value'
        return -1
    metrics = _get_ganglia_metrics(options.ganglia_host,
                                   options.ganglia_port,
                                   options.file,
                                   )
    error_code = _get_error_code(metrics, options.hostname, options.metric,
                                 options.warning,
                                 options.critical,
                                 )
    return error_code
if __name__ == '__main__':
    # Exit with the nagios status code from main(); any unexpected failure is
    # downgraded to WARNING (exit 1) so the check never crashes the scheduler.
    try:
        exit(main())
    except Exception, e:  # Python 2 syntax; broad catch is deliberate here
        print 'WARNING - failed %s' % e
        exit(1)
| {
"content_hash": "f6c3baba22bc18c6b6421f393f295c27",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 87,
"avg_line_length": 36.7196261682243,
"alnum_prop": 0.510307966403665,
"repo_name": "daniyalzade/nagios-ganglia-plugin",
"id": "d5ca56ffa5eb9a93ee1a39e4f584d2af4bfcfe8e",
"size": "5010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "check_ganglia_metric.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
from datetime import timedelta
from acouchbase.cluster import Cluster, get_event_loop
from couchbase.auth import PasswordAuthenticator
# **DEPRECATED**, use: from couchbase.options import DeltaValue, SignedInt64
# **DEPRECATED**, import ALL options from `couchbase.options`
from couchbase.collection import (DecrementOptions,
DeltaValue,
GetOptions,
IncrementOptions,
InsertOptions,
RemoveOptions,
ReplaceOptions,
SignedInt64,
UpsertOptions)
from couchbase.durability import (ClientDurability,
Durability,
PersistTo,
ReplicateTo,
ServerDurability)
from couchbase.exceptions import CASMismatchException, CouchbaseException
async def main():
    """Demonstrates the async Couchbase key-value API against a local
    cluster: insert/replace/upsert (with CAS and durability options), get
    (with expiry), touch, remove, and binary counter increment/decrement.
    Several operations intentionally fail to show exception handling.
    """
    cluster = Cluster(
        "couchbase://localhost",
        authenticator=PasswordAuthenticator(
            "Administrator",
            "password"))
    await cluster.on_connect()

    bucket = cluster.bucket("default")
    await bucket.on_connect()

    collection = bucket.default_collection()

    # setup: remove leftovers from a previous run so insert() succeeds
    try:
        result = await collection.remove("document-key")
        result = await collection.remove("document-key-opts")
    except CouchbaseException as ex:
        pass # may not exist in this example

    # Insert document
    document = {"foo": "bar", "bar": "foo"}
    result = await collection.insert("document-key", document)
    print("Result: {}; CAS: {}".format(result, result.cas))

    # Insert document with options (timeout via options, expiry via kwarg)
    document = {"foo": "bar", "bar": "foo"}
    opts = InsertOptions(timeout=timedelta(seconds=5))
    result = await collection.insert("document-key-opts",
                                     document,
                                     opts,
                                     expiry=timedelta(seconds=30))

    try:
        # Replace document with CAS
        document = {"foo": "bar", "bar": "foo"}
        result = await collection.replace(
            "document-key",
            document,
            cas=12345,
            timeout=timedelta(
                minutes=1))
    except CASMismatchException as ex:
        # we expect an exception here as the CAS value is chosen
        # for example purposes
        print('Caught CAS mismatch: {}'.format(ex))

    try:
        # Replace document with CAS (read-modify-write using the fresh CAS)
        result = await collection.get("document-key")
        doc = result.content_as[dict]
        doc["bar"] = "baz"
        opts = ReplaceOptions(cas=result.cas)
        result = await collection.replace("document-key", doc, opts)
    except CouchbaseException as ex:
        print('Caught Couchbase exception: {}'.format(ex))

    try:
        # Upsert with Durability (Couchbase Server >= 6.5) level Majority
        document = dict(foo="bar", bar="foo")
        opts = UpsertOptions(durability=ServerDurability(Durability.MAJORITY))
        result = await collection.upsert("document-key", document, opts)
    except CouchbaseException as ex:
        print('Caught Couchbase exception: {}'.format(ex))

    # @TODO: couchbase++ doesn't implement observe based durability
    # try:
    #     # Upsert with observe based durability (Couchbase Server < 6.5)
    #     document = {"foo": "bar", "bar": "foo"}
    #     opts = UpsertOptions(
    #         durability=ClientDurability(
    #             ReplicateTo.ONE,
    #             PersistTo.ONE))
    #     result = await collection.upsert("document-key", document, opts)
    # except CouchbaseException as ex:
    #     print(ex)

    result = await collection.get("document-key")
    print(result.content_as[dict])

    opts = GetOptions(timeout=timedelta(seconds=5))
    result = await collection.get("document-key", opts)
    print(result.content_as[dict])

    try:
        # remove document with options; CAS is deliberately wrong again
        result = await collection.remove(
            "document-key",
            RemoveOptions(
                cas=12345,
                durability=ServerDurability(
                    Durability.MAJORITY)))
    except CouchbaseException as ex:
        # we expect an exception here as the CAS value is chosen
        # for example purposes
        print('Caught Couchbase exception: {}'.format(ex))

    # touch: update the document's expiry without changing its content
    result = await collection.touch("document-key", timedelta(seconds=10))

    result = await collection.get("document-key", GetOptions(with_expiry=True))
    print("Expiry of result: {}".format(result.expiryTime))

    # get_and_touch: fetch the document and set a new expiry in one call
    result = await collection.get_and_touch("document-key", timedelta(seconds=10))

    # Increment binary value by 1
    await collection.binary().increment(
        "counter-key",
        IncrementOptions(
            delta=DeltaValue(1)))

    # Increment binary value by 5, if key doesn't exist, seed it at 1000
    await collection.binary().increment(
        "counter-key",
        IncrementOptions(
            delta=DeltaValue(5),
            initial=SignedInt64(1000)))

    # Decrement binary value by 1
    await collection.binary().decrement(
        "counter-key",
        DecrementOptions(
            delta=DeltaValue(1)))

    # Decrement binary value by 2, if key doesn't exist, seed it at 1000
    await collection.binary().decrement(
        "counter-key",
        DecrementOptions(
            delta=DeltaValue(2),
            initial=SignedInt64(1000)))
if __name__ == "__main__":
    # Drive the example on acouchbase's asyncio-compatible event loop.
    loop = get_event_loop()
    loop.run_until_complete(main())
| {
"content_hash": "401c20136c6428ca4ea5eb1136e0bfbd",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 82,
"avg_line_length": 36.619354838709675,
"alnum_prop": 0.5847427766032417,
"repo_name": "couchbase/couchbase-python-client",
"id": "caa2ce0e581ad00b134e32f72f6829bb433d8718",
"size": "5676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/acouchbase/acouchbase_kv_operations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "779634"
},
{
"name": "CMake",
"bytes": "5320"
},
{
"name": "Python",
"bytes": "2787486"
}
],
"symlink_target": ""
} |
from simplepyged.gedcom import *
from LatexReport import *
#g = Gedcom(os.path.abspath('../../test/mcintyre.ged'))
#g = Gedcom(os.path.abspath('../../test/wright.ged'))
# Load a GEDCOM genealogy file (hard-coded local path; see the commented
# alternatives above for the bundled test files).
g = Gedcom(os.path.abspath('/home/nick/slova/rodoslovlje/moje.ged'))
l = LatexReport(g)
#l.home_person = g.get_individual('@P405366386@') # mary
#l.home_person = g.get_individual('@I0282@') # me
# Root the report at the maternal grandfather of individual @I0282@.
l.home_person = g.get_individual('@I0282@').mother().father() # "deda" (grandpa) novosel
#l.home_person = g.get_individual('@I13@') # evica
#l.home_person = g.get_individual('@I34@') # ante
# This is some old code that doesn't work anymore (TODO: rewrite this)
#fam = g.get_family('@F5@')
#stack = [fam]
#stack = construct_stack(stack, 6)
# Python 2 print statement: emit the LaTeX report for the home person.
print l.get_home_person_latex()
#print l.get_latex()
| {
"content_hash": "4d569a31df37b4ea5dcf6f8b08808877",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 34.04545454545455,
"alnum_prop": 0.678237650200267,
"repo_name": "andersardo/gedMerge",
"id": "623f46c7bd12656ba8661383d590a140c0a3a03d",
"size": "749",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simplepyged/docs/examples/latex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4116"
},
{
"name": "JavaScript",
"bytes": "1778"
},
{
"name": "Makefile",
"bytes": "4610"
},
{
"name": "PHP",
"bytes": "883993"
},
{
"name": "Python",
"bytes": "438770"
},
{
"name": "Roff",
"bytes": "2674"
},
{
"name": "Smarty",
"bytes": "69984"
},
{
"name": "TeX",
"bytes": "3112"
}
],
"symlink_target": ""
} |
from agate.columns.base import Column
class BooleanColumn(Column):
    """
    Column type holding :class:`bool` data.

    All behaviour is inherited from :class:`Column`; this subclass exists
    only to tag the column's value type.
    """
| {
"content_hash": "63500b0ac2ddb7021c24b71f3657c216",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 43,
"avg_line_length": 19.571428571428573,
"alnum_prop": 0.656934306569343,
"repo_name": "TylerFisher/agate",
"id": "b1a618c00197c21972a5ab9f2ad3e2c17941260b",
"size": "160",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "agate/columns/boolean.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "167484"
}
],
"symlink_target": ""
} |
from GenericRequest import GenericRequest
from kol.manager import PatternManager
from kol.util import ChatUtils
from kol.util import Report
from kol.util import StringUtils
class GetChatMessagesRequest(GenericRequest):
    """Fetches chat messages that arrived after the given server timestamp."""

    def __init__(self, session, lastTime=0):
        super(GetChatMessagesRequest, self).__init__(session)
        self.url = session.serverURL + "newchatmessages.php?lasttime=%s" % lastTime

    def parseResponse(self):
        # Get the timestamp we should send to the server next time we make a request.
        lastSeenPattern = PatternManager.getOrCompilePattern("chatLastSeen")
        match = lastSeenPattern.search(self.responseText)
        self.responseData["lastSeen"] = match.group(1)

        # Parse the chat messages. Strip the trailing "<!--lastseen" marker
        # first. NOTE(review): the original computed this trimmed `text` but
        # then parsed the full responseText, leaving the local unused; the
        # trimmed text is now passed as apparently intended -- confirm that
        # parseIncomingChatMessage does not itself need the marker.
        text = self.responseText[:self.responseText.find('<!--lastseen')]
        self.responseData["chatMessages"] = ChatUtils.parseIncomingChatMessage(text)
| {
"content_hash": "47bb1aa071de39ddc193d64817b3ad8e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 97,
"avg_line_length": 46.25,
"alnum_prop": 0.7340540540540541,
"repo_name": "KevZho/buffbot",
"id": "e77e148e4b32a3ee75606430fb8460fde6dfc8b5",
"size": "925",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kol/request/GetChatMessagesRequest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2078509"
}
],
"symlink_target": ""
} |
import json
import logging
import traceback
from google.appengine.api import users
import webapp2
from shared import utils
from shared.config import AUTO_TAGGED_FIELDS, CQ_BOT_PASSWORD_KEY
from shared.parsing import (
parse_fields,
parse_record_key,
parse_request,
parse_strings,
)
from model.password import Password
from model.record import Record
def update_record(key=None, tags=None, fields=None): # pragma: no cover
  """Creates or overwrites the Record for |key| with the given tags/fields.

  Fields listed in AUTO_TAGGED_FIELDS are mirrored into tags as
  'name=value' entries; duplicate tags are removed.

  Raises ValueError when everything is empty or the required 'project'
  field is missing.
  """
  # Copy the list so auto-generated tags never mutate the caller's argument
  # (the original appended to the caller-supplied list in place).
  tags = list(tags) if tags else []
  fields = fields or {}
  if not key and len(tags) == 0 and len(fields) == 0:
    raise ValueError('Empty record entries disallowed')
  if 'project' not in fields:
    raise ValueError('"Project" field missing')
  for item in fields:
    if item in AUTO_TAGGED_FIELDS:
      tags.append('%s=%s' % (item, fields[item]))
  record = Record(id=key)
  record.tags = list(set(tags))
  record.fields = fields
  record.put()
class Post(webapp2.RequestHandler): # pragma: no cover
  """Creates/updates Records via GET (single, browser) or POST (batch)."""

  def get(self):
    """Updates one record from query parameters; valid users only."""
    if not utils.is_valid_user():
      self.redirect(users.create_login_url('/'))
      return
    parsers = {
        'key': parse_record_key,
        'tags': parse_strings,
        'fields': parse_fields,
    }
    try:
      kwargs = parse_request(self.request, parsers)
      update_record(**kwargs)
    except ValueError as e:
      logging.warning(traceback.format_exc())
      self.response.write(e)

  def post(self):
    """Updates a batch of records from JSON packets in the 'p' parameter."""
    if not (utils.is_valid_user() or self._is_cq_bot()):
      self.response.set_status(403)
      return
    try:
      packets = [json.loads(raw) for raw in self.request.get_all('p')]
      for packet in packets:
        if not isinstance(packet, dict):
          raise ValueError('JSON dictionary expected.')
    except ValueError as e:
      # json.loads raises a ValueError subclass on malformed input.
      logging.warning(traceback.format_exc())
      self.response.write('Invalid packet: %s' % e)
      return
    try:
      for packet in packets:
        update_record(**utils.filter_dict(packet, ('key', 'tags', 'fields')))
    except ValueError as e:
      logging.warning(traceback.format_exc())
      self.response.write(e)

  def _is_cq_bot(self):
    """Returns True when the request carries the CQ bot's shared password."""
    password = self.request.get('password')
    if not password:
      return False
    sha1 = utils.password_sha1(password)
    return sha1 == Password.get_by_id(CQ_BOT_PASSWORD_KEY).sha1
| {
"content_hash": "eb89d8add1aace1b198f51cd09497cef",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 77,
"avg_line_length": 28.649350649350648,
"alnum_prop": 0.6613780598368088,
"repo_name": "nicko96/Chrome-Infra",
"id": "7a57e7bb31b68679f0fe37fb568b5971f925f1fd",
"size": "2369",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "appengine/chromium_cq_status/handlers/post.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "100398"
},
{
"name": "Go",
"bytes": "648467"
},
{
"name": "HTML",
"bytes": "7323317"
},
{
"name": "JavaScript",
"bytes": "913960"
},
{
"name": "Makefile",
"bytes": "11281"
},
{
"name": "Protocol Buffer",
"bytes": "2730"
},
{
"name": "Python",
"bytes": "4034630"
},
{
"name": "Shell",
"bytes": "21687"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from gdelt.api import GDELTResource
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Populate the admin site by discovering admin modules in installed apps.
admin.autodiscover()

# Resource instance whose routes are mounted under /api/ (see gdelt.api).
gdelt_resource = GDELTResource()

urlpatterns = patterns('',
    # Examples:
    url(r'^$', 'gdelt.views.home', name='home'),
    url(r'^api/', include(gdelt_resource.urls)),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "740c921b580cf22af3f340b0cceb716a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 66,
"avg_line_length": 25.789473684210527,
"alnum_prop": 0.6979591836734694,
"repo_name": "Berico-Technologies/CLAVIN-contrib",
"id": "e898cef7f3f586608512a9cb910218118a7c18a9",
"size": "490",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gdelt/gdelt/gdelt/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "0"
},
{
"name": "Java",
"bytes": "2928"
},
{
"name": "Python",
"bytes": "20790"
},
{
"name": "Scala",
"bytes": "4172"
},
{
"name": "Shell",
"bytes": "290"
}
],
"symlink_target": ""
} |
import glumpy
import numpy as np
from glumpy import app, gl, gloo, glm
vertex = """
#version 120
// Uniforms
// --------
uniform sampler2D tex_data;
uniform vec2 tex_size;
uniform float char_width;
uniform float char_height;
uniform float rows;
uniform float cols;
uniform float scale;
uniform vec4 foreground;
uniform vec4 background;
uniform vec2 selection;
uniform mat4 projection;
// Attributes
// ----------
attribute float pindex;
attribute float gindex;
// Varyings
// --------
varying vec2 v_texcoord;
varying vec4 v_foreground;
varying vec4 v_background;
// Main
// ----
void main (void)
{
// Compute char position from pindex
float x = mod(pindex, cols);
float y = floor(pindex/cols);
vec2 P = (vec2(x,y) * vec2(char_width, char_height)) * scale;
P += vec2(char_height, char_height)*scale/2.0;
P += vec2(2.0, 2.0);
gl_Position = projection*vec4(P, 0.0, 1.0);
gl_PointSize = char_height * scale;
// Compute color (selection)
if( (pindex >= selection.x) && (pindex < selection.y))
v_background = vec4(v_foreground.rgb, 0.1);
else
v_background = background;
// Compute glyph tex coord from gindex
float n = tex_size.x/char_width;
x = 0.5 + mod(gindex, n) * char_width;
y = 0.5 + floor(gindex/n) * char_height;
v_texcoord = vec2(x/tex_size.x, y/tex_size.y);
}
"""
fragment = """
#version 120
// Uniforms
// --------
uniform sampler2D tex_data;
uniform vec2 tex_size;
uniform float char_width;
uniform float char_height;
uniform float rows;
uniform float cols;
uniform float scale;
uniform vec2 selection;
uniform vec4 foreground;
// Varyings
// --------
varying vec2 v_texcoord;
varying vec4 v_background;
// Main
// ----
void main(void)
{
vec2 uv = floor(gl_PointCoord.xy * char_height);
if(uv.x > (char_width-1.0)) discard;
if(uv.y > (char_height-1.0)) discard;
float v = texture2D(tex_data, v_texcoord+uv/tex_size).r;
gl_FragColor = v * foreground + (1.0-v) * v_background.a;
}
"""
class TextBuffer(object):
    """
    A fixed rows x cols grid of character cells rendered with one GL point
    sprite per cell, using a 6x13 bitmap font (regular/italic/bold) packed
    into a single texture atlas.
    """

    def __init__(self, rows=24, cols=80, x=0, y=0, scale=2):
        # rows/cols: grid dimensions; scale: integer zoom of the 6x13 glyphs.
        # NOTE(review): x and y are accepted but never used here -- presumably
        # kept for API symmetry with callers; confirm before removing.

        # Build program first
        self._program = gloo.Program(vertex, fragment)

        # Build a font array that holds regular, italic & bold font
        # Regular: 0 to 65536-1
        # Italic : 65536 to 2*65536-1
        # Bold : 2*65536 to 3*65536-1
        regular = glumpy.data.get("6x13-regular.npy")
        italic = glumpy.data.get("6x13-italic.npy")
        bold = glumpy.data.get("6x13-bold.npy")
        n1 = len(regular)
        n2 = len(italic)
        n3 = len(bold)
        n = n1+n2+n3

        dtype = [ ("code", np.uint32, 1),
                  ("data", np.uint8, 10)]
        font = np.zeros(n, dtype)
        font[:n1] = regular
        font[n1:n1+n2] = italic
        # Offset codes so each style occupies its own 65536-wide code band.
        font[n1:n1+n2]["code"] += 1*65536
        font[n1+n2:n1+n2+n3] = bold
        font[n1+n2:n1+n2+n3]["code"] += 2*65536

        # Build a texture out of glyph arrays (need to unpack bits)
        # This code is specific for a character size of 6x13
        n = len(font)
        G = np.unpackbits(font["data"].ravel())
        G = G.reshape(n,80)[:,:78].reshape(n,13,6)
        # Atlas is 128 glyphs wide; as many 13-pixel rows as needed.
        width, height = 6*128, 13*((n//128)+1)
        data = np.zeros((height,width), np.ubyte)
        for i in range(n):
            r = 13*(i//128)
            c = 6*(i % 128)
            data[r:r+13,c:c+6] = G[i]*255

        # Store char codes (sorted order assumed by searchsorted in put())
        self._codes = font["code"]

        # Fill program uniforms
        self._program["tex_data"] = data.view(gloo.Texture2D)
        self._program["tex_data"].interpolation = gl.GL_NEAREST
        self._program["tex_data"].wrapping = gl.GL_CLAMP
        self._program["tex_size"] = width, height
        self._program["char_width"] = 6.0
        self._program["char_height"]= 13.0
        self._program["rows"] = rows
        self._program["cols"] = cols
        self._program["scale"]= int(max(1.0, scale))
        self._program["foreground"] = 0, 0, 0, 1
        self._program["background"] = 0, 0, 0, 0
        self._program['selection'] = -1,-1

        # Build vertex buffer: one (cell index, glyph index) pair per cell.
        self._vbuffer = np.zeros(rows*cols, [("pindex", np.float32, 1),
                                             ("gindex", np.float32, 1)])
        self._vbuffer = self._vbuffer.view(gloo.VertexBuffer)
        self._vbuffer["pindex"] = np.arange(rows*cols)
        self._vbuffer["gindex"] = 1 # index of space in our font
        self._program.bind(self._vbuffer)

        self._rows = rows
        self._cols = cols
        self._scale = int(max(scale,1))
        self._selection = None

    def on_init(self):
        # Enable shader-controlled point size and point-sprite texcoords.
        gl.glEnable(gl.GL_VERTEX_PROGRAM_POINT_SIZE)
        gl.glEnable(gl.GL_POINT_SPRITE)

    def on_resize(self, width, height):
        # Pixel-space orthographic projection with the origin top-left.
        self._program["projection"] = glm.ortho(0, width, height, 0, -1, +1)

    def draw(self):
        # One point sprite per cell.
        self._program.draw(gl.GL_POINTS)

    def __contains__(self, xy):
        # True when the pixel coordinate (x, y) falls inside the grid area.
        (x,y) = xy
        width = self._cols*self._scale*6
        height = self._rows*self._scale*13
        if 0 <= x < width and 0 <= y < height:
            return True
        return False

    @property
    def scale(self):
        """ Font scale """
        return self._scale

    @property
    def rows(self):
        """ Number of rows """
        return self._rows

    @property
    def cols(self):
        """ Number of columns """
        return self._cols

    @property
    def selection_bounds(self):
        """ Selection bounds, normalized to (low, high) and clamped to the grid """
        start,end = self._selection
        if end < start:
            start,end = end,start
        return max(0, start), min(self.rows*self.cols, end)

    def clear(self, start=0, end=-1):
        """
        Clear the text buffer
        """
        self._vbuffer["gindex"] = 1 # index of space in our font
        self.clear_selection()

    def clear_selection(self):
        """
        Clear current selection
        """
        self._selection = None
        self._program["selection"] = -1,-1

    def put(self, row, col, text, style=0):
        """ Put text at (row,col); style: 0=regular, 1=italic, 2=bold """
        # Make style argument is of the right type
        style = np.atleast_1d(style)

        index = row*self.cols + col

        # Decode text
        if isinstance(text, str):
            # NOTE(review): str(text) is a no-op on Python 3 -- this was
            # presumably a unicode decode under Python 2.
            text = str(text)
            codes = np.array([ord(c) for c in text]).astype(np.uint32)
        else:
            codes = text.astype(np.uint32).ravel()

        # Crop if necessary (writing past the last cell)
        n = len(codes)
        imax = self.rows*self.cols
        if index + n > imax:
            n = imax - index
            codes = codes[:n]
            style = style[:n]

        # Tweak code to take style into account (style bands are 65536 apart)
        codes += np.uint32(style*65536)

        # Replace unknown glyphs with glyph 0
        codes *= np.in1d(codes, self._codes)

        # Put glyphs data into buffer
        self._vbuffer["gindex"][index:index+n] = np.searchsorted(self._codes, codes)
# -----------------------------------------------------------------------------
class Console(TextBuffer):
    """
    Scrollable console on top of TextBuffer: keeps the last `cache` lines in
    a ring buffer, supports mouse selection and wheel scrolling.
    """

    def __init__(self, rows=24, cols=80, x=3, y=3, scale=2, cache=1000):
        TextBuffer.__init__(self, rows, cols, x, y, scale)

        # We use a ring buffer to avoid to have to move things around
        self._buffer_start = 0
        self._buffer_end = 0
        cache = max(cache, rows)
        self._buffer = np.ones((cache+rows,cols),
                               dtype=[("code", np.uint16, 1),
                                      ("style", np.uint16, 1)])
        self._buffer["code"] = 32 # space
        # Negative scroll offset: -rows means "show the latest page".
        self._scroll = -self.rows
        self._default_foreground = 0,0,0,1 # Black
        self._default_background = 0,0,0,0 # Transparent black
        self._default_style = 0 # Regular
        self._buffer["style"] = self._default_style

    def write(self, text="", style=None):
        """ Write at current position into the buffer and rotate buffer """
        if style is None:
            style = self._default_style
        n = len(self._buffer)
        empty = 32, 0

        # Clear line
        self._buffer[self._buffer_end] = empty

        # Write line (anything past `cols` characters is dropped by numpy
        # broadcasting rules only if it fits; lines are assumed <= cols)
        self._buffer["code"][self._buffer_end,:len(text)] = [ord(c) for c in text]
        self._buffer["style"][self._buffer_end,:len(text)] = style

        # Clear line beyond row lines in case use want to have only the first
        # line on top (or else we would display buffer start)
        self._buffer[(self._buffer_end+self.rows) % n] = empty

        # Advance ring pointers, dropping the oldest line when full.
        self._buffer_end = (self._buffer_end + 1) % n
        if self._buffer_end == self._buffer_start:
            self._buffer_start = (self._buffer_start + 1) % n

        # Update text buffer
        # V = self.view(int(self._scroll))
        # self.put(0, 0, text=V["code"])

        # Update selection if any
        if self._selection is not None:
            start, end = self._selection
            start += self._scroll*self.cols
            end += self._scroll*self.cols
            if end < start:
                self._program["selection"] = end, start
            else:
                self._program["selection"] = start, end

    def on_mouse_press(self, x, y, button):
        """ Selection start point (taking scroll into account) """
        if (x,y) in self:
            x = x // ( 6*self._scale)
            y = y // (13*self._scale)
            s = (int(self._scroll)+self.rows)*self.cols
            start = y*self.cols + x
            # Stored in absolute buffer coordinates; uniform in view coords.
            self._selection = start+s, start+s
            self._program["selection"] = start, start

    def on_mouse_drag(self, x, y, dx, dy, button):
        """ Selection end point (taking scroll into account) """
        if self._selection is not None:
            x = x // ( 6*self._scale)
            y = y // (13*self._scale)
            s = (int(self._scroll)+self.rows)*self.cols
            start = self._selection[0]
            end = (y*self.cols+x) + s
            self._selection = start, end
            # Normalize so the uniform always gets (low, high).
            if end < start:
                self._program["selection"] = end-s, start-s
            else:
                self._program["selection"] = start-s, end-s

    def on_mouse_scroll(self, x, y, dx, dy):
        # Count how many lines have been written so far
        if self._buffer_end > self._buffer_start:
            n = self._buffer_end
        else:
            n = len(self._buffer) - self.rows
        # Clamp scroll between the oldest page and the latest line.
        self._scroll = min(max(self._scroll-dy,-n),-1)

        # Update text buffer
        #V = self.view(int(self._scroll))
        #self.put(0, 0, text=V["code"])

        # Update selection if any
        if self._selection is not None:
            start, end = self._selection
            start -= (int(self._scroll)+self.rows)*self.cols
            end -= (int(self._scroll)+self.rows)*self.cols
            self._program["selection"] = start, end

    def draw(self):
        # Refresh the visible page from the ring buffer, then render.
        V = self.view(int(self._scroll))
        self.put(0, 0, text=V["code"])
        TextBuffer.draw(self)

    def clear(self):
        # Reset both the GL grid and the ring buffer.
        TextBuffer.clear(self)
        self._buffer_start = 0
        self._buffer_end = 0
        self._buffer[...] = 32,0

    def view(self, index=-1):
        """ Retrieve a view of the buffer starting at index """
        # Count how many lines have been written so far
        if self._buffer_end > self._buffer_start:
            n = self._buffer_end
        else:
            n = len(self._buffer) - self.rows
        actual_index = min(max(index, -n),-1)
        start = (self._buffer_end + actual_index) % len(self._buffer)
        stop = start + self.rows
        # Wrap around the ring with modular indices.
        indices = np.mod(np.arange(start, stop), len(self._buffer))
        return self._buffer[indices].ravel()
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    # Demo: open a window sized to the console grid and dump a UTF-8 sample
    # file into it.
    console = Console(24,80,scale=2)
    window = app.Window(width=console.cols*console.scale*6,
                        height=console.rows*console.scale*13,
                        color = (1,1,1,1))

    @window.event
    def on_draw(dt):
        window.clear(), console.draw()

    import codecs
    f = codecs.open("UTF-8-demo.txt", "r", "utf-8")
    # f = codecs.open("TeX.txt", "r", "utf-8")
    lines = f.readlines()
    for line in lines:
        console.write(line[:-1])

    # Alternative demo: uncomment the decorator to refresh stats at 30 Hz.
    # @window.timer(1/30.0)
    def timer(fps):
        console.clear()
        console.write("─────────────────────────────────────────────────────")
        console.write("GLUMPY 2.0 - Copyright (c) 2014 Nicolas P. Rougier")
        console.write("")
        console.write(" → Window size: %dx%d" % (window.width, window.height))
        console.write(" → Backend: %s (%s)" % (window._backend.__name__,
                                               window._backend.__version__))
        console.write(" → Console size: %dx%d" % (console.rows, console.cols))
        console.write(" → Actual FPS: %.2f frames/second " % (app.fps()))
        console.write("───────────────────────────────────────────────────────")
        #for line in repr(window.config).split("\n"):
        #    console.write(u" "+line)
        #console.write(u"───────────────────────────────────────────────────────")

    window.attach(console)
    app.run()
| {
"content_hash": "36245da17b186a894dda5eba7d3db4a0",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 84,
"avg_line_length": 29.941176470588236,
"alnum_prop": 0.5320386882272934,
"repo_name": "glumpy/glumpy",
"id": "c793dbc948ec346e01bdb222e7ae0b55ba778575",
"size": "13862",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/gloo-terminal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "26075"
},
{
"name": "Cython",
"bytes": "660"
},
{
"name": "GLSL",
"bytes": "177965"
},
{
"name": "Makefile",
"bytes": "407"
},
{
"name": "Python",
"bytes": "1320773"
}
],
"symlink_target": ""
} |
"""
@author: magic
"""
# -*- coding: utf-8 -*-
from django.db import models
from user import User
class Tag(models.Model):
    """Free-form tag attachable to articles (many-to-many)."""
    name = models.CharField(max_length=30, verbose_name=u'标签名称')

    class Meta:
        verbose_name = u'标签'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return self.name
# Category (article classification)
class Category(models.Model):
    """Article category, ordered by the explicit `index` field."""
    name = models.CharField(max_length=30, verbose_name=u'分类名称')
    # Sort key; new categories default to 999 so they sort last.
    index = models.IntegerField(default=999, verbose_name=u'分类的排序')

    class Meta:
        verbose_name = u'分类'
        verbose_name_plural = verbose_name
        ordering = ['index', 'id']

    def __unicode__(self):
        return self.name
# Custom manager for the Article model. A manager can either
# 1. add new data-processing methods, or
# 2. change the default queryset.
# This one only does (1).
class ArticleManager(models.Manager):
    def distinct_date(self):
        """Return the distinct 'YYYY/MM archive' labels of publish dates,
        preserving first-seen order."""
        distinct_date_list = []
        date_list = self.values('date_publish')
        for date in date_list:
            # Format string includes the Chinese label "article archive".
            date = date['date_publish'].strftime('%Y/%m文章存档')
            if date not in distinct_date_list:
                distinct_date_list.append(date)
        return distinct_date_list
# Article model
class Article(models.Model):
    """Blog article, newest first, with category/tag relations."""
    title = models.CharField(max_length=50, verbose_name=u'文章标题')
    desc = models.CharField(max_length=50, verbose_name=u'文章描述')
    content = models.TextField(verbose_name=u'文章内容')
    click_count = models.IntegerField(default=0, verbose_name=u'点击次数')
    is_recommend = models.BooleanField(default=False, verbose_name=u'是否推荐')
    date_publish = models.DateTimeField(auto_now_add=True, verbose_name=u'发布时间')
    user = models.ForeignKey(User, verbose_name=u'用户')
    category = models.ForeignKey(Category, blank=True, null=True, verbose_name=u'分类')
    tag = models.ManyToManyField(Tag, verbose_name=u'标签')

    # Replace the default manager so Article.objects.distinct_date() works.
    objects = ArticleManager()

    class Meta:
        verbose_name = u'文章'
        verbose_name_plural = verbose_name
        ordering = ['-date_publish']

    def __unicode__(self):
        return self.title
# Comment model
class Comment(models.Model):
    """Article comment; `pid` points at the parent comment for threading."""
    content = models.TextField(verbose_name=u'评论内容')
    # Identity fields are nullable so anonymous comments are possible.
    username = models.CharField(max_length=30, blank=True, null=True, verbose_name=u'用户名')
    email = models.EmailField(max_length=50, blank=True, null=True, verbose_name=u'邮箱地址')
    url = models.URLField(max_length=100, blank=True, null=True, verbose_name=u'个人网页地址')
    date_publish = models.DateTimeField(auto_now_add=True, verbose_name=u'发布时间')
    user = models.ForeignKey(User, blank=True, null=True, verbose_name=u'用户')
    article = models.ForeignKey(Article, blank=True, null=True, verbose_name=u'文章')
    # Self-reference: parent comment (None for top-level comments).
    pid = models.ForeignKey('self', blank=True, null=True, verbose_name=u'父级评论')

    class Meta:
        verbose_name = u'评论'
        verbose_name_plural = verbose_name

    def __unicode__(self):
        return str(self.id)
| {
"content_hash": "1087c8c199127a6ac5fda62c1d530c89",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 90,
"avg_line_length": 31.415730337078653,
"alnum_prop": 0.6605865522174535,
"repo_name": "csunny/blog_project",
"id": "d596e16212b61d2891f890dfc116daaaa96d97eb",
"size": "3086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/apps/blog/models/article.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "106391"
},
{
"name": "HTML",
"bytes": "34257"
},
{
"name": "JavaScript",
"bytes": "1368246"
},
{
"name": "Jupyter Notebook",
"bytes": "31514"
},
{
"name": "Python",
"bytes": "89996"
}
],
"symlink_target": ""
} |
""" Runs tests for the NoPress (All) Dataset Builder """
from diplomacy_research.models.policy.tests.policy_builder_test_setup import PolicyBuilderTestSetup
from diplomacy_research.models.policy.token_based.dataset.no_press_all import DatasetBuilder
from diplomacy_research.utils.process import run_in_separate_process
def launch():
    """Build the policy-builder harness around the no-press (all)
    dataset builder and execute its test suite in-process."""
    PolicyBuilderTestSetup(DatasetBuilder()).run_tests()
def test_run():
    """ Runs the test """
    # launch() is executed in a separate process with a 60-second timeout.
    run_in_separate_process(target=launch, timeout=60)
| {
"content_hash": "2f85d383beacef8ef59fa6f099159bd9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 99,
"avg_line_length": 42.76923076923077,
"alnum_prop": 0.7607913669064749,
"repo_name": "diplomacy/research",
"id": "ced7c221e23ce66ae4845a3541782f970ae5bcd3",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diplomacy_research/models/policy/token_based/dataset/tests/test_no_press_all_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "592"
},
{
"name": "C++",
"bytes": "5188"
},
{
"name": "Dockerfile",
"bytes": "31749"
},
{
"name": "Groovy",
"bytes": "15568"
},
{
"name": "Python",
"bytes": "2557493"
},
{
"name": "Shell",
"bytes": "26305"
}
],
"symlink_target": ""
} |
from django.db import models
from django.db.models import Count
class Users(models.Model):
    """A poll participant; admin/active flags are stored as 0/1 integers."""
    first_name = models.CharField(max_length=25)
    last_name = models.CharField(max_length=25)
    email_address = models.CharField(max_length=50)
    is_admin = models.SmallIntegerField(null=False, default=0)
    is_active = models.SmallIntegerField(null=False, default=1)
    def __unicode__(self):
        """Users are displayed by their email address."""
        return self.email_address
class Questions(models.Model):
    """A poll question with a type, an active date window, and a
    vote-tallying helper."""
    QUESTION_TYPE = ( ('multi-choice','multiple choice'), ('essay','free-form essay'), )
    poll_title = models.CharField(max_length=50)
    question_text = models.CharField(max_length=250)
    question_type = models.CharField(max_length=25, choices=QUESTION_TYPE)
    begin_date = models.DateTimeField()
    end_date = models.DateTimeField()
    def get_poll_results(self, poll_id):
        """Return per-answer vote counts for the question with ``poll_id``."""
        return Votes.objects.filter(answer_value__question__id = poll_id).values('answer_value') \
            .annotate(vote_count=Count('answer_value'))
    def __unicode__(self):
        return self.question_text
    class Meta:
        permissions=(
            ("can_send_poll_emails","Can send poll emails"
            ),
        )
class Answers(models.Model):
    """One selectable answer belonging to a question."""
    question = models.ForeignKey(Questions)
    answer_text = models.CharField(max_length=100)
    def __unicode__(self):
        return self.answer_text
class Votes(models.Model):
    """A single ballot: which answer a voter chose, and when."""
    answer_value = models.ForeignKey(Answers)
    voter = models.ForeignKey(Users)
    answer_timestamp = models.DateTimeField()
    def __unicode__(self):
        return str(self.answer_value)
class Users_Questions_Hash(models.Model):
    """Per-user, per-question voting token (hash) used to validate ballots."""
    voter = models.ForeignKey(Users)
    question = models.ForeignKey(Questions)
    hash_value = models.CharField(max_length=255)
    is_valid = models.SmallIntegerField(null=False, default=1)
    def __unicode__(self):
        # Bug fix: previously returned the related Users instance itself,
        # which is not a valid string return value for __unicode__.
        # Convert it the same way Votes.__unicode__ does.
        return str(self.voter)
| {
"content_hash": "a3b7690595ecb28d96d10c738684013e",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 98,
"avg_line_length": 30.580645161290324,
"alnum_prop": 0.6751054852320675,
"repo_name": "Pshrub/office-poll-topic",
"id": "3977d387a4c714795e93e8114f417a4489896e12",
"size": "1896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poll/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57"
},
{
"name": "Python",
"bytes": "11572"
}
],
"symlink_target": ""
} |
import datetime
import iso8601
import mock
from oslo_utils import timeutils
from jacket.objects import compute
from jacket.tests.compute.unit.objects import test_objects
from jacket.compute import utils
NOW = timeutils.utcnow().replace(microsecond=0)
# Canonical task_log DB row: a 10-second task period ending at NOW, one
# processed item, no errors. Used below both as the mocked DB return value
# and as the expected object contents.
fake_task_log = {
    'created_at': NOW,
    'updated_at': None,
    'deleted_at': None,
    'deleted': 0,
    'id': 1,
    'task_name': 'fake-name',
    'state': 'fake-state',
    'host': 'fake-host',
    'period_beginning': NOW - datetime.timedelta(seconds=10),
    'period_ending': NOW,
    'message': 'fake-message',
    'task_items': 1,
    'errors': 0,
}
class _TestTaskLog(object):
    """Behavioural checks for compute.TaskLog, shared by the local and
    remote test flavours declared below."""

    @mock.patch('compute.db.task_log_get', return_value=fake_task_log)
    def test_get(self, mock_get):
        expected = fake_task_log
        fetched = compute.TaskLog.get(self.context,
                                      expected['task_name'],
                                      expected['period_beginning'],
                                      expected['period_ending'],
                                      expected['host'],
                                      state=expected['state'])
        # The DB layer must receive the period bounds as strings.
        mock_get.assert_called_once_with(
            self.context,
            expected['task_name'],
            utils.strtime(expected['period_beginning']),
            utils.strtime(expected['period_ending']),
            expected['host'],
            state=expected['state'])
        self.compare_obj(fetched, expected)

    @mock.patch('compute.db.task_log_begin_task')
    def test_begin_task(self, mock_begin_task):
        expected = fake_task_log
        task_log = compute.TaskLog(self.context)
        for field in ('task_name', 'period_beginning', 'period_ending',
                      'host', 'task_items', 'message'):
            setattr(task_log, field, expected[field])
        task_log.begin_task()
        # The DB call is expected to see UTC-aware datetimes.
        mock_begin_task.assert_called_once_with(
            self.context,
            expected['task_name'],
            expected['period_beginning'].replace(
                tzinfo=iso8601.iso8601.Utc()),
            expected['period_ending'].replace(
                tzinfo=iso8601.iso8601.Utc()),
            expected['host'],
            task_items=expected['task_items'],
            message=expected['message'])

    @mock.patch('compute.db.task_log_end_task')
    def test_end_task(self, mock_end_task):
        expected = fake_task_log
        task_log = compute.TaskLog(self.context)
        for field in ('task_name', 'period_beginning', 'period_ending',
                      'host', 'errors', 'message'):
            setattr(task_log, field, expected[field])
        task_log.end_task()
        mock_end_task.assert_called_once_with(
            self.context,
            expected['task_name'],
            expected['period_beginning'].replace(
                tzinfo=iso8601.iso8601.Utc()),
            expected['period_ending'].replace(
                tzinfo=iso8601.iso8601.Utc()),
            expected['host'],
            errors=expected['errors'],
            message=expected['message'])
class TestTaskLog(test_objects._LocalTest, _TestTaskLog):
    """Run the _TestTaskLog assertions via test_objects._LocalTest."""
    pass
class TestRemoteTaskLog(test_objects._RemoteTest, _TestTaskLog):
    """Run the _TestTaskLog assertions via test_objects._RemoteTest."""
    pass
class _TestTaskLogList(object):
    """Behavioural checks for compute.TaskLogList, shared by the local
    and remote test flavours declared below."""

    @mock.patch('compute.db.task_log_get_all')
    def test_get_all(self, mock_get_all):
        expected = fake_task_log
        db_rows = [dict(expected, id=row_id) for row_id in (1, 2)]
        mock_get_all.return_value = db_rows
        task_logs = compute.TaskLogList.get_all(
            self.context,
            expected['task_name'],
            expected['period_beginning'],
            expected['period_ending'],
            host=expected['host'],
            state=expected['state'])
        # Period bounds are stringified before hitting the DB layer.
        mock_get_all.assert_called_once_with(
            self.context,
            expected['task_name'],
            utils.strtime(expected['period_beginning']),
            utils.strtime(expected['period_ending']),
            host=expected['host'],
            state=expected['state'])
        for task_log, db_row in zip(task_logs, db_rows):
            self.compare_obj(task_log, db_row)
class TestTaskLogList(test_objects._LocalTest, _TestTaskLogList):
    """Run the _TestTaskLogList assertions via test_objects._LocalTest."""
    pass
class TestRemoteTaskLogList(test_objects._RemoteTest, _TestTaskLogList):
    """Run the _TestTaskLogList assertions via test_objects._RemoteTest."""
    pass
| {
"content_hash": "1e5a141f97685bfa65cde1ff3affec7f",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 37.04724409448819,
"alnum_prop": 0.5859723698193411,
"repo_name": "HybridF5/jacket",
"id": "1e406828ebebe05ceaee22f535c15cf2f9272fa6",
"size": "5278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/compute/unit/objects/test_task_log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
import errno
import json
import os
import os.path
import requests
import socket
import sqlite3
import string
import sys
import time
import urllib
from zenchimes.utilities import logged_class
from zenchimes import settings
@logged_class
class LMSCommandLineInterface(object):
    """Wrapper around the Logitech Media Server (LMS) command-line
    interface.

    On construction it fetches player/chime configuration from the local
    zenchimes HTTP server, opens a TCP socket to the LMS CLI, and
    snapshots every player's state so it can be restored after a chime
    has been played.
    """

    # Receive buffer size for CLI responses.
    bufsize = 2048

    def __init__(self, *args, **kwargs):
        """
        Initialize controller.

        Loads configuration, selects the active chime, connects to the
        LMS CLI socket and records each player's initial state. On any
        connection failure ``self.error`` is set True and the instance
        is left unconnected.
        """
        self.logger.debug("Initializing")
        self.error = False
        req = requests.get('http://localhost:{}/config'.format(
            settings.SERVER_HTTP_LISTEN_PORT))
        config = json.loads(req.text)
        player = config.get('player', None)  # TODO: handle the None case
        self.LMS_CHIME_PATH = player.get('lms_chime_path', None)
        self.LMS_HOSTNAME = player.get('lms_hostname', None)
        # NOTE(review): int(None) raises TypeError when 'lms_port' is
        # missing - confirm the config service always supplies it.
        self.LMS_PORT = int(player.get('lms_port', None))
        self.MIXER_VOLUME = player.get('mixer_volume', None)
        self.logger.debug("LMS_HOSTNAME: {0}".format(self.LMS_HOSTNAME))
        self.logger.debug("LMS_PORT: {0}".format(self.LMS_PORT))
        self.logger.debug("LMS_CHIME_PATH: {0}".format(self.LMS_CHIME_PATH))
        self.logger.debug("MIXER_VOLUME: {0}".format(self.MIXER_VOLUME))
        # Quick and dirty error catch-all.
        req = requests.get('http://localhost:{}/chimes'.format(
            settings.SERVER_HTTP_LISTEN_PORT))
        chimes = json.loads(req.text)
        # If several chimes are flagged active, the last one wins.
        for chime in chimes:
            if not chime.get('is_active', False):
                continue
            # XXX: This is the new way to fetch the active chime data.
            self.chime_name = chime.get('description', None)
            self.chime_filename = "{0}/{1}".format(self.LMS_CHIME_PATH,
                chime.get('filename', ''))
            self.logger.debug("chime_filename: {0}".format(
                self.chime_filename))
        try:
            self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.logger.debug("Socket connected?")
            self.s.connect((self.LMS_HOSTNAME, self.LMS_PORT))
        except socket.error as e:
            self.error = True
            self.logger.error("Unable to connect: {0}:{1} - {2}".format(
                self.LMS_HOSTNAME, self.LMS_PORT, e.strerror))
            return
        except Exception as e:
            self.error = True
            # Bug fix: previously logged the literal string "crap" instead
            # of the exception that was caught.
            self.logger.error("Unknown error: {0}".format(e))
            return
        self.logger.info("LMS Connected?")
        self.get_players_initial_state()

    def __del__(self):
        """
        Destructor. Exit LMS CLI and close the socket.
        """
        try:
            self.s.send('exit\n')
            self.s.recv(self.bufsize)
            self.s.close()
        except socket.error as e:
            self.logger.error("Unable to close socket: {0}".format(e.strerror))
        except AttributeError as e:
            # self.s never existed because __init__ failed to connect.
            # Bug fix: AttributeError has no .strerror attribute, which
            # made this handler itself raise; log the exception instead.
            self.logger.warning("Destructor socket not connected: {0}".format(
                e))

    def send_command(self, command):
        """
        Send the command to LMS CLI and return the trimmed response.
        """
        self.s.send('{0}\n'.format(command))
        data = self.s.recv(self.bufsize)
        return data.strip()

    def get_players_initial_state(self):
        """
        Assembles a data structure with all of each player's parameters.
        We will need this later to restore initial state.
        """
        self.players_initial_state = {}
        current_player = None
        # 'players 0' lists every player; skip the echoed command prefix.
        data = self.send_command('players 0')[11:]
        data = data.split(' ')
        p_count = urllib.unquote(data[0])
        data = data[1:]
        count = int(p_count.split(':')[1])
        self.players_initial_state['count'] = count
        key_list_boolean = ['canpoweroff', 'connected', 'isplayer']
        key_list_integer = ['playerindex']
        for kv in data:
            # Some kv contain multiple ':' and will result in a ValueError
            # in the try block.
            try:
                key, value = urllib.unquote(kv).split(':')
            except ValueError:
                # The only known exception is the MAC address. If the
                # player name contains a ':' it is also reassembled here.
                tmp = urllib.unquote(kv).split(':')
                key = tmp[0]
                value = string.join(tmp[1:], ':')
            if key == 'playerindex':
                # Start collecting attributes for the next player.
                self.players_initial_state[value] = {}
                current_player = self.players_initial_state[value]
            else:
                if key in key_list_boolean:
                    # NOTE(review): value is a non-empty string here, so
                    # even "0" maps to True - confirm this is intentional.
                    value = True if value else False
                if key in key_list_integer:
                    value = int(value)
                current_player[key] = value
        # Now fetch initial mixer volume (and mode/power/sync_master).
        for playerindex in range(count):
            current_player = self.players_initial_state[str(playerindex)]
            playerid = current_player['playerid']
            command = '{0} status'.format(playerid)
            data = self.send_command(command)
            data = data[len(command):].lstrip().rstrip().split(' ')
            # XXX: Ugh, yes there is a space in 'mixer volume'
            key_list = ['sync_master', 'mixer volume', 'mode', 'power']
            for kv in data:
                # Some kv contain multiple ':' and will result in a
                # ValueError in the try block.
                try:
                    key, value = urllib.unquote(kv).split(':')
                except ValueError:
                    # The only known exception is the MAC address. If the
                    # player name contains a ':' then it will also be
                    # reassembled here.
                    tmp = urllib.unquote(kv).split(':')
                    key = tmp[0]
                    value = string.join(tmp[1:], ':')
                if key in key_list:
                    if key == 'sync_master':
                        self.players_initial_state['sync_master'] = value
                    elif key == 'power':
                        if value == '1':
                            current_player[key] = True
                        else:
                            current_player[key] = False
                    else:
                        current_player[key] = value
        self.logger.debug("Initial State: {0}".format(self.players_initial_state))

    def restore_players_initial_state(self):
        """
        Restore specific player parameters such as volume and power.
        """
        for playerindex in range(self.players_initial_state['count']):
            playerid = self.players_initial_state[str(playerindex)]['playerid']
            initial_volume = \
                self.players_initial_state[str(playerindex)]['mixer volume']
            command = '{0} mixer volume {1}'.format(
                urllib.quote(playerid),
                initial_volume)
            self.send_command(command)
            # Restore power state.
            initial_power_state = \
                self.players_initial_state[str(playerindex)]['power']
            if initial_power_state:
                initial_power_state = 1
            else:
                initial_power_state = 0
            command = '{0} power {1}'.format(playerid, initial_power_state)
            self.send_command(command)

    def set_mixer_volume(self, volume):
        """
        Set mixer volume on every known player.
        """
        for playerindex in range(self.players_initial_state['count']):
            playerid = self.players_initial_state[str(playerindex)]['playerid']
            command = '{0} mixer volume {1}'.format(
                urllib.quote(playerid), volume)
            self.send_command(command)

    def power_on(self):
        """
        Power on all players. Kind of a hack for now cuz I'm in a hurry
        for POC.
        """
        for playerindex in range(self.players_initial_state['count']):
            playerid = self.players_initial_state[str(playerindex)]['playerid']
            command = '{0} power {1}'.format(
                urllib.quote(playerid), 1)
            self.send_command(command)

    def play_chime(self):
        """
        Play the active chime on the sync master if the players are idle,
        then restore the pre-chime player state.
        """
        # Fetch the current value of chime_enabled.
        req = requests.get('http://localhost:{}/config/chime_enabled'.format(
            settings.SERVER_HTTP_LISTEN_PORT))
        config = json.loads(req.text)
        # Bug fix: the original returned when the value WAS "True", i.e.
        # it skipped the chime exactly when chimes were enabled. Proceed
        # only when chimes are enabled.
        if config.get("value", "True") != "True":
            return
        playerid = self.players_initial_state['sync_master']
        sync_master_mode = 'play'
        for playerindex in range(self.players_initial_state['count']):
            if self.players_initial_state[str(playerindex)]['playerid'] \
                    == playerid:
                sync_master_mode = \
                    self.players_initial_state[str(playerindex)]['mode']
        command = '{0} playlist play {1} {2}'.format(
            urllib.quote(playerid), urllib.quote(self.chime_filename),
            urllib.quote(self.chime_name))
        # Play chime only if idle
        # TODO: Consolidate restrictions like mode and quiet time in one place.
        if sync_master_mode != 'play':
            self.power_on()
            # Give it a moment to settle.
            time.sleep(1)
            self.set_mixer_volume(self.MIXER_VOLUME)
            self.send_command(command)
            time.sleep(self.get_duration() + 3)
            self.restore_players_initial_state()

    def get_duration(self):
        """
        Return the duration (seconds) of the current track on the sync
        master; used for the post-play sleep prior to restoring state.
        """
        playerid = self.players_initial_state['sync_master']
        command = '{0} duration ?'.format(playerid)
        duration = self.send_command(command).split(' ')[-1]
        return float(duration)

    def sync_players(self):
        """
        Sync the players. (Not yet implemented.)
        """
def main():
    """Ad-hoc smoke test: connect to LMS and dump the captured state."""
    sbcli = LMSCommandLineInterface()
    print sbcli.players_initial_state
    sys.exit(0)
if __name__ == '__main__':
    # NOTE(review): DEBUG is assigned but nothing visible in this module
    # reads it - confirm whether an importer consumes it.
    DEBUG = True
    main()
| {
"content_hash": "335ffa75060dce732d1763a79a22aa1d",
"timestamp": "",
"source": "github",
"line_count": 272,
"max_line_length": 82,
"avg_line_length": 37.720588235294116,
"alnum_prop": 0.5444444444444444,
"repo_name": "eigenholser/squeezebox-zenchimes",
"id": "c65f8a988a2a74a9193495b126fcdae0515bcadf",
"size": "10283",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "zenchimes/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "495"
},
{
"name": "HTML",
"bytes": "3167"
},
{
"name": "JavaScript",
"bytes": "110274"
},
{
"name": "Python",
"bytes": "22929"
},
{
"name": "Shell",
"bytes": "7642"
}
],
"symlink_target": ""
} |
from savanna.db import storage as s
from savanna.openstack.common import uuidutils
from savanna.tests.unit import base
def _create_clusters(name="cluster-1", plugin_name="some_plugin",
                     hadoop_version="1.2.3", **kwargs):
    """Build a cluster description dict (extra fields via kwargs),
    persist it through the storage layer, and return both the dict and
    the created cluster object."""
    cluster_dict = dict(kwargs,
                        name=name,
                        plugin_name=plugin_name,
                        hadoop_version=hadoop_version)
    return cluster_dict, s.create_cluster(cluster_dict)
class ClusterStorageTest(base.DbTestCase):
    """DB-layer tests for cluster storage."""
    def test_create_cluster_trivial(self):
        """A created cluster gets a UUID id and keeps every input field."""
        cluster_dict, cluster = _create_clusters()
        self.assertIsNotNone(cluster)
        self.assertTrue(uuidutils.is_uuid_like(cluster.id))
        self.assertDictContainsSubset(cluster_dict, cluster.dict)
    def test_clusters_multi_tenancy(self):
        """Clusters are tenant-scoped: each tenant sees only its own."""
        self.assertEqual(0, len(s.get_clusters()))
        self.set_tenant("t-1")
        self.assertEqual(0, len(s.get_clusters()))
        _create_clusters("c-1")
        _create_clusters("c-2")
        self.assertEqual(2, len(s.get_clusters()))
        self.set_tenant("t-2")
        self.assertEqual(0, len(s.get_clusters()))
        _create_clusters("c-1")
        _create_clusters("c-2")
        self.assertEqual(2, len(s.get_clusters()))
        _create_clusters("c-3")
        self.assertEqual(3, len(s.get_clusters()))
| {
"content_hash": "5e7b2d89e28b25dba50c278774da4772",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 65,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.6322869955156951,
"repo_name": "rnirmal/savanna",
"id": "7ded8d908186c9789e10f528da1b1ec682755f76",
"size": "1921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "savanna/tests/unit/db/storage/test_clusters_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15705"
},
{
"name": "Python",
"bytes": "1011559"
},
{
"name": "Shell",
"bytes": "7199"
}
],
"symlink_target": ""
} |
from aquests.request_builder import make_http
def test_make_http ():
    """Default headers: GET requests get Accept */*, POST bodies get a
    content-type derived from the method variant unless overridden."""
    # method, url, params, auth, headers, meta, proxy, logger
    req = make_http ("get", "/index", None, None, [], None, None, None)
    assert req.headers ["accept"] == "*/*"
    req = make_http ("get", "/index", {"a": "b"}, None, [], None, None, None)
    assert req.headers ["accept"] == "*/*"
    assert req.get_payload () == b""
    req = make_http ("post", "/index", {"a": "b"}, None, [], None, None, None)
    assert req.headers ["accept"] == "*/*"
    assert req.headers ["content-type"] == "application/x-www-form-urlencoded; charset=utf-8"
    assert req.get_payload () == b"a=b"
    # Explicit Accept header is preserved.
    req = make_http ("post", "/index", {"a": "b"}, None, {"Accept": "text/html"}, None, None, None)
    assert req.headers ["accept"] == "text/html"
    assert req.headers ["content-type"] == "application/x-www-form-urlencoded; charset=utf-8"
    req = make_http ("postjson", "/index", {"a": "b"}, None, {"Accept": "text/html"}, None, None, None)
    assert req.headers ["accept"] == "text/html"
    assert req.headers ["content-type"] == "application/json; charset=utf-8"
    # Explicit Content-Type beats the method-derived default.
    req = make_http ("post", "/index", "a=b", None, {"Accept": "text/html", "Content-Type": "application/test"}, None, None, None)
    assert req.headers ["accept"] == "text/html"
    assert req.headers ["content-type"] == "application/test"
    req = make_http ("postjson", "/index", {"a": "b"}, None, None, None, None, None)
    assert req.headers ["accept"] == "application/json"
    assert req.headers ["content-type"] == "application/json; charset=utf-8"
| {
"content_hash": "497108e0ba83b4fc2b197d388a4d36d6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 130,
"avg_line_length": 50.875,
"alnum_prop": 0.5884520884520884,
"repo_name": "hansroh/skitai",
"id": "13d63e69f45111d4189e9ae0169cdced532aa87e",
"size": "1628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/level3/test_request_rebuild.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "12590"
},
{
"name": "Python",
"bytes": "593449"
},
{
"name": "Shell",
"bytes": "2809"
}
],
"symlink_target": ""
} |
from mygengo import MyGengo
# Get an authenticated MyGengo client to work with...
gengo = MyGengo(
    public_key = 'your_public_key',
    private_key = 'your_private_key',
    sandbox = True, # possibly False, depending on your dev needs
)
# Fetch revision 1 of translation job 42 - you could liken this to
# querying version control on the myGengo side. :)
print gengo.getTranslationJobRevision(id = 42, rev_id = 1)
| {
"content_hash": "20e8fe377f757850a42a5a41c9e24aea",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 33.083333333333336,
"alnum_prop": 0.7153652392947103,
"repo_name": "fvbock/mygengo-python",
"id": "ab955e76ab23bb89b16d4f4fc4ef37fd7f69a1f6",
"size": "440",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/getTranslationJobRevision.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "40346"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_dlp_filepattern
short_description: Configure file patterns used by DLP blocking in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure dlp feature and filepattern category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip address.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
dlp_filepattern:
description:
- Configure file patterns used by DLP blocking.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
comment:
description:
- Optional comments.
entries:
description:
- Configure file patterns used by DLP blocking.
suboptions:
file-type:
description:
- Select a file type.
choices:
- 7z
- arj
- cab
- lzh
- rar
- tar
- zip
- bzip
- gzip
- bzip2
- xz
- bat
- msc
- uue
- mime
- base64
- binhex
- elf
- exe
- hta
- html
- jad
- class
- cod
- javascript
- msoffice
- msofficex
- fsg
- upx
- petite
- aspack
- sis
- hlp
- activemime
- jpeg
- gif
- tiff
- png
- bmp
- ignored
- unknown
- mpeg
- mov
- mp3
- wma
- wav
- pdf
- avi
- rm
- torrent
- hibun
- msi
- mach-o
- dmg
- .net
- xar
- chm
- iso
- crx
filter-type:
description:
- Filter by file name pattern or by file type.
choices:
- pattern
- type
pattern:
description:
- Add a file name pattern.
required: true
id:
description:
- ID.
required: true
name:
description:
- Name of table containing the file pattern list.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure file patterns used by DLP blocking.
fortios_dlp_filepattern:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
dlp_filepattern:
state: "present"
comment: "Optional comments."
entries:
-
file-type: "7z"
filter-type: "pattern"
pattern: "<your_own_value>"
id: "8"
name: "default_name_9"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Open an authenticated session on the module-global FortiOSAPI
    handle, honouring the caller's optional 'https' flag."""
    host = data['host']
    username = data['username']
    password = data['password']

    fos.debug('on')
    # HTTPS stays on unless the caller explicitly passed a falsy value.
    fos.https('on' if data.get('https', True) else 'off')
    fos.login(host, username, password)
def filter_dlp_filepattern_data(json):
    """Project the module parameters down to the keys the FortiOS
    'dlp filepattern' endpoint accepts, dropping unset (None) values.

    NOTE: the parameter name ``json`` shadows the stdlib module name;
    it is kept for interface compatibility with the other fortios
    modules in this collection.
    """
    option_list = ['comment', 'entries', 'id',
                   'name']
    # Keep only whitelisted keys that were actually provided.
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def dlp_filepattern(data, fos):
    """Create/update or delete a DLP file-pattern table on the FortiGate.

    ``data['dlp_filepattern']['state']`` selects the operation:
    'present' issues a set, 'absent' deletes by the table's ``id`` mkey.
    Returns the raw FortiOS API response dict.
    """
    vdom = data['vdom']
    params = data['dlp_filepattern']
    payload = filter_dlp_filepattern_data(params)

    if params['state'] == "present":
        return fos.set('dlp',
                       'filepattern',
                       data=payload,
                       vdom=vdom)
    if params['state'] == "absent":
        return fos.delete('dlp',
                          'filepattern',
                          mkey=payload['id'],
                          vdom=vdom)
def fortios_dlp(data, fos):
    """Log in, dispatch to the matching dlp_* handler, and log out.

    Returns the tuple (is_error, has_changed, response_dict).
    """
    login(data)

    # Explicit dispatch table instead of eval() on a method name string -
    # same behaviour, no dynamic code evaluation.
    dispatch = {
        'dlp_filepattern': dlp_filepattern,
    }
    for method_name, handler in dispatch.items():
        if data[method_name]:
            resp = handler(data, fos)
            break

    fos.logout()
    # As before, this raises if no handler matched (resp unbound).
    return not resp['status'] == "success", resp['status'] == "success", resp
def main():
    """Module entry point: declare the argument spec, parse parameters,
    build the API handle, and apply the requested configuration."""
    entries_options = {
        "file-type": {"required": False, "type": "str",
                      "choices": ["7z", "arj", "cab", "lzh", "rar", "tar",
                                  "zip", "bzip", "gzip", "bzip2", "xz",
                                  "bat", "msc", "uue", "mime", "base64",
                                  "binhex", "elf", "exe", "hta", "html",
                                  "jad", "class", "cod", "javascript",
                                  "msoffice", "msofficex", "fsg", "upx",
                                  "petite", "aspack", "sis", "hlp",
                                  "activemime", "jpeg", "gif", "tiff",
                                  "png", "bmp", "ignored", "unknown",
                                  "mpeg", "mov", "mp3", "wma", "wav",
                                  "pdf", "avi", "rm", "torrent", "hibun",
                                  "msi", "mach-o", "dmg", ".net", "xar",
                                  "chm", "iso", "crx"]},
        "filter-type": {"required": False, "type": "str",
                        "choices": ["pattern", "type"]},
        "pattern": {"required": True, "type": "str"}
    }
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "dlp_filepattern": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "comment": {"required": False, "type": "str"},
                "entries": {"required": False, "type": "list",
                            "options": entries_options},
                "id": {"required": True, "type": "int"},
                "name": {"required": False, "type": "str"}
            }
        }
    }

    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)

    # Imported here so a missing dependency produces a clean module
    # failure instead of a traceback at import time.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")

    global fos
    fos = FortiOSAPI()

    is_error, has_changed, result = fortios_dlp(module.params, fos)

    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| {
"content_hash": "858c880e7bf22f1df3e523a005bbfd81",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 100,
"avg_line_length": 33.196850393700785,
"alnum_prop": 0.43390259329538267,
"repo_name": "SergeyCherepanov/ansible",
"id": "19f9b03ed4287b436b12bebc198ffb153e40f02d",
"size": "12666",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/network/fortios/fortios_dlp_filepattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
from tempest.lib.common import rest_client
class NamespaceObjectsClient(rest_client.RestClient):
    """REST client for the Image (Glance) v2 metadata-definitions
    namespace objects API (``metadefs/namespaces/<ns>/objects``)."""
    api_version = "v2"
    def list_namespace_objects(self, namespace, **kwargs):
        """Lists all namespace objects.
        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#list-objects
        """
        url = 'metadefs/namespaces/%s/objects' % namespace
        # Optional kwargs become query-string filters.
        if kwargs:
            url += '?%s' % urllib.urlencode(kwargs)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)
    def create_namespace_object(self, namespace, **kwargs):
        """Create a namespace object
        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#create-object
        """
        url = 'metadefs/namespaces/%s/objects' % namespace
        data = json.dumps(kwargs)
        resp, body = self.post(url, data)
        self.expected_success(201, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)
    def update_namespace_object(self, namespace, object_name, **kwargs):
        """Update a namespace object
        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#update-object
        """
        url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
        data = json.dumps(kwargs)
        resp, body = self.put(url, data)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)
    def show_namespace_object(self, namespace, object_name):
        """Show a namespace object
        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#show-object
        """
        url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
        resp, body = self.get(url)
        self.expected_success(200, resp.status)
        body = json.loads(body)
        return rest_client.ResponseBody(resp, body)
    def delete_namespace_object(self, namespace, object_name):
        """Delete a namespace object
        For a full list of available parameters, please refer to the official
        API reference:
        https://docs.openstack.org/api-ref/image/v2/metadefs-index.html#delete-object
        """
        url = 'metadefs/namespaces/%s/objects/%s' % (namespace, object_name)
        resp, _ = self.delete(url)
        self.expected_success(204, resp.status)
        return rest_client.ResponseBody(resp)
| {
"content_hash": "838359b80304ab8d790214b467f0fa34",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 85,
"avg_line_length": 39.723684210526315,
"alnum_prop": 0.6515402451142762,
"repo_name": "masayukig/tempest",
"id": "0cae8169b59048c9d9632e518c430a9b94261031",
"size": "3645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tempest/lib/services/image/v2/namespace_objects_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4682048"
},
{
"name": "Shell",
"bytes": "12734"
}
],
"symlink_target": ""
} |
"""Sphinx build configuration for the CfAfrica-Scrapengine documentation."""
# NOTE(review): sys/os are only used by the commented-out sys.path line below.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.todo',
    'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'CfAfrica-Scrapengine'
copyright = u'2016, Code For Africa'
author = u'andrew kamau'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'CfAfrica-Scrapengine v0.1'
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'CfAfrica-Scrapenginedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
    # Latex figure (float) alignment
    #'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'CfAfrica-Scrapengine.tex', u'CfAfrica-Scrapengine Documentation',
     u'andrew kamau', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'cfafrica-scrapengine', u'CfAfrica-Scrapengine Documentation',
     [author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'CfAfrica-Scrapengine', u'CfAfrica-Scrapengine Documentation',
     author, 'CfAfrica-Scrapengine', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "3d0c8e4593dbc06e78b1273c82399be8",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 83,
"avg_line_length": 33.04,
"alnum_prop": 0.7080123266563945,
"repo_name": "CodeForAfricaLabs/Scrapengine",
"id": "885b572d47005f46d1e0c4c7c948e0acb0b72cc4",
"size": "9519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "62801"
},
{
"name": "Shell",
"bytes": "185"
}
],
"symlink_target": ""
} |
import cPickle
import os
import tarfile
import PIL.Image
from downloader import DataDownloader
class Cifar100Downloader(DataDownloader):
    """
    Download, unpack and re-encode the CIFAR-100 dataset as image files.

    Produces two parallel dataset layouts under self.outdir: 'fine' (100
    classes) and 'coarse' (20 superclasses, built from symlinks into the
    fine tree).

    See details about the CIFAR100 dataset here:
    http://www.cs.toronto.edu/~kriz/cifar.html
    """
    def urlList(self):
        # Single archive containing train/test pickles and the label metadata.
        return [
            'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz',
        ]
    def uncompressData(self):
        filename = 'cifar-100-python.tar.gz'
        filepath = os.path.join(self.outdir, filename)
        assert os.path.exists(filepath), 'Expected "%s" to exist' % filename
        # Skip extraction if the archive was already unpacked on a prior run.
        if not os.path.exists(os.path.join(self.outdir, 'cifar-100-python')):
            print "Uncompressing file=%s ..." % filename
            # NOTE(review): extractall on a downloaded archive trusts its
            # member paths; acceptable here since the URL is hard-coded.
            with tarfile.open(filepath) as tf:
                tf.extractall(self.outdir)
    def processData(self):
        # The 'meta' pickle maps label indices to human-readable names.
        label_filename = 'meta'
        label_filepath = os.path.join(self.outdir, 'cifar-100-python', label_filename)
        with open(label_filepath, 'rb') as infile:
            pickleObj = cPickle.load(infile)
            fine_label_names = pickleObj['fine_label_names']
            coarse_label_names = pickleObj['coarse_label_names']
        # Write a labels.txt (one name per line) for each label granularity.
        for level, label_names in [
                ('fine', fine_label_names),
                ('coarse', coarse_label_names),
        ]:
            dirname = os.path.join(self.outdir, level)
            self.mkdir(dirname, clean=True)
            with open(os.path.join(dirname, 'labels.txt'), 'w') as outfile:
                for name in label_names:
                    outfile.write('%s\n' % name)
        # Convert both pickled splits into image files on disk.
        for filename, phase in [
                ('train', 'train'),
                ('test', 'test'),
        ]:
            filepath = os.path.join(self.outdir, 'cifar-100-python', filename)
            assert os.path.exists(filepath), 'Expected "%s" to exist' % filename
            self.__extractData(filepath, phase, fine_label_names, coarse_label_names)
    def __extractData(self, input_file, phase, fine_label_names, coarse_label_names):
        """
        Read a pickle file at input_file and output as images.

        Arguments:
        input_file -- a pickle file
        phase -- train or test
        fine_label_names -- mapping from fine_labels to strings
        coarse_label_names -- mapping from coarse_labels to strings
        """
        print 'Extracting images file=%s ...' % input_file
        # Read the pickle file
        with open(input_file, 'rb') as infile:
            pickleObj = cPickle.load(infile)
            # print 'Batch -', pickleObj['batch_label']
        # Each row is one 32x32 RGB image flattened to 3*32*32 = 3072 values.
        data = pickleObj['data']
        assert data.shape[1] == 3072, 'Unexpected data.shape %s' % (data.shape,)
        count = data.shape[0]
        fine_labels = pickleObj['fine_labels']
        assert len(fine_labels) == count, 'Expected len(fine_labels) to be %d, not %d' % (count, len(fine_labels))
        coarse_labels = pickleObj['coarse_labels']
        assert len(coarse_labels) == count, 'Expected len(coarse_labels) to be %d, not %d' % (
            count, len(coarse_labels))
        filenames = pickleObj['filenames']
        assert len(filenames) == count, 'Expected len(filenames) to be %d, not %d' % (count, len(filenames))
        # Reorder from (count, channel, H, W) to (count, H, W, channel) so
        # PIL.Image.fromarray sees HWC pixel data.
        data = data.reshape((count, 3, 32, 32))
        data = data.transpose((0, 2, 3, 1))
        fine_to_coarse = {}  # mapping of fine labels to coarse labels
        fine_dirname = os.path.join(self.outdir, 'fine', phase)
        os.makedirs(fine_dirname)
        coarse_dirname = os.path.join(self.outdir, 'coarse', phase)
        os.makedirs(coarse_dirname)
        # Text files list "<path> <label-index>" for every saved image.
        with open(os.path.join(self.outdir, 'fine', '%s.txt' % phase), 'w') as fine_textfile, \
                open(os.path.join(self.outdir, 'coarse', '%s.txt' % phase), 'w') as coarse_textfile:
            for index, image in enumerate(data):
                # Create the directory
                fine_label = fine_label_names[fine_labels[index]]
                dirname = os.path.join(fine_dirname, fine_label)
                self.mkdir(dirname)
                # Get the filename, normalizing its extension if needed
                filename = filenames[index]
                ext = os.path.splitext(filename)[1][1:].lower()
                if ext != self.file_extension:
                    filename = '%s.%s' % (os.path.splitext(filename)[0], self.file_extension)
                filename = os.path.join(dirname, filename)
                # Save the image
                PIL.Image.fromarray(image).save(filename)
                fine_textfile.write('%s %s\n' % (filename, fine_labels[index]))
                coarse_textfile.write('%s %s\n' % (filename, coarse_labels[index]))
                # Remember which coarse class each fine class belongs to.
                if fine_label not in fine_to_coarse:
                    fine_to_coarse[fine_label] = coarse_label_names[coarse_labels[index]]
        # Create the coarse dataset with symlinks
        for fine, coarse in fine_to_coarse.iteritems():
            self.mkdir(os.path.join(coarse_dirname, coarse))
            os.symlink(
                # Create relative symlinks for portability
                os.path.join('..', '..', '..', 'fine', phase, fine),
                os.path.join(coarse_dirname, coarse, fine)
            )
| {
"content_hash": "2441f819fdda89e0465dbf0f41cc996b",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 118,
"avg_line_length": 42.064516129032256,
"alnum_prop": 0.5694018404907976,
"repo_name": "gheinrich/DIGITS-GAN",
"id": "8c17b95790697d4236c12247fbe618d0130aab21",
"size": "5286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "digits/download_data/cifar100.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4338"
},
{
"name": "HTML",
"bytes": "2634846"
},
{
"name": "JavaScript",
"bytes": "53896"
},
{
"name": "Lua",
"bytes": "110599"
},
{
"name": "Makefile",
"bytes": "113"
},
{
"name": "Protocol Buffer",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "1237457"
},
{
"name": "Shell",
"bytes": "12480"
}
],
"symlink_target": ""
} |
'''
Write a Python program using ciscoconfparse that parses the 'cisco_ipsec.txt'
config file. Note, this config file is not fully valid (i.e. parts of the
configuration are missing).
The script should find all of the crypto map entries in the file (lines that
begin with 'crypto map CRYPTO') and print out the children of each crypto map.
'''
from ciscoconfparse import CiscoConfParse
def main():
'''
Find all of the crypto map entries in the file (lines that begin with
'crypto map CRYPTO') and print out the children of each crypto map.
'''
cisco_file = 'cisco_ipsec.txt'
cisco_cfg = CiscoConfParse(cisco_file)
crypto_maps = cisco_cfg.find_objects(r"^crypto map CRYPTO")
for c_map in crypto_maps:
print
print c_map.text
for child in c_map.children:
print child.text
print
if __name__ == "__main__":
main()
| {
"content_hash": "9b6ce33a7138c9d8bb99680d79062ca9",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 78,
"avg_line_length": 29.7,
"alnum_prop": 0.6801346801346801,
"repo_name": "cb1234/pynet-test",
"id": "2fdeb7c459df4257f7c1724877dac8f77d63fa6c",
"size": "913",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "pynet/pyth_ans_ecourse/class1/ex8_confparse.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "420860"
}
],
"symlink_target": ""
} |
import pytest
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_regression
from sklearn.linear_model import Ridge
from sklearn.kernel_ridge import KernelRidge
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.utils._testing import ignore_warnings
from sklearn.utils._testing import assert_array_almost_equal
# Shared fixtures: a small dense regression problem, sparse (CSR/CSC)
# copies of the design matrix, and a two-column target for the
# multi-output test.
X, y = make_regression(n_features=10, random_state=0)
Xcsr = sp.csr_matrix(X)  # CSR copy of X
Xcsc = sp.csc_matrix(X)  # CSC copy of X
Y = np.array([y, y]).T  # (n_samples, 2) target: y duplicated in two columns
def test_kernel_ridge():
    """KernelRidge with a linear kernel must match plain Ridge regression."""
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=1).fit(X, y)
    assert_array_almost_equal(ridge_pred, kr.predict(X))
def test_kernel_ridge_csr():
    """KernelRidge must match Ridge when fit on a CSR sparse matrix."""
    ridge = Ridge(alpha=1, fit_intercept=False, solver="cholesky").fit(Xcsr, y)
    kr = KernelRidge(kernel="linear", alpha=1).fit(Xcsr, y)
    assert_array_almost_equal(ridge.predict(Xcsr), kr.predict(Xcsr))
def test_kernel_ridge_csc():
    """KernelRidge must match Ridge when fit on a CSC sparse matrix."""
    ridge = Ridge(alpha=1, fit_intercept=False, solver="cholesky").fit(Xcsc, y)
    kr = KernelRidge(kernel="linear", alpha=1).fit(Xcsc, y)
    assert_array_almost_equal(ridge.predict(Xcsc), kr.predict(Xcsc))
def test_kernel_ridge_singular_kernel():
    # alpha=0 causes a LinAlgError in computing the dual coefficients,
    # which causes a fallback to a lstsq solver. This is tested here.
    expected = Ridge(alpha=0, fit_intercept=False).fit(X, y).predict(X)
    kr = KernelRidge(kernel="linear", alpha=0)
    ignore_warnings(kr.fit)(X, y)
    assert_array_almost_equal(expected, kr.predict(X))
def test_kernel_ridge_precomputed():
    """A precomputed Gram matrix must reproduce each named kernel's result."""
    for kernel_name in ("linear", "rbf", "poly", "cosine"):
        gram = pairwise_kernels(X, X, metric=kernel_name)
        direct = KernelRidge(kernel=kernel_name).fit(X, y).predict(X)
        via_gram = KernelRidge(kernel="precomputed").fit(gram, y).predict(gram)
        assert_array_almost_equal(direct, via_gram)
def test_kernel_ridge_precomputed_kernel_unchanged():
    """fit() must not mutate a user-supplied precomputed kernel matrix."""
    gram = np.dot(X, X.T)
    backup = gram.copy()
    KernelRidge(kernel="precomputed").fit(gram, y)
    assert_array_almost_equal(gram, backup)
def test_kernel_ridge_sample_weights():
    """Sample weights must be honored identically by Ridge and KernelRidge."""
    gram = np.dot(X, X.T)  # precomputed linear kernel
    weights = np.random.RandomState(0).rand(X.shape[0])
    ridge_pred = (
        Ridge(alpha=1, fit_intercept=False)
        .fit(X, y, sample_weight=weights)
        .predict(X)
    )
    kr_linear = KernelRidge(kernel="linear", alpha=1).fit(
        X, y, sample_weight=weights)
    kr_gram = KernelRidge(kernel="precomputed", alpha=1).fit(
        gram, y, sample_weight=weights)
    assert_array_almost_equal(ridge_pred, kr_linear.predict(X))
    assert_array_almost_equal(ridge_pred, kr_gram.predict(gram))
def test_kernel_ridge_multi_output():
    """Two-column targets match Ridge, and equal two stacked single fits."""
    ridge_pred = Ridge(alpha=1, fit_intercept=False).fit(X, Y).predict(X)
    kr_pred = KernelRidge(kernel="linear", alpha=1).fit(X, Y).predict(X)
    assert_array_almost_equal(ridge_pred, kr_pred)
    single = KernelRidge(kernel="linear", alpha=1).fit(X, y).predict(X)
    assert_array_almost_equal(kr_pred, np.array([single, single]).T)
# TODO: Remove in 1.1
def test_kernel_ridge_pairwise_is_deprecated():
    """Touching the `_pairwise` attribute must emit a FutureWarning."""
    ridge = KernelRidge(kernel="precomputed")
    warning_msg = r"Attribute `_pairwise` was deprecated in version 0\.24"
    with pytest.warns(FutureWarning, match=warning_msg):
        ridge._pairwise
| {
"content_hash": "2dc11c7bcc8e1c1719806c662bb44357",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 88,
"avg_line_length": 32.59803921568628,
"alnum_prop": 0.6736842105263158,
"repo_name": "shyamalschandra/scikit-learn",
"id": "b1fcb06eb2204da5957a50f9693ccb2d179cd505",
"size": "3325",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/tests/test_kernel_ridge.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394788"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "PowerShell",
"bytes": "17312"
},
{
"name": "Python",
"bytes": "6271288"
},
{
"name": "Shell",
"bytes": "6747"
}
],
"symlink_target": ""
} |
"""Unit tests for make_info.py."""
from __future__ import unicode_literals
import unittest
from batchupload.make_info import make_info_page
class TestMakeInfoPage(unittest.TestCase):
    """Exercise make_info_page over different category combinations."""

    def setUp(self):
        # Baseline payload; individual tests empty out the category lists.
        self.data = {
            'info': '{{Infobox\n| param1 = value1 \n}}',
            'meta_cats': ['A meta_Cat'],
            'cats': ['cat1', 'cat2'],
            'filename': 'The_filename',
        }

    def test_make_info_page(self):
        """Both category sections render when both lists are populated."""
        wanted = (
            '{{Infobox\n| param1 = value1 \n}}\n\n'
            '<!-- Metadata categories -->\n'
            '[[Category:A meta_Cat]]'
            '\n\n'
            '<!-- Content categories -->\n'
            '[[Category:cat1]]\n'
            '[[Category:cat2]]')
        self.assertEqual(make_info_page(self.data), wanted)

    def test_make_info_page_preview(self):
        """Preview mode prepends the filename and uses wiki-link bullets."""
        wanted = (
            "Filename: The_filename.<ext>\n"
            "{{Infobox\n| param1 = value1 \n}}\n\n"
            "''Metadata categories:''\n"
            "* [[:Category:A meta_Cat]]"
            "\n\n"
            "''Content categories:''\n"
            "* [[:Category:cat1]]\n"
            "* [[:Category:cat2]]")
        self.assertEqual(make_info_page(self.data, preview=True), wanted)

    def test_make_info_page_no_meta_cats(self):
        """The metadata section is dropped when meta_cats is empty."""
        self.data['meta_cats'] = []
        wanted = (
            '{{Infobox\n| param1 = value1 \n}}\n\n'
            '<!-- Content categories -->\n'
            '[[Category:cat1]]\n'
            '[[Category:cat2]]')
        self.assertEqual(make_info_page(self.data), wanted)

    def test_make_info_page_no_content_cats(self):
        """The content section is dropped when cats is empty."""
        self.data['cats'] = []
        wanted = (
            '{{Infobox\n| param1 = value1 \n}}\n\n'
            '<!-- Metadata categories -->\n'
            '[[Category:A meta_Cat]]')
        self.assertEqual(make_info_page(self.data), wanted)

    def test_make_info_page_no_cats(self):
        """Only the info template remains when both lists are empty."""
        self.data['meta_cats'] = []
        self.data['cats'] = []
        self.assertEqual(make_info_page(self.data),
                         '{{Infobox\n| param1 = value1 \n}}')
| {
"content_hash": "81f6de6901bf6b843a320c0d2b9bd976",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 56,
"avg_line_length": 30.192307692307693,
"alnum_prop": 0.47091295116772824,
"repo_name": "lokal-profil/BatchUploadTools",
"id": "92c3d27166da53ce062ded7b4e432a8ff4c931ec",
"size": "2398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_make_info.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "165956"
}
],
"symlink_target": ""
} |
from distutils.core import setup
# perform the setup action
# NOTE: only metadata is declared here -- no packages or modules are listed.
setup(
    name = "pypi",
    version = '2005-08-01',
    description =
    "PyPI is the Python Package Index at http://pypi.python.org/",
    long_description = '''PyPI has a new home at
<http://pypi.python.org/>. Users should need not need to change
anything, as the old "www" address should still work. Use of the new
address in preference is encouraged.
Developers interested in how PyPI works, or in contributing to the project,
should visit http://wiki.python.org/moin/CheeseShopDev
''',
    author = "Richard Jones",
    author_email = "richard@python.org",
    url = 'http://wiki.python.org/moin/CheeseShopDev',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Natural Language :: English',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries',
    ],
)
# vim: set filetype=python ts=4 sw=4 et si
| {
"content_hash": "02cff3187bbb4ba04dda383274fed4a6",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 35.878787878787875,
"alnum_prop": 0.6613175675675675,
"repo_name": "ericholscher/pypi",
"id": "90508fcb9d4ea037af909269cff8a2e7213c0013",
"size": "1217",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "195"
},
{
"name": "Python",
"bytes": "362013"
}
],
"symlink_target": ""
} |
"""Tests for gradient packer."""
import numpy as np
import tensorflow as tf
import tensor_list_util
class GradientPackerTest(tf.test.TestCase):
  """Tests for packing and unpacking gradients."""
  def _get_variables(self):
    """Create three variables holding the consecutive values 0..93.

    Shapes are (1, 2), (3, 5) and (7, 11): 2 + 15 + 77 = 94 elements in
    total, so a fully packed gradient vector has shape (1, 94).
    """
    x = tf.get_variable("x", initializer=tf.reshape(
        tf.range(0, 2, dtype=tf.float32), [1, 2]))
    y = tf.get_variable("y", initializer=tf.reshape(
        tf.range(2, 17, dtype=tf.float32), [3, 5]))
    z = tf.get_variable("z", initializer=tf.reshape(
        tf.range(17, 94, dtype=tf.float32), [7, 11]))
    return x, y, z
  def test_vectorize_all(self):
    """Packing gradients of 0.5*||v||^2 over all variables yields v itself."""
    x, y, z = self._get_variables()
    loss = 0.5 * tensor_list_util.l2_squared([x, y, z])
    gradients = tf.gradients(loss, [x, y, z])
    vectorizer = tensor_list_util.GradientPacker(loss)
    vectorized_gradients = vectorizer.pack(gradients)
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      vector = sess.run(vectorized_gradients)
    # By construction, we have that if f(x) = 0.5 * ||x||^2, then f'(x) = x.
    self.assertAllClose(vector, np.reshape(np.arange(0, 94), [1, 94]))
  def test_vectorize_some(self):
    """Variables absent from the loss are skipped in the packed vector."""
    x, y, z = self._get_variables()
    loss = 0.5 * tensor_list_util.l2_squared([x, y])
    # There should be a None entry in the gradient (z is not in the loss).
    gradients = tf.gradients(loss, [x, y, z])
    vectorizer = tensor_list_util.GradientPacker(loss)
    vectorized_gradients = vectorizer.pack(gradients)
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      vector = sess.run(vectorized_gradients)
    # Only x (2 elements) and y (15 elements) contribute: values 0..16.
    self.assertAllClose(vector, np.reshape(np.arange(0, 17),
                                           [1, 17]))
  def test_unvectorize_all(self):
    """unpack() restores the per-variable tensors from the packed vector."""
    x, y, z = self._get_variables()
    loss = 0.5 * tensor_list_util.l2_squared([x, y, z])
    gradients = tf.gradients(loss, [x, y, z])
    vectorizer = tensor_list_util.GradientPacker(loss)
    vectorized_gradients = vectorizer.pack(gradients)
    unvectorized_gradients = vectorizer.unpack(vectorized_gradients)
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      list_of_tensors = sess.run(unvectorized_gradients)
      variables = sess.run([x, y, z])
    # Gradient of 0.5*||v||^2 w.r.t. each variable equals the variable.
    for variable, gradient in zip(variables, list_of_tensors):
      self.assertAllClose(variable, gradient)
  def test_unvectorize_some(self):
    """With full=False, unpack() only returns tensors for variables in the loss."""
    x, y, z = self._get_variables()
    loss = 0.5 * tensor_list_util.l2_squared([x, y])
    gradients = tf.gradients(loss, [x, y, z])
    # Dependency inject gradients, so we don't have to duplicate ops.
    vectorizer = tensor_list_util.GradientPacker(loss, gradients=gradients)
    vectorized_gradients = vectorizer.pack(gradients)
    unvectorized_gradients = vectorizer.unpack(vectorized_gradients, full=False)
    init_op = tf.global_variables_initializer()
    with self.test_session() as sess:
      sess.run(init_op)
      list_of_tensors = sess.run(unvectorized_gradients)
      variables = sess.run([x, y])
    for variable, gradient in zip(variables, list_of_tensors):
      self.assertAllClose(variable, gradient)
    # x contributes 2 elements and y 15, so the packed size is 17.
    self.assertEqual(vectorizer.gradient_size, 17)
  def test_unvectorize_with_gaps(self):
    """With full=True, unpack() keeps a None placeholder for missing gradients."""
    x, y, z = self._get_variables()
    loss = 0.5 * tensor_list_util.l2_squared([x, y])
    gradients = tf.gradients(loss, [x, y, z])
    vectorizer = tensor_list_util.GradientPacker(loss)
    vectorized_gradients = vectorizer.pack(gradients)
    unvectorized_gradients = vectorizer.unpack(vectorized_gradients, full=True)
    # z has no gradient, so its slot must be None.
    self.assertIsNone(unvectorized_gradients[-1])
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  tf.test.main()
| {
"content_hash": "eba07fc9193d6191c5f0c7afa7f2734c",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 80,
"avg_line_length": 32.796460176991154,
"alnum_prop": 0.6600107933081489,
"repo_name": "google/spectral-density",
"id": "0987c32f4b08e23b85d2e4cc086eb1abffdaefca",
"size": "4282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tf/tensor_list_util_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "125623"
},
{
"name": "Python",
"bytes": "125638"
}
],
"symlink_target": ""
} |
"""Serve the current directory over HTTP on 127.0.0.1:8000 (Python 2 and 3)."""
try:
    # python 3
    from http.server import SimpleHTTPRequestHandler
    import socketserver
except ImportError:
    # python 2
    from SimpleHTTPServer import SimpleHTTPRequestHandler
    import SocketServer as socketserver

ADDR = "127.0.0.1"
PORT = 8000

# Allow an immediate restart: without SO_REUSEADDR a just-closed socket
# lingers in TIME_WAIT and re-binding the port fails.
socketserver.TCPServer.allow_reuse_address = True

httpd = socketserver.TCPServer(
    (ADDR, PORT),
    SimpleHTTPRequestHandler
)

print("\nserving at http://%s:%s" % (ADDR, PORT))
print("(Abort with Ctrl-C)")
try:
    httpd.serve_forever()
except KeyboardInterrupt:
    # The banner promises Ctrl-C abort; exit cleanly instead of dumping
    # a KeyboardInterrupt traceback.
    print("\nshutting down")
finally:
    # Always release the listening socket.
    httpd.server_close()
| {
"content_hash": "2c3c5353201ae83aa9ff1b1f3f0ab6ce",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 57,
"avg_line_length": 20.363636363636363,
"alnum_prop": 0.7120535714285714,
"repo_name": "jedie/pypyjs_test_compression",
"id": "c206644429bad7034434c18b54699832b46279b8",
"size": "471",
"binary": false,
"copies": "1",
"ref": "refs/heads/gh-pages",
"path": "simple_http_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3721"
},
{
"name": "HTML",
"bytes": "8706"
},
{
"name": "JavaScript",
"bytes": "58470"
},
{
"name": "Python",
"bytes": "14281"
}
],
"symlink_target": ""
} |
"""
This file implements print functionality for the CPU.
"""
from llvmlite.llvmpy.core import Type
from numba.core import types, typing, cgutils
from numba.core.imputils import Registry, impl_ret_untracked
# Registry collecting the lowering implementations defined in this module;
# `lower` is used as a decorator to register each implementation.
registry = Registry('printimpl')
lower = registry.lower
# NOTE: the current implementation relies on CPython API even in
# nopython mode.
@lower("print_item", types.Literal)
def print_item_impl(context, builder, sig, args):
    """
    Print a single constant value.

    The value is taken from the Literal *type* at compile time (the runtime
    args are unused), serialized, and rebuilt as a Python object which is
    printed through the CPython API.
    """
    ty, = sig.args
    # The constant lives on the type object, not in the runtime arguments.
    val = ty.literal_value
    pyapi = context.get_python_api(builder)
    # Round-trip the constant: serialize at compile time, unserialize to a
    # PyObject at run time, then print and drop our reference.
    strobj = pyapi.unserialize(pyapi.serialize_object(val))
    pyapi.print_object(strobj)
    pyapi.decref(strobj)
    # "print_item" has no meaningful result; return a dummy value.
    res = context.get_dummy_value()
    return impl_ret_untracked(context, builder, sig.return_type, res)
@lower("print_item", types.Any)
def print_item_impl(context, builder, sig, args):  # noqa: F811 - intentional redefinition; the previous version was already registered via @lower
    """
    Print a single native value by boxing it in a Python object and
    invoking the Python interpreter's print routine.
    """
    ty, = sig.args
    val, = args
    pyapi = context.get_python_api(builder)
    env_manager = context.get_env_manager(builder)
    if context.enable_nrt:
        # NOTE(review): incref before boxing -- from_native_value appears to
        # consume a reference when NRT is enabled; confirm against the NRT docs.
        context.nrt.incref(builder, ty, val)
    obj = pyapi.from_native_value(ty, val, env_manager)
    # Boxing can fail and return NULL; branch on that at the LLVM level.
    with builder.if_else(cgutils.is_not_null(builder, obj), likely=True) as (if_ok, if_error):
        with if_ok:
            pyapi.print_object(obj)
            pyapi.decref(obj)
        with if_error:
            # Report the boxing failure as an unraisable error attributed to
            # print(), instead of propagating an exception.
            cstr = context.insert_const_string(builder.module,
                                               "the print() function")
            strobj = pyapi.string_from_string(cstr)
            pyapi.err_write_unraisable(strobj)
            pyapi.decref(strobj)
    # "print_item" has no meaningful result; return a dummy value.
    res = context.get_dummy_value()
    return impl_ret_untracked(context, builder, sig.return_type, res)
@lower(print, types.VarArg(types.Any))
def print_varargs_impl(context, builder, sig, args):
    """
    An entire print() call: each argument is printed via the registered
    "print_item" implementation, separated by single spaces and terminated
    by a newline.
    """
    pyapi = context.get_python_api(builder)
    # Hold the GIL across the whole call; the per-item implementations use
    # the CPython API.
    gil = pyapi.gil_ensure()
    for i, (argtype, argval) in enumerate(zip(sig.args, args)):
        # Dispatch each argument to the "print_item" implementation that
        # matches its type.
        signature = typing.signature(types.none, argtype)
        imp = context.get_function("print_item", signature)
        imp(builder, [argval])
        # Space separator between items, but not after the last one.
        if i < len(args) - 1:
            pyapi.print_string(' ')
    pyapi.print_string('\n')
    pyapi.gil_release(gil)
    # print() returns None; emit a dummy value for the call site.
    res = context.get_dummy_value()
    return impl_ret_untracked(context, builder, sig.return_type, res)
| {
"content_hash": "2751019febb219e888fae33a21c51e08",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 94,
"avg_line_length": 30.36144578313253,
"alnum_prop": 0.6464285714285715,
"repo_name": "stuartarchibald/numba",
"id": "0ec29b5561dca99be90e3ab72a30ffcf998b1e53",
"size": "2520",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "numba/cpython/printimpl.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6768"
},
{
"name": "C",
"bytes": "625527"
},
{
"name": "C++",
"bytes": "87110"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8342308"
},
{
"name": "Shell",
"bytes": "9062"
}
],
"symlink_target": ""
} |
"""Tests for convolutional layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
@keras_parameterized.run_all_keras_modes
class Conv1DTest(keras_parameterized.TestCase):
  """Shape, regularizer, constraint and re-build tests for Conv1D."""
  def _run_test(self, kwargs, expected_output_shape):
    """Run `layer_test` on a (2, 7, 3) input with the given layer kwargs."""
    num_samples = 2
    stack_size = 3
    length = 7
    with self.cached_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.Conv1D,
          kwargs=kwargs,
          input_shape=(num_samples, length, stack_size),
          expected_output_shape=expected_output_shape)
  def _run_test_extra_batch_dim(self, kwargs, expected_output_shape):
    """Same as `_run_test` but with a rank-2 (2, 11) batch shape."""
    batch_shape = (2, 11)
    stack_size = 3
    length = 7
    with self.cached_session(use_gpu=True):
      if expected_output_shape is not None:
        # Account for the extra (unknown) leading batch dimension.
        expected_output_shape = (None,) + expected_output_shape
      testing_utils.layer_test(
          keras.layers.Conv1D,
          kwargs=kwargs,
          input_shape=batch_shape + (length, stack_size),
          expected_output_shape=expected_output_shape)
  @parameterized.named_parameters(
      ('padding_valid', {
          'padding': 'valid'
      }, (None, 5, 2)),
      ('padding_same', {
          'padding': 'same'
      }, (None, 7, 2)),
      ('padding_same_dilation_2', {
          'padding': 'same',
          'dilation_rate': 2
      }, (None, 7, 2)),
      ('padding_same_dilation_3', {
          'padding': 'same',
          'dilation_rate': 3
      }, (None, 7, 2)),
      ('padding_causal', {
          'padding': 'causal'
      }, (None, 7, 2)),
      ('strides', {
          'strides': 2
      }, (None, 3, 2)),
      ('dilation_rate', {
          'dilation_rate': 2
      }, (None, 3, 2)),
      # Only runs on GPU with CUDA, groups are not supported on CPU.
      # https://github.com/tensorflow/tensorflow/issues/29005
      ('group', {
          'groups': 3,
          'filters': 6
      }, (None, 5, 6), True),
  )
  def test_conv1d(self, kwargs, expected_output_shape, requires_gpu=False):
    """Check Conv1D output shapes for each parameterized configuration."""
    kwargs['filters'] = kwargs.get('filters', 2)
    kwargs['kernel_size'] = 3
    if not requires_gpu or test.is_gpu_available(cuda_only=True):
      self._run_test(kwargs, expected_output_shape)
      self._run_test_extra_batch_dim(kwargs, expected_output_shape)
  def test_conv1d_regularizers(self):
    """Kernel/bias regularizers add 2 losses; activity regularizer a 3rd."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv1D(**kwargs)
      layer.build((None, 5, 2))
      self.assertEqual(len(layer.losses), 2)
      layer(keras.backend.variable(np.ones((1, 5, 2))))
      self.assertEqual(len(layer.losses), 3)
  def test_conv1d_constraints(self):
    """Constraint objects are attached to the kernel and bias variables."""
    k_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv1D(**kwargs)
      layer.build((None, 5, 2))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
  def test_conv1d_recreate_conv(self):
    """Calling with a shorter input then the original one keeps shapes."""
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv1D(filters=1,
                                  kernel_size=3,
                                  strides=1,
                                  dilation_rate=2,
                                  padding='causal')
      inpt1 = np.random.normal(size=[1, 2, 1])
      inpt2 = np.random.normal(size=[1, 1, 1])
      outp1_shape = layer(inpt1).shape
      _ = layer(inpt2).shape
      # The shape for inpt1 must be unchanged by the intervening call.
      self.assertEqual(outp1_shape, layer(inpt1).shape)
  def test_conv1d_recreate_conv_unknown_dims(self):
    """A tf.function call with an unknown time dim must not alter shapes."""
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv1D(filters=1,
                                  kernel_size=3,
                                  strides=1,
                                  dilation_rate=2,
                                  padding='causal')
      inpt1 = np.random.normal(size=[1, 9, 1]).astype(np.float32)
      inpt2 = np.random.normal(size=[1, 2, 1]).astype(np.float32)
      outp1_shape = layer(inpt1).shape
      @def_function.function(input_signature=[
          tensor_spec.TensorSpec([1, None, 1])])
      def fn(inpt):
        return layer(inpt)
      fn(inpt2)
      self.assertEqual(outp1_shape, layer(inpt1).shape)
@keras_parameterized.run_all_keras_modes
class Conv2DTest(keras_parameterized.TestCase):
  """Shape, regularizer, constraint and validation tests for Conv2D."""
  def _run_test(self, kwargs, expected_output_shape):
    """Run `layer_test` on a (2, 7, 6, 3) input with the given kwargs."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    with self.cached_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.Conv2D,
          kwargs=kwargs,
          input_shape=(num_samples, num_row, num_col, stack_size),
          expected_output_shape=expected_output_shape)
  def _run_test_extra_batch_dim(self, kwargs, expected_output_shape):
    """Same as `_run_test` but with a rank-2 (2, 11) batch shape."""
    batch_shape = (2, 11)
    stack_size = 3
    num_row = 7
    num_col = 6
    with self.cached_session(use_gpu=True):
      if expected_output_shape is not None:
        # Account for the extra (unknown) leading batch dimension.
        expected_output_shape = (None,) + expected_output_shape
      testing_utils.layer_test(
          keras.layers.Conv2D,
          kwargs=kwargs,
          input_shape=batch_shape + (num_row, num_col, stack_size),
          expected_output_shape=expected_output_shape)
  @parameterized.named_parameters(
      ('padding_valid', {
          'padding': 'valid'
      }, (None, 5, 4, 2)),
      ('padding_same', {
          'padding': 'same'
      }, (None, 7, 6, 2)),
      ('padding_same_dilation_2', {
          'padding': 'same',
          'dilation_rate': 2
      }, (None, 7, 6, 2)),
      ('strides', {
          'strides': (2, 2)
      }, (None, 3, 2, 2)),
      ('dilation_rate', {
          'dilation_rate': (2, 2)
      }, (None, 3, 2, 2)),
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      ('data_format', {
          'data_format': 'channels_first'
      }, None, True),
      # Only runs on GPU with CUDA, groups are not supported on CPU.
      # https://github.com/tensorflow/tensorflow/issues/29005
      ('group', {
          'groups': 3,
          'filters': 6
      }, (None, 5, 4, 6), True),
  )
  def test_conv2d(self, kwargs, expected_output_shape=None, requires_gpu=False):
    """Check Conv2D output shapes for each parameterized configuration."""
    kwargs['filters'] = kwargs.get('filters', 2)
    kwargs['kernel_size'] = (3, 3)
    if not requires_gpu or test.is_gpu_available(cuda_only=True):
      self._run_test(kwargs, expected_output_shape)
      self._run_test_extra_batch_dim(kwargs, expected_output_shape)
  def test_conv2d_regularizers(self):
    """Kernel/bias regularizers add 2 losses; activity regularizer a 3rd."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      self.assertEqual(len(layer.losses), 2)
      layer(keras.backend.variable(np.ones((1, 5, 5, 2))))
      self.assertEqual(len(layer.losses), 3)
  def test_conv2d_constraints(self):
    """Constraint objects are attached to the kernel and bias variables."""
    k_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv2D(**kwargs)
      layer.build((None, 5, 5, 2))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
  def test_conv2d_zero_kernel_size(self):
    """A zero kernel size must be rejected at construction time."""
    kwargs = {'filters': 2, 'kernel_size': 0}
    with self.assertRaises(ValueError):
      keras.layers.Conv2D(**kwargs)
@keras_parameterized.run_all_keras_modes
class Conv3DTest(keras_parameterized.TestCase):
  """Shape, regularizer, constraint and dynamic-shape tests for Conv3D."""
  def _run_test(self, kwargs, expected_output_shape, validate_training=True):
    """Run `layer_test` on a (2, 5, 7, 6, 3) input with the given kwargs."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    depth = 5
    with self.cached_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.Conv3D,
          kwargs=kwargs,
          input_shape=(num_samples, depth, num_row, num_col, stack_size),
          expected_output_shape=expected_output_shape,
          validate_training=validate_training)
  def _run_test_extra_batch_dim(self,
                                kwargs,
                                expected_output_shape,
                                validate_training=True):
    """Same as `_run_test` but with a rank-2 (2, 11) batch shape."""
    batch_shape = (2, 11)
    stack_size = 3
    num_row = 7
    num_col = 6
    depth = 5
    with self.cached_session(use_gpu=True):
      if expected_output_shape is not None:
        # Account for the extra (unknown) leading batch dimension.
        expected_output_shape = (None,) + expected_output_shape
      testing_utils.layer_test(
          keras.layers.Conv3D,
          kwargs=kwargs,
          input_shape=batch_shape + (depth, num_row, num_col, stack_size),
          expected_output_shape=expected_output_shape,
          validate_training=validate_training)
  @parameterized.named_parameters(
      ('padding_valid', {
          'padding': 'valid'
      }, (None, 3, 5, 4, 2)),
      ('padding_same', {
          'padding': 'same'
      }, (None, 5, 7, 6, 2)),
      ('strides', {
          'strides': (2, 2, 2)
      }, (None, 2, 3, 2, 2)),
      ('dilation_rate', {
          'dilation_rate': (2, 2, 2)
      }, (None, 1, 3, 2, 2)),
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      ('data_format', {
          'data_format': 'channels_first'
      }, None, True),
      # Only runs on GPU with CUDA, groups are not supported on CPU.
      # https://github.com/tensorflow/tensorflow/issues/29005
      ('group', {
          'groups': 3,
          'filters': 6
      }, (None, 3, 5, 4, 6), True),
  )
  def test_conv3d(self, kwargs, expected_output_shape=None, requires_gpu=False):
    """Check Conv3D output shapes for each parameterized configuration."""
    kwargs['filters'] = kwargs.get('filters', 2)
    kwargs['kernel_size'] = (3, 3, 3)
    # train_on_batch currently fails with XLA enabled on GPUs
    test_training = 'groups' not in kwargs or not test_util.is_xla_enabled()
    if not requires_gpu or test.is_gpu_available(cuda_only=True):
      self._run_test(kwargs, expected_output_shape, test_training)
      self._run_test_extra_batch_dim(kwargs, expected_output_shape,
                                     test_training)
  def test_conv3d_regularizers(self):
    """Kernel/bias regularizers add 2 losses; activity regularizer a 3rd."""
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_regularizer': 'l2',
        'bias_regularizer': 'l2',
        'activity_regularizer': 'l2',
        'strides': 1
    }
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv3D(**kwargs)
      layer.build((None, 5, 5, 5, 2))
      # Fix: the original asserted this twice in a row; a single check
      # matches the Conv1D/Conv2D variants of this test.
      self.assertEqual(len(layer.losses), 2)
      layer(keras.backend.variable(np.ones((1, 5, 5, 5, 2))))
      self.assertEqual(len(layer.losses), 3)
  def test_conv3d_constraints(self):
    """Constraint objects are attached to the kernel and bias variables."""
    k_constraint = lambda x: x
    b_constraint = lambda x: x
    kwargs = {
        'filters': 3,
        'kernel_size': 3,
        'padding': 'valid',
        'kernel_constraint': k_constraint,
        'bias_constraint': b_constraint,
        'strides': 1
    }
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv3D(**kwargs)
      layer.build((None, 5, 5, 5, 2))
      self.assertEqual(layer.kernel.constraint, k_constraint)
      self.assertEqual(layer.bias.constraint, b_constraint)
  def test_conv3d_dynamic_shape(self):
    """Building with fully-unknown spatial dims must not raise."""
    input_data = np.random.random((1, 3, 3, 3, 3)).astype(np.float32)
    with self.cached_session(use_gpu=True):
      # Won't raise error here.
      testing_utils.layer_test(
          keras.layers.Conv3D,
          kwargs={
              'data_format': 'channels_last',
              'filters': 3,
              'kernel_size': 3
          },
          input_shape=(None, None, None, None, 3),
          input_data=input_data)
      if test.is_gpu_available(cuda_only=True):
        testing_utils.layer_test(
            keras.layers.Conv3D,
            kwargs={
                'data_format': 'channels_first',
                'filters': 3,
                'kernel_size': 3
            },
            input_shape=(None, 3, None, None, None),
            input_data=input_data)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class GroupedConvTest(keras_parameterized.TestCase):
  """Validation and numerical tests for grouped convolutions."""
  @parameterized.named_parameters(
      ('Conv1D', keras.layers.Conv1D),
      ('Conv2D', keras.layers.Conv2D),
      ('Conv3D', keras.layers.Conv3D),
  )
  def test_group_conv_incorrect_use(self, layer):
    """Filters and input channels must both be divisible by `groups`."""
    with self.assertRaisesRegex(ValueError, 'The number of filters'):
      layer(16, 3, groups=3)
    with self.assertRaisesRegex(ValueError, 'The number of input channels'):
      layer(16, 3, groups=4).build((32, 12, 12, 3))
  @parameterized.named_parameters(
      ('Conv1D', keras.layers.Conv1D, (32, 12, 32)),
      ('Conv2D', keras.layers.Conv2D, (32, 12, 12, 32)),
      ('Conv3D', keras.layers.Conv3D, (32, 12, 12, 12, 32)),
  )
  def test_group_conv(self, layer_cls, input_shape):
    """Grouped conv equals per-group convolutions concatenated on channels."""
    if test.is_gpu_available(cuda_only=True):
      with testing_utils.use_gpu():
        inputs = random_ops.random_uniform(shape=input_shape)
        layer = layer_cls(16, 3, groups=4, use_bias=False)
        layer.build(input_shape)
        # Reference: split the input and kernel into 4 channel groups,
        # convolve each pair separately, then concatenate.
        input_slices = array_ops.split(inputs, 4, axis=-1)
        weight_slices = array_ops.split(layer.kernel, 4, axis=-1)
        expected_outputs = array_ops.concat([
            nn.convolution_v2(inputs, weights)
            for inputs, weights in zip(input_slices, weight_slices)
        ],
                                            axis=-1)
        self.assertAllClose(
            layer(inputs), expected_outputs, rtol=3e-5, atol=3e-5)
  def test_group_conv_depthwise(self):
    """groups == channels must reproduce a depthwise convolution."""
    if test.is_gpu_available(cuda_only=True):
      with testing_utils.use_gpu():
        inputs = random_ops.random_uniform(shape=(3, 27, 27, 32))
        layer = keras.layers.Conv2D(32, 3, groups=32, use_bias=False)
        layer.build((3, 27, 27, 32))
        # Reshape the grouped kernel into depthwise layout (H, W, C, 1).
        weights_dw = array_ops.reshape(layer.kernel, [3, 3, 32, 1])
        expected_outputs = nn.depthwise_conv2d(
            inputs, weights_dw, strides=[1, 1, 1, 1], padding='VALID')
        self.assertAllClose(layer(inputs), expected_outputs, rtol=1e-5)
@keras_parameterized.run_all_keras_modes
class Conv1DTransposeTest(keras_parameterized.TestCase):
  """Output-shape tests for `keras.layers.Conv1DTranspose`."""
  def _run_test(self, kwargs, expected_output_shape):
    """Run `layer_test` on a (2, 6, 3) input with the given kwargs."""
    num_samples = 2
    stack_size = 3
    num_col = 6
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          keras.layers.Conv1DTranspose,
          kwargs=kwargs,
          input_shape=(num_samples, num_col, stack_size),
          expected_output_shape=expected_output_shape)
  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'}, (None, 8, 2)),
      ('padding_same', {'padding': 'same'}, (None, 6, 2)),
      ('strides', {'strides': 2}, (None, 13, 2)),
      # Only runs on GPU with CUDA, dilation_rate>1 is not supported on CPU.
      ('dilation_rate', {'dilation_rate': 2}, (None, 10, 2)),
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      ('data_format', {'data_format': 'channels_first'}),
  )
  def test_conv1d_transpose(self, kwargs, expected_output_shape=None):
    """Check output shapes; GPU-only configs are skipped without CUDA."""
    kwargs['filters'] = 2
    kwargs['kernel_size'] = 3
    # data_format / dilation_rate configs require a CUDA GPU (see above).
    if (('data_format' not in kwargs and 'dilation_rate' not in kwargs) or
        test.is_gpu_available(cuda_only=True)):
      self._run_test(kwargs, expected_output_shape)
@keras_parameterized.run_all_keras_modes
class Conv3DTransposeTest(keras_parameterized.TestCase):
  """Output-shape tests for `keras.layers.Conv3DTranspose`."""
  def _run_test(self, kwargs, expected_output_shape):
    """Run `layer_test` on a (2, 5, 7, 6, 3) input with the given kwargs."""
    num_samples = 2
    stack_size = 3
    num_row = 7
    num_col = 6
    depth = 5
    with testing_utils.use_gpu():
      testing_utils.layer_test(
          keras.layers.Conv3DTranspose,
          kwargs=kwargs,
          input_shape=(num_samples, depth, num_row, num_col, stack_size),
          expected_output_shape=expected_output_shape)
  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'}, (None, 7, 9, 8, 2)),
      ('padding_same', {'padding': 'same'}, (None, 5, 7, 6, 2)),
      ('strides', {'strides': (2, 2, 2)}, (None, 11, 15, 13, 2)),
      ('dilation_rate', {'dilation_rate': (2, 2, 2)}, (None, 7, 9, 8, 2)),
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      ('data_format', {'data_format': 'channels_first'}),
  )
  def test_conv3d_transpose(self, kwargs, expected_output_shape=None):
    """Check output shapes; the channels_first config needs a CUDA GPU."""
    kwargs['filters'] = 2
    kwargs['kernel_size'] = (3, 3, 3)
    # NOTE(review): unlike Conv1DTransposeTest, the dilation_rate config is
    # not gated on GPU availability here -- confirm CPU support is intended.
    if 'data_format' not in kwargs or test.is_gpu_available(cuda_only=True):
      self._run_test(kwargs, expected_output_shape)
@keras_parameterized.run_all_keras_modes
class ConvSequentialTest(keras_parameterized.TestCase):
  """Tests that a conv layer can be called twice with different shapes."""
  def _run_test(self, conv_layer_cls, kwargs, input_shape1, input_shape2,
                expected_output_shape1, expected_output_shape2):
    """Call one layer instance on two differently-shaped zero inputs."""
    kwargs['filters'] = 1
    kwargs['kernel_size'] = 3
    kwargs['dilation_rate'] = 2
    with self.cached_session(use_gpu=True):
      layer = conv_layer_cls(**kwargs)
      output1 = layer(np.zeros(input_shape1))
      self.assertEqual(output1.shape, expected_output_shape1)
      output2 = layer(np.zeros(input_shape2))
      self.assertEqual(output2.shape, expected_output_shape2)
  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'},
       (1, 8, 2), (1, 5, 2), (1, 4, 1), (1, 1, 1)),
      ('padding_same', {'padding': 'same'},
       (1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1)),
      ('padding_causal', {'padding': 'causal'},
       (1, 8, 2), (1, 5, 2), (1, 8, 1), (1, 5, 1)),
  )
  def test_conv1d(self, kwargs, input_shape1, input_shape2,
                  expected_output_shape1, expected_output_shape2):
    """Sequential calls with two input lengths on Conv1D."""
    self._run_test(keras.layers.Conv1D, kwargs, input_shape1, input_shape2,
                   expected_output_shape1, expected_output_shape2)
  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'},
       (1, 7, 6, 2), (1, 6, 5, 2), (1, 3, 2, 1), (1, 2, 1, 1)),
      ('padding_same', {'padding': 'same'},
       (1, 7, 6, 2), (1, 6, 5, 2), (1, 7, 6, 1), (1, 6, 5, 1)),
  )
  def test_conv2d(self, kwargs, input_shape1, input_shape2,
                  expected_output_shape1, expected_output_shape2):
    """Sequential calls with two spatial sizes on Conv2D."""
    self._run_test(keras.layers.Conv2D, kwargs, input_shape1, input_shape2,
                   expected_output_shape1, expected_output_shape2)
  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'},
       (1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 1, 3, 2, 1), (1, 4, 2, 1, 1)),
      ('padding_same', {'padding': 'same'},
       (1, 5, 7, 6, 2), (1, 8, 6, 5, 2), (1, 5, 7, 6, 1), (1, 8, 6, 5, 1)),
  )
  def test_conv3d(self, kwargs, input_shape1, input_shape2,
                  expected_output_shape1, expected_output_shape2):
    """Sequential calls with two volume sizes on Conv3D."""
    self._run_test(keras.layers.Conv3D, kwargs, input_shape1, input_shape2,
                   expected_output_shape1, expected_output_shape2)
  def test_dynamic_shape(self):
    """Calling on a symbolic tensor with None dims must not raise."""
    with self.cached_session(use_gpu=True):
      layer = keras.layers.Conv3D(2, 3)
      input_shape = (5, None, None, 2)
      inputs = keras.Input(shape=input_shape)
      x = layer(inputs)
      # Won't raise error here with None values in input shape (b/144282043).
      layer(x)
@keras_parameterized.run_all_keras_modes
class ZeroPaddingTest(keras_parameterized.TestCase):
  """Shape and value correctness tests for ZeroPadding1D/2D/3D."""
  def test_zero_padding_1d(self):
    """Padded border timesteps are zero; the interior stays untouched."""
    num_samples = 2
    input_dim = 2
    num_steps = 5
    shape = (num_samples, num_steps, input_dim)
    inputs = np.ones(shape)
    with self.cached_session(use_gpu=True):
      # basic test
      testing_utils.layer_test(
          keras.layers.ZeroPadding1D,
          kwargs={'padding': 2},
          input_shape=inputs.shape)
      testing_utils.layer_test(
          keras.layers.ZeroPadding1D,
          kwargs={'padding': (1, 2)},
          input_shape=inputs.shape)
      # correctness test
      layer = keras.layers.ZeroPadding1D(padding=2)
      layer.build(shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      for offset in [0, 1, -1, -2]:
        np.testing.assert_allclose(np_output[:, offset, :], 0.)
      np.testing.assert_allclose(np_output[:, 2:-2, :], 1.)
      # asymmetric padding (1 left, 2 right)
      layer = keras.layers.ZeroPadding1D(padding=(1, 2))
      layer.build(shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      for left_offset in [0]:
        np.testing.assert_allclose(np_output[:, left_offset, :], 0.)
      for right_offset in [-1, -2]:
        np.testing.assert_allclose(np_output[:, right_offset, :], 0.)
      np.testing.assert_allclose(np_output[:, 1:-2, :], 1.)
      layer.get_config()
    # test incorrect use
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding1D(padding=(1, 1, 1))
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding1D(padding=None)
  @parameterized.named_parameters(('channels_first', 'channels_first'),
                                  ('channels_last', 'channels_last'))
  def test_zero_padding_2d(self, data_format):
    """Padded rows/cols are zero; the interior stays ones."""
    num_samples = 2
    stack_size = 2
    input_num_row = 4
    input_num_col = 5
    if data_format == 'channels_first':
      inputs = np.ones((num_samples, stack_size, input_num_row, input_num_col))
    elif data_format == 'channels_last':
      inputs = np.ones((num_samples, input_num_row, input_num_col, stack_size))
    # basic test
    with self.cached_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.ZeroPadding2D,
          kwargs={
              'padding': (2, 2),
              'data_format': data_format
          },
          input_shape=inputs.shape)
      testing_utils.layer_test(
          keras.layers.ZeroPadding2D,
          kwargs={
              'padding': ((1, 2), (3, 4)),
              'data_format': data_format
          },
          input_shape=inputs.shape)
    # correctness test
    with self.cached_session(use_gpu=True):
      layer = keras.layers.ZeroPadding2D(
          padding=(2, 2), data_format=data_format)
      layer.build(inputs.shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      if data_format == 'channels_last':
        for offset in [0, 1, -1, -2]:
          np.testing.assert_allclose(np_output[:, offset, :, :], 0.)
          np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
        np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, :], 1.)
      elif data_format == 'channels_first':
        for offset in [0, 1, -1, -2]:
          np.testing.assert_allclose(np_output[:, :, offset, :], 0.)
          np.testing.assert_allclose(np_output[:, :, :, offset], 0.)
        # Fix: the interior for channels_first is axes 2 and 3. The original
        # sliced the channel axis (`[:, 2:-2, 2:-2, :]`), which with only 2
        # channels yields an empty array and a vacuously-passing assertion.
        np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2], 1.)
      layer = keras.layers.ZeroPadding2D(
          padding=((1, 2), (3, 4)), data_format=data_format)
      layer.build(inputs.shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      if data_format == 'channels_last':
        for top_offset in [0]:
          np.testing.assert_allclose(np_output[:, top_offset, :, :], 0.)
        for bottom_offset in [-1, -2]:
          np.testing.assert_allclose(np_output[:, bottom_offset, :, :], 0.)
        for left_offset in [0, 1, 2]:
          np.testing.assert_allclose(np_output[:, :, left_offset, :], 0.)
        for right_offset in [-1, -2, -3, -4]:
          np.testing.assert_allclose(np_output[:, :, right_offset, :], 0.)
        np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, :], 1.)
      elif data_format == 'channels_first':
        for top_offset in [0]:
          np.testing.assert_allclose(np_output[:, :, top_offset, :], 0.)
        for bottom_offset in [-1, -2]:
          np.testing.assert_allclose(np_output[:, :, bottom_offset, :], 0.)
        for left_offset in [0, 1, 2]:
          np.testing.assert_allclose(np_output[:, :, :, left_offset], 0.)
        for right_offset in [-1, -2, -3, -4]:
          np.testing.assert_allclose(np_output[:, :, :, right_offset], 0.)
        np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4], 1.)
    # test incorrect use
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding2D(padding=(1, 1, 1))
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding2D(padding=None)
  @parameterized.named_parameters(('channels_first', 'channels_first'),
                                  ('channels_last', 'channels_last'))
  def test_zero_padding_3d(self, data_format):
    """Padded planes/rows/cols are zero; the interior stays ones."""
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 4
    input_len_dim2 = 5
    input_len_dim3 = 3
    if data_format == 'channels_first':
      inputs = np.ones((num_samples, stack_size, input_len_dim1, input_len_dim2,
                        input_len_dim3))
    elif data_format == 'channels_last':
      inputs = np.ones((num_samples, input_len_dim1, input_len_dim2,
                        input_len_dim3, stack_size))
    with self.cached_session(use_gpu=True):
      # basic test
      testing_utils.layer_test(
          keras.layers.ZeroPadding3D,
          kwargs={
              'padding': (2, 2, 2),
              'data_format': data_format
          },
          input_shape=inputs.shape)
      testing_utils.layer_test(
          keras.layers.ZeroPadding3D,
          kwargs={
              'padding': ((1, 2), (3, 4), (0, 2)),
              'data_format': data_format
          },
          input_shape=inputs.shape)
    with self.cached_session(use_gpu=True):
      # correctness test
      layer = keras.layers.ZeroPadding3D(
          padding=(2, 2, 2), data_format=data_format)
      layer.build(inputs.shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      if data_format == 'channels_last':
        for offset in [0, 1, -1, -2]:
          np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
          np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
          np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
        np.testing.assert_allclose(np_output[:, 2:-2, 2:-2, 2:-2, :], 1.)
      elif data_format == 'channels_first':
        for offset in [0, 1, -1, -2]:
          np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
          np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
          np.testing.assert_allclose(np_output[:, :, :, :, offset], 0.)
        np.testing.assert_allclose(np_output[:, :, 2:-2, 2:-2, 2:-2], 1.)
      # asymmetric padding ((1, 2), (3, 4), (0, 2))
      layer = keras.layers.ZeroPadding3D(
          padding=((1, 2), (3, 4), (0, 2)), data_format=data_format)
      layer.build(inputs.shape)
      output = layer(keras.backend.variable(inputs))
      if context.executing_eagerly():
        np_output = output.numpy()
      else:
        np_output = keras.backend.eval(output)
      if data_format == 'channels_last':
        for offset in [0]:
          np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
        for offset in [-1, -2]:
          np.testing.assert_allclose(np_output[:, offset, :, :, :], 0.)
        for offset in [0, 1, 2]:
          np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
        for offset in [-1, -2, -3, -4]:
          np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
        for offset in [-1, -2]:
          np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
        np.testing.assert_allclose(np_output[:, 1:-2, 3:-4, 0:-2, :], 1.)
      elif data_format == 'channels_first':
        for offset in [0]:
          np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
        for offset in [-1, -2]:
          np.testing.assert_allclose(np_output[:, :, offset, :, :], 0.)
        for offset in [0, 1, 2]:
          np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
        for offset in [-1, -2, -3, -4]:
          np.testing.assert_allclose(np_output[:, :, :, offset, :], 0.)
        for offset in [-1, -2]:
          np.testing.assert_allclose(np_output[:, :, :, :, offset], 0.)
        np.testing.assert_allclose(np_output[:, :, 1:-2, 3:-4, 0:-2], 1.)
    # test incorrect use
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding3D(padding=(1, 1))
    with self.assertRaises(ValueError):
      keras.layers.ZeroPadding3D(padding=None)
@test_util.for_all_test_methods(test_util.disable_xla,
                                'align_corners=False not supported by XLA')
@keras_parameterized.run_all_keras_modes
class UpSamplingTest(keras_parameterized.TestCase):
  """Shape and value correctness tests for UpSampling1D/2D/3D."""
  def test_upsampling_1d(self):
    """Smoke test for UpSampling1D."""
    with self.cached_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.UpSampling1D, kwargs={'size': 2}, input_shape=(3, 5, 4))
  def test_upsampling_2d(self):
    """Nearest-neighbor upsampling matches np.repeat on both axes."""
    num_samples = 2
    stack_size = 2
    input_num_row = 11
    input_num_col = 12
    for data_format in ['channels_first', 'channels_last']:
      if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, stack_size, input_num_row,
                                input_num_col)
      else:
        inputs = np.random.rand(num_samples, input_num_row, input_num_col,
                                stack_size)
      # basic test
      with self.cached_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.UpSampling2D,
            kwargs={'size': (2, 2),
                    'data_format': data_format},
            input_shape=inputs.shape)
        for length_row in [2]:
          for length_col in [2, 3]:
            layer = keras.layers.UpSampling2D(
                size=(length_row, length_col), data_format=data_format)
            layer.build(inputs.shape)
            output = layer(keras.backend.variable(inputs))
            if context.executing_eagerly():
              np_output = output.numpy()
            else:
              np_output = keras.backend.eval(output)
            if data_format == 'channels_first':
              assert np_output.shape[2] == length_row * input_num_row
              assert np_output.shape[3] == length_col * input_num_col
            else:  # tf
              assert np_output.shape[1] == length_row * input_num_row
              assert np_output.shape[2] == length_col * input_num_col
            # compare with numpy
            if data_format == 'channels_first':
              expected_out = np.repeat(inputs, length_row, axis=2)
              expected_out = np.repeat(expected_out, length_col, axis=3)
            else:  # tf
              expected_out = np.repeat(inputs, length_row, axis=1)
              expected_out = np.repeat(expected_out, length_col, axis=2)
            np.testing.assert_allclose(np_output, expected_out)
  def test_upsampling_2d_bilinear(self):
    """Bilinear upsampling produces the expected output shapes."""
    num_samples = 2
    stack_size = 2
    input_num_row = 11
    input_num_col = 12
    for data_format in ['channels_first', 'channels_last']:
      if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, stack_size, input_num_row,
                                input_num_col)
      else:
        inputs = np.random.rand(num_samples, input_num_row, input_num_col,
                                stack_size)
      testing_utils.layer_test(keras.layers.UpSampling2D,
                               kwargs={'size': (2, 2),
                                       'data_format': data_format,
                                       'interpolation': 'bilinear'},
                               input_shape=inputs.shape)
      if not context.executing_eagerly():
        for length_row in [2]:
          for length_col in [2, 3]:
            # Fix: pass interpolation='bilinear' so the shape-check loop
            # actually exercises the bilinear path this test is named for
            # (the original used the default nearest interpolation here).
            layer = keras.layers.UpSampling2D(
                size=(length_row, length_col),
                data_format=data_format,
                interpolation='bilinear')
            layer.build(inputs.shape)
            outputs = layer(keras.backend.variable(inputs))
            np_output = keras.backend.eval(outputs)
            if data_format == 'channels_first':
              self.assertEqual(np_output.shape[2], length_row * input_num_row)
              self.assertEqual(np_output.shape[3], length_col * input_num_col)
            else:
              self.assertEqual(np_output.shape[1], length_row * input_num_row)
              self.assertEqual(np_output.shape[2], length_col * input_num_col)
  def test_upsampling_3d(self):
    """Nearest-neighbor 3D upsampling matches np.repeat on all three axes."""
    num_samples = 2
    stack_size = 2
    input_len_dim1 = 10
    input_len_dim2 = 11
    input_len_dim3 = 12
    for data_format in ['channels_first', 'channels_last']:
      if data_format == 'channels_first':
        inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
                                input_len_dim2, input_len_dim3)
      else:
        inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
                                input_len_dim3, stack_size)
      # basic test
      with self.cached_session(use_gpu=True):
        testing_utils.layer_test(
            keras.layers.UpSampling3D,
            kwargs={'size': (2, 2, 2),
                    'data_format': data_format},
            input_shape=inputs.shape)
        for length_dim1 in [2, 3]:
          for length_dim2 in [2]:
            for length_dim3 in [3]:
              layer = keras.layers.UpSampling3D(
                  size=(length_dim1, length_dim2, length_dim3),
                  data_format=data_format)
              layer.build(inputs.shape)
              output = layer(keras.backend.variable(inputs))
              if context.executing_eagerly():
                np_output = output.numpy()
              else:
                np_output = keras.backend.eval(output)
              if data_format == 'channels_first':
                assert np_output.shape[2] == length_dim1 * input_len_dim1
                assert np_output.shape[3] == length_dim2 * input_len_dim2
                assert np_output.shape[4] == length_dim3 * input_len_dim3
              else:  # tf
                assert np_output.shape[1] == length_dim1 * input_len_dim1
                assert np_output.shape[2] == length_dim2 * input_len_dim2
                assert np_output.shape[3] == length_dim3 * input_len_dim3
              # compare with numpy
              if data_format == 'channels_first':
                expected_out = np.repeat(inputs, length_dim1, axis=2)
                expected_out = np.repeat(expected_out, length_dim2, axis=3)
                expected_out = np.repeat(expected_out, length_dim3, axis=4)
              else:  # tf
                expected_out = np.repeat(inputs, length_dim1, axis=1)
                expected_out = np.repeat(expected_out, length_dim2, axis=2)
                expected_out = np.repeat(expected_out, length_dim3, axis=3)
              np.testing.assert_allclose(np_output, expected_out)
@keras_parameterized.run_all_keras_modes
class CroppingTest(keras_parameterized.TestCase):
  def test_cropping_1d(self):
    """Cropping1D smoke test plus validation of bad `cropping` arguments."""
    num_samples = 2
    time_length = 4
    input_len_dim1 = 2
    inputs = np.random.rand(num_samples, time_length, input_len_dim1)
    with self.cached_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.Cropping1D,
          kwargs={'cropping': (2, 2)},
          input_shape=inputs.shape)
    # test incorrect use
    with self.assertRaises(ValueError):
      keras.layers.Cropping1D(cropping=(1, 1, 1))
    with self.assertRaises(ValueError):
      keras.layers.Cropping1D(cropping=None)
def test_cropping_2d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 9
input_len_dim2 = 9
cropping = ((2, 2), (3, 3))
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
with self.cached_session(use_gpu=True):
# basic test
testing_utils.layer_test(
keras.layers.Cropping2D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
# correctness test
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :, cropping[0][0]:-cropping[0][1], cropping[
1][0]:-cropping[1][1]]
else:
expected_out = inputs[:, cropping[0][0]:-cropping[0][1], cropping[1][
0]:-cropping[1][1], :]
np.testing.assert_allclose(np_output, expected_out)
for data_format in ['channels_first', 'channels_last']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
stack_size)
# another correctness test (no cropping)
with self.cached_session(use_gpu=True):
cropping = ((0, 0), (0, 0))
layer = keras.layers.Cropping2D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with input
np.testing.assert_allclose(np_output, inputs)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=(1, 1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping2D(cropping=None)
def test_cropping_3d(self):
num_samples = 2
stack_size = 2
input_len_dim1 = 8
input_len_dim2 = 8
input_len_dim3 = 8
croppings = [((2, 2), (1, 1), (2, 3)), 3, (0, 1, 1)]
for cropping in croppings:
for data_format in ['channels_last', 'channels_first']:
if data_format == 'channels_first':
inputs = np.random.rand(num_samples, stack_size, input_len_dim1,
input_len_dim2, input_len_dim3)
else:
inputs = np.random.rand(num_samples, input_len_dim1, input_len_dim2,
input_len_dim3, stack_size)
# basic test
with self.cached_session(use_gpu=True):
testing_utils.layer_test(
keras.layers.Cropping3D,
kwargs={'cropping': cropping,
'data_format': data_format},
input_shape=inputs.shape)
if len(croppings) == 3 and len(croppings[0]) == 2:
# correctness test
with self.cached_session(use_gpu=True):
layer = keras.layers.Cropping3D(
cropping=cropping, data_format=data_format)
layer.build(inputs.shape)
output = layer(keras.backend.variable(inputs))
if context.executing_eagerly():
np_output = output.numpy()
else:
np_output = keras.backend.eval(output)
# compare with numpy
if data_format == 'channels_first':
expected_out = inputs[:, :,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1]]
else:
expected_out = inputs[:,
cropping[0][0]:-cropping[0][1],
cropping[1][0]:-cropping[1][1],
cropping[2][0]:-cropping[2][1], :]
np.testing.assert_allclose(np_output, expected_out)
# test incorrect use
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=(1, 1))
with self.assertRaises(ValueError):
keras.layers.Cropping3D(cropping=None)
@keras_parameterized.run_all_keras_modes
class DepthwiseConv2DTest(keras_parameterized.TestCase):
  """Tests for the DepthwiseConv2D layer."""

  def _run_test(self, kwargs, expected_output_shape=None):
    """Run `layer_test` for DepthwiseConv2D on a fixed (2, 7, 6, 3) input."""
    batch, rows, cols, channels = 2, 7, 6, 3
    with self.cached_session(use_gpu=True):
      testing_utils.layer_test(
          keras.layers.DepthwiseConv2D,
          kwargs=kwargs,
          input_shape=(batch, rows, cols, channels),
          expected_output_shape=expected_output_shape)

  @parameterized.named_parameters(
      ('padding_valid', {'padding': 'valid'}),
      ('padding_same', {'padding': 'same'}),
      ('strides', {'strides': (2, 2)}),
      # Only runs on GPU with CUDA, channels_first is not supported on CPU.
      # TODO(b/62340061): Support channels_first on CPU.
      ('data_format', {'data_format': 'channels_first'}),
      ('depth_multiplier_1', {'depth_multiplier': 1}),
      ('depth_multiplier_2', {'depth_multiplier': 2}),
      ('dilation_rate', {'dilation_rate': (2, 2)}, (None, 3, 2, 3)),
  )
  def test_depthwise_conv2d(self, kwargs, expected_output_shape=None):
    """Exercise one constructor option per parameterized case."""
    kwargs['kernel_size'] = (3, 3)
    # The channels_first case needs CUDA; skip the run when unavailable.
    needs_gpu = 'data_format' in kwargs
    if not needs_gpu or test.is_gpu_available(cuda_only=True):
      self._run_test(kwargs, expected_output_shape)

  def test_depthwise_conv2d_full(self):
    """Exercise every constructor argument at once."""
    self._run_test(dict(
        kernel_size=3,
        padding='valid',
        data_format='channels_last',
        dilation_rate=(1, 1),
        activation=None,
        depthwise_regularizer='l2',
        bias_regularizer='l2',
        activity_regularizer='l2',
        depthwise_constraint='unit_norm',
        use_bias=True,
        strides=(2, 2),
        depth_multiplier=1,
    ))
# Allow running this test file directly as a script.
if __name__ == '__main__':
  test.main()
| {
"content_hash": "73b6e6bdc2ad3511bd546dd879024935",
"timestamp": "",
"source": "github",
"line_count": 1158,
"max_line_length": 80,
"avg_line_length": 37.85578583765112,
"alnum_prop": 0.5762255628806716,
"repo_name": "aldian/tensorflow",
"id": "0bc869160ecfc2000072920aaad3611e4847005b",
"size": "44526",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/layers/convolutional_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
} |
import random
import unittest
from hearthbreaker.agents.basic_agents import DoNothingAgent, PredictableAgent
from tests.agents.testing_agents import OneCardPlayingAgent, PlayAndAttackAgent, CardTestingAgent,\
SelfSpellTestingAgent, EnemyMinionSpellTestingAgent
from tests.testing_utils import generate_game_for
from hearthbreaker.cards import *
class TestWarrior(unittest.TestCase):
def setUp(self):
random.seed(1857)
def test_ArathiWeaponsmith(self):
game = generate_game_for(ArathiWeaponsmith, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
# Arathi Weaponsmith should be played
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(3, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
self.assertEqual("Arathi Weaponsmith", game.players[0].minions[0].card.name)
self.assertEqual(2, game.players[0].weapon.base_attack)
self.assertEqual(2, game.players[0].weapon.durability)
def test_Armorsmith(self):
game = generate_game_for(Armorsmith, StonetuskBoar, OneCardPlayingAgent, PlayAndAttackAgent)
# Armorsmith should be played
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, game.players[0].minions[0].calculate_attack())
self.assertEqual(4, game.players[0].minions[0].health)
self.assertEqual("Armorsmith", game.players[0].minions[0].card.name)
self.assertEqual(0, game.players[0].hero.armor)
# Three Stonetusks should attack, generating one armor each
game.play_single_turn()
self.assertEqual(1, game.players[0].minions[0].health)
self.assertEqual(3, game.players[0].hero.armor)
def test_CruelTaskmaster(self):
game = generate_game_for(CruelTaskmaster, Shieldbearer, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(0, game.players[1].minions[0].calculate_attack())
self.assertEqual(4, game.players[1].minions[0].health)
# Cruel Taskmaster should be played, targeting the Shieldbearer
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual("Cruel Taskmaster", game.players[0].minions[0].card.name)
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[1].minions[0].calculate_attack())
self.assertEqual(3, game.players[1].minions[0].health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual(2, game.players[0].minions[1].calculate_attack())
self.assertEqual(2, game.players[0].minions[1].health)
self.assertEqual(2, game.players[1].minions[0].calculate_attack())
self.assertEqual(3, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].calculate_attack())
self.assertEqual(3, game.players[1].minions[1].health)
def test_FrothingBerserker(self):
game = generate_game_for(FrothingBerserker, AngryChicken, OneCardPlayingAgent, PlayAndAttackAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(3, len(game.players[1].minions))
# Frothing Berserker should be played
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(4, game.players[0].minions[0].health)
self.assertEqual("Frothing Berserker", game.players[0].minions[0].card.name)
# Three chickens should attack, generating a total of +6 attack for the Frothing Berserker
game.play_single_turn()
self.assertEqual(8, game.players[0].minions[0].calculate_attack())
self.assertEqual(1, game.players[0].minions[0].health)
def test_GrommashHellscream(self):
game = generate_game_for(GrommashHellscream, ExplosiveTrap, PlayAndAttackAgent, CardTestingAgent)
for turn in range(0, 14):
game.play_single_turn()
# Hellscream should be played, attacking (charge) and getting 2 damage by trap that will trigger enrage,
# dealing 10 damage as result
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(10, game.players[0].minions[0].calculate_attack())
self.assertEqual(7, game.players[0].minions[0].health)
self.assertEqual(20, game.players[1].hero.health)
game.players[0].minions[0].heal(2, None)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].damage(2, None)
self.assertEqual(10, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].silence()
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].heal(2, None)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
game.players[0].minions[0].damage(2, None)
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
def test_KorkronElite(self):
game = generate_game_for(KorkronElite, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 6):
game.play_single_turn()
# Kor'kron Elite should be played and attack (charge)
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertEqual(3, game.players[0].minions[0].health)
self.assertEqual(26, game.players[1].hero.health)
def test_WarsongCommander(self):
game = generate_game_for(WarsongCommander, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
# Super special test cases - http://www.hearthhead.com/card=1009/warsong-commander#comments:id=1935295
game.players[0].mana = 100
# Play the Warsong Commander
commander = WarsongCommander()
commander.player = game.players[0]
commander.use(game.players[0], game)
self.assertFalse(game.players[0].minions[0].charge()) # Should not give charge to itself
# Test so that enrage doesn't remove the charge
worgen = RagingWorgen()
worgen.player = game.players[0]
worgen.use(game.players[0], game)
game.players[0].minions[0].damage(1, None) # Trigger enrage, charge should still be active
self.assertEqual(4, game.players[0].minions[0].calculate_attack())
self.assertTrue(game.players[0].minions[0].charge())
# Test so that charge gets applied before a battlecry
weapon = FieryWarAxe().create_weapon(game.players[0])
weapon.equip(game.players[0])
self.assertEqual(3, game.players[0].weapon.base_attack)
self.assertEqual(2, game.players[0].weapon.durability)
bloodsail = BloodsailRaider()
bloodsail.player = game.players[0]
bloodsail.use(game.players[0], game) # Should gain charge first, then 4 attack from weapon
self.assertEqual(5, game.players[0].minions[0].calculate_attack())
self.assertTrue(game.players[0].minions[0].charge())
# TODO: Test with Faceless Manipulator here
# Remove the Warsong Commander
game.players[0].minions[-1].die(None)
game.check_delayed()
# The previous charged minions should still have charge
self.assertTrue(game.players[0].minions[0].charge())
self.assertTrue(game.players[0].minions[-1].charge())
# Test so that a minion played before Warsong doesn't get charge
shield = Shieldbearer()
shield.summon(game.players[0], game, 0)
self.assertFalse(game.players[0].minions[0].charge())
commander.use(game.players[0], game)
self.assertFalse(game.players[0].minions[1].charge())
# Remove the Warsong again
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
# Buff a minion to above 3
game.players[0].minions[0].change_attack(5)
# Play Warsong, the buffed minion should not get charge
commander.use(game.players[0], game)
self.assertFalse(game.players[0].minions[1].charge())
# Auras!
stormwind = StormwindChampion()
stormwind.player = game.players[0]
stormwind.use(game.players[0], game)
self.assertEqual(3, game.players[0].minions[1].calculate_attack())
self.assertEqual(4, game.players[0].minions[1].health)
# Kill the worgen
game.players[0].minions[-1].die(None)
game.players[0].minions[-1].activate_delayed()
# And play it again. It should get the aura FIRST, making it a 4/4 minion, and thus DOES NOT gain charge!
worgen.use(game.players[0], game)
self.assertFalse(game.players[0].minions[0].charge())
# Minions summoned by other minions
dragonling = DragonlingMechanic()
dragonling.player = game.players[0]
dragonling.use(game.players[0], game)
self.assertTrue(game.players[0].minions[0].charge())
self.assertEqual("Mechanical Dragonling", game.players[0].minions[1].card.name)
self.assertTrue(game.players[0].minions[1].charge())
# Kill them to make room
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
creeper = HauntedCreeper()
creeper.player = game.players[0]
creeper.use(game.players[0], game)
self.assertTrue(game.players[0].minions[0].charge())
game.players[0].minions[0].die(None)
game.players[0].minions[0].activate_delayed()
game.check_delayed()
self.assertEqual("Spectral Spider", game.players[0].minions[0].card.name)
self.assertTrue(game.players[0].minions[0].charge())
def test_BattleRage(self):
game = generate_game_for(BattleRage, StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.players[0].mana = 100
shield = Shieldbearer()
shield.player = game.players[0]
shield.use(game.players[0], game)
shield.use(game.players[0], game)
shield.use(game.players[0], game)
game.players[0].minions[0].damage(1, None)
game.players[0].minions[1].damage(1, None)
game.players[0].hero.damage(1, None)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(4, len(game.players[0].hand))
# Battle Rage should be played, 3 damaged characters = 3 cards drawn
game.play_single_turn()
self.assertEqual(7, len(game.players[0].hand))
def test_Brawl(self):
game = generate_game_for(Brawl, StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.players[0].mana = 100
shield = Shieldbearer()
shield.player = game.players[0]
shield.use(game.players[0], game)
shield.use(game.players[0], game)
golem = HarvestGolem()
golem.player = game.players[0]
golem.use(game.players[0], game)
shield.use(game.players[1], game)
shield.use(game.players[1], game)
shield.use(game.players[1], game)
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
# Brawl should be played, leaving one minion behind and Damaged Golem should have spawned for first player
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Damaged Golem", game.players[0].minions[0].card.name)
self.assertEqual(1, len(game.players[1].minions))
def test_Charge(self):
game = generate_game_for([Shieldbearer, Charge], StonetuskBoar, CardTestingAgent, DoNothingAgent)
game.players[0].agent.play_on = 4
for turn in range(0, 6):
game.play_single_turn()
# Shieldbearer and Charge should be played
game.play_single_turn()
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertTrue(game.players[0].minions[0].charge())
def test_Cleave(self):
game = generate_game_for(Cleave, SenjinShieldmasta, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 10):
game.play_single_turn()
self.assertEqual(8, len(game.players[0].hand))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(5, game.players[1].minions[0].health)
self.assertEqual(5, game.players[1].minions[1].health)
# 2 enemy minions are now in play, so Cleave should be played
game.play_single_turn()
self.assertEqual(8, len(game.players[0].hand))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(3, game.players[1].minions[0].health)
self.assertEqual(3, game.players[1].minions[1].health)
def test_WhirlwindExecute(self):
game = generate_game_for(Execute, [GoldshireFootman, Whirlwind], CardTestingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health)
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
def test_HeroicStrike(self):
game = generate_game_for(HeroicStrike, StonetuskBoar, PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(26, game.players[1].hero.health)
def test_InnerRageRampage(self):
game = generate_game_for([InnerRage, Rampage], GoldshireFootman, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(3, game.players[1].minions[1].calculate_attack())
self.assertEqual(1, game.players[1].minions[1].health)
game.play_single_turn()
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(6, game.players[1].minions[1].calculate_attack())
self.assertEqual(4, game.players[1].minions[1].health)
def test_ShieldBlockShieldSlam(self):
game = generate_game_for([ShieldBlock, ShieldSlam], Doomsayer, OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 6):
game.play_single_turn()
self.assertEqual(6, len(game.players[0].hand))
self.assertEqual(5, game.players[0].hero.armor)
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(7, game.players[1].minions[0].health)
game.play_single_turn()
self.assertEqual(5, game.players[0].hero.armor)
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[1].minions[0].health)
def test_Slam(self):
game = generate_game_for(Slam, [GoldshireFootman, Doomsayer], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn() # Slam to kill Footman, no draw
self.assertEqual(4, len(game.players[0].hand))
self.assertEqual(1, len(game.players[1].minions))
game.play_single_turn() # Slam and Doomsayer survives
self.assertEqual(5, len(game.players[0].hand))
self.assertEqual(1, len(game.players[1].minions))
def test_Upgrade(self):
game = generate_game_for(Upgrade, StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(1, game.players[0].weapon.base_attack)
self.assertEqual(3, game.players[0].weapon.durability)
game.play_single_turn()
self.assertEqual(2, game.players[0].weapon.base_attack)
self.assertEqual(4, game.players[0].weapon.durability)
def test_MortalStrike(self):
game = generate_game_for(MortalStrike, StonetuskBoar, SelfSpellTestingAgent, DoNothingAgent)
game.players[0].hero.health = 14
for turn in range(0, 8):
game.play_single_turn()
self.assertEqual(10, game.players[0].hero.health)
game.play_single_turn()
self.assertEqual(4, game.players[0].hero.health)
def test_CommandingShout(self):
game = generate_game_for([StonetuskBoar, StonetuskBoar, StonetuskBoar, StonetuskBoar,
CommandingShout], UnstableGhoul,
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(0, 4):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual("Unstable Ghoul", game.current_player.minions[0].card.name)
game.play_single_turn()
self.assertEqual(4, len(game.current_player.minions))
self.assertEqual(0, len(game.other_player.minions))
game.play_single_turn()
game.play_single_turn()
self.assertEqual(0, len(game.other_player.minions))
self.assertEqual(0, len(game.current_player.minions))
def test_Gorehowl(self):
game = generate_game_for([Gorehowl, Deathwing], [BoulderfistOgre, Deathwing],
PlayAndAttackAgent, CardTestingAgent)
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(7, game.players[1].minions[0].health)
game.play_single_turn()
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(1, game.players[0].weapon.durability) # Gorehowl does not break from killing Boulderfist
self.assertEqual(6, game.players[0].weapon.base_attack) # But it does lose 1 attack
self.assertEqual(24, game.players[0].hero.health)
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn()
game.play_single_turn()
self.assertIsNone(game.players[0].weapon) # Attacks face and weapon breaks
self.assertEqual(24, game.players[0].hero.health)
self.assertEqual(24, game.players[1].hero.health)
def test_FieryWarAxe(self):
game = generate_game_for(FieryWarAxe, BoulderfistOgre,
PlayAndAttackAgent, DoNothingAgent)
for turn in range(0, 3):
game.play_single_turn()
self.assertEqual(1, game.current_player.weapon.durability)
self.assertEqual(3, game.current_player.weapon.base_attack)
self.assertEqual(27, game.other_player.hero.health)
def test_DeathsBite(self):
game = generate_game_for([IronfurGrizzly, DeathsBite], Deathlord,
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(0, 7):
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertIsNotNone(game.current_player.weapon)
self.assertEqual(1, game.other_player.minions[0].health)
game.play_single_turn()
game.play_single_turn()
# The Death's Bite attacks the new Deathlord, triggering the weapon's deathrattle
# This finishes off the other deathlord and the first friendly Grizzly
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(3, game.other_player.minions[0].health)
self.assertEqual(2, len(game.current_player.minions))
self.assertEqual(2, game.current_player.minions[0].health)
self.assertEqual(3, game.current_player.minions[1].health)
def test_Warbot(self):
game = generate_game_for(Warbot, StonetuskBoar, CardTestingAgent, PlayAndAttackAgent)
game.play_single_turn()
self.assertEqual(1, len(game.current_player.minions))
self.assertEqual(1, game.current_player.minions[0].calculate_attack())
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual(2, game.other_player.minions[0].calculate_attack())
def test_BouncingBlades(self):
game = generate_game_for([GoldshireFootman, EchoingOoze, BouncingBlade], [GoldshireFootman, EchoingOoze],
CardTestingAgent, CardTestingAgent)
for turn in range(4):
game.play_single_turn()
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual(2, game.players[0].minions[1].health)
self.assertEqual(2, game.players[0].minions[2].health)
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].health)
self.assertEqual(2, game.players[1].minions[2].health)
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(3, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].health)
self.assertEqual(1, game.players[0].minions[1].health)
self.assertEqual(2, game.players[1].minions[0].health)
self.assertEqual(2, game.players[1].minions[1].health)
self.assertEqual(1, game.players[1].minions[2].health)
def test_OgreWarmaul(self):
game = generate_game_for(OgreWarmaul,
[StonetuskBoar, GoldshireFootman, SilverbackPatriarch],
PlayAndAttackAgent, OneCardPlayingAgent)
for turn in range(5):
game.play_single_turn()
self.assertIsNotNone(game.current_player.weapon)
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Stonetusk Boar", game.other_player.minions[0].card.name)
self.assertEqual(30, game.other_player.hero.health)
game.play_single_turn()
game.play_single_turn()
self.assertEqual(1, len(game.other_player.minions))
self.assertEqual("Silverback Patriarch", game.other_player.minions[0].card.name)
self.assertEqual(30, game.other_player.hero.health)
def test_SiegeEngine(self):
game = generate_game_for(SiegeEngine, StonetuskBoar, PredictableAgent, DoNothingAgent)
# Arathi Weaponsmith should be played
for turn in range(0, 13):
game.play_single_turn()
self.assertEqual(12, game.players[0].hero.armor)
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(5, game.players[0].minions[-1].calculate_attack())
self.assertEqual(5, game.players[0].minions[-1].health)
self.assertEqual("Siege Engine", game.players[0].minions[0].card.name)
# Hero Power will be used, triggering the Siege Engine
for turn in range(0, 2):
game.play_single_turn()
self.assertEqual(14, game.players[0].hero.armor)
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(6, game.players[0].minions[-1].calculate_attack())
self.assertEqual(5, game.players[0].minions[-1].health)
def test_Crush(self):
game = generate_game_for([Crush, ChillwindYeti], DreadInfernal, EnemyMinionSpellTestingAgent,
CardTestingAgent)
# Player 2 plays a Dread Infernal
for turn in range(0, 12):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
# Player 1 pays 7 mana to use Crush
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(0, len(game.players[1].minions))
self.assertEqual(0, game.players[0].mana)
# Player 2 plays another Dread Infernal
game.play_single_turn()
self.assertEqual(0, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
# Player 1 plays Yeti, can't afford Crush
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
# Player 2 plays another Dread Infernal, damaging the Yeti
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(2, len(game.players[1].minions))
self.assertEqual(4, game.players[0].minions[-1].health)
# Player 1 pays 3 mana to use Crush and 4 the play a 2nd Yeti
game.play_single_turn()
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[0].mana)
def test_BurrowingMine(self):
game = generate_game_for(BurrowingMine, StonetuskBoar, DoNothingAgent, DoNothingAgent)
game.play_single_turn()
self.assertEqual(0, game.current_player.hero.health)
self.assertEqual(3, len(game.current_player.hand))
self.assertEqual(0, game.current_player.deck.left)
def test_IronJuggernaut(self):
game = generate_game_for(IronJuggernaut, CircleOfHealing, OneCardPlayingAgent, PredictableAgent)
for turn in range(11):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual("Iron Juggernaut", game.players[0].minions[0].card.name)
found_mine = False
for card in game.players[1].deck.cards:
if card.name == "Burrowing Mine":
found_mine = True
self.assertTrue(found_mine, "Did not find the burrowing mine in the opponent's deck")
# Will draw multiple mines in a row
self.assertEqual(30, game.players[1].hero.health)
for turn in range(43):
game.play_single_turn()
self.assertEqual(0, game.players[1].hero.health)
def test_ScrewjankClunker(self):
game = generate_game_for([Wisp, ScrewjankClunker, ScrewjankClunker], [Wisp, MoltenGiant],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(8):
game.play_single_turn()
# Clunker cannot buff anything
self.assertEqual(2, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(5, game.players[0].minions[0].health)
self.assertEqual(1, game.players[0].minions[1].calculate_attack())
self.assertEqual(1, game.players[0].minions[1].health)
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(1, game.players[1].minions[0].health)
game.play_single_turn()
# Clunker buffs previous Clunker
self.assertEqual(3, len(game.players[0].minions))
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(2, game.players[0].minions[0].calculate_attack())
self.assertEqual(5, game.players[0].minions[0].health)
self.assertEqual(4, game.players[0].minions[1].calculate_attack())
self.assertEqual(7, game.players[0].minions[1].health)
self.assertEqual(1, game.players[0].minions[2].calculate_attack())
self.assertEqual(1, game.players[0].minions[2].health)
self.assertEqual(1, game.players[1].minions[0].calculate_attack())
self.assertEqual(1, game.players[1].minions[0].health)
def test_AxeFlinger(self):
game = generate_game_for(AxeFlinger, [MortalCoil, ShadowWordPain], OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(7):
game.play_single_turn()
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(30, game.players[1].hero.health)
game.play_single_turn() # Mortal Coils the Axe Flinger
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(28, game.players[1].hero.health)
game.play_single_turn() # Plays 2nd Axe Flinger
game.play_single_turn() # Pains 1 Axe Flinger, no damage
self.assertEqual(1, len(game.players[0].minions))
self.assertEqual(28, game.players[1].hero.health)
def test_Revenge(self):
game = generate_game_for(SinisterStrike, [ChillwindYeti, Revenge, Revenge],
OneCardPlayingAgent, OneCardPlayingAgent)
for turn in range(11):
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(4, game.players[1].minions[0].health) # 1st Revenge cast at 15 hp, so 1 damage
self.assertEqual(12, game.players[1].hero.health)
game.play_single_turn()
self.assertEqual(1, len(game.players[1].minions))
self.assertEqual(1, game.players[1].minions[0].health) # 2nd Revenge cast at 12 hp, so 3 damage
self.assertEqual(12, game.players[1].hero.health)
    def test_AlextraszasChampion(self):
        """Alexstrasza's Champion gains +1 attack and charge when a dragon is held.

        NOTE(review): Nefarian is presumably the dragon enabling the buff on the
        later play — TODO confirm against the card definitions.
        """
        game = generate_game_for([AlexstraszasChampion, AlexstraszasChampion, AlexstraszasChampion,
                                  AlexstraszasChampion, AlexstraszasChampion, Nefarian], StonetuskBoar,
                                 CardTestingAgent, DoNothingAgent)
        for turn in range(3):
            game.play_single_turn()
        # First champion is played without the buff: base 2 attack, no charge.
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(2, game.current_player.minions[0].calculate_attack())
        self.assertFalse(game.current_player.minions[0].charge())
        game.play_single_turn()
        game.play_single_turn()
        # Later champion comes down buffed: 3 attack and charge.
        self.assertEqual(2, len(game.current_player.minions))
        self.assertEqual(3, game.current_player.minions[0].calculate_attack())
        self.assertTrue(game.current_player.minions[0].charge())
| {
"content_hash": "8397284ba3972dc787ed5ff1fe1c704a",
"timestamp": "",
"source": "github",
"line_count": 722,
"max_line_length": 116,
"avg_line_length": 42.945983379501385,
"alnum_prop": 0.6532718418421647,
"repo_name": "jomyhuang/sdwle",
"id": "da58ebe5bfe74762910b4bd471e9de21beac9905",
"size": "31007",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "testsHB/card_tests/warrior_tests.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3162614"
}
],
"symlink_target": ""
} |
"""
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import re
import warnings
import numpy as np
from numpy.testing import assert_allclose
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from scipy.special import expit
import pytest
from sklearn import datasets
from sklearn.base import clone
from sklearn.datasets import make_classification, make_regression
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble._gradient_boosting import predict_stages
from sklearn.preprocessing import scale
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
from sklearn.utils import check_random_state, tosequence
from sklearn.utils._mocking import NoSampleWeightWrapper
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import skip_if_32bit
from sklearn.exceptions import DataConversionWarning
from sklearn.exceptions import NotFittedError
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from sklearn.svm import NuSVR
GRADIENT_BOOSTING_ESTIMATORS = [GradientBoostingClassifier, GradientBoostingRegressor]
# Toy linearly-separable sample used by most of the small smoke tests below.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
# Query points and their expected hard predictions.
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also make regression dataset
# NOTE(review): n_informative=8 exceeds n_features=4; make_regression presumably
# clips it so all 4 features are informative — confirm this is intended.
X_reg, y_reg = make_regression(
    n_samples=100, n_features=4, n_informative=8, noise=10, random_state=7
)
y_reg = scale(y_reg)
# Shared module-level RNG: tests below consume from it, so the draw order
# matters for reproducibility.
rng = np.random.RandomState(0)
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
@pytest.mark.parametrize("loss", ("log_loss", "exponential"))
def test_classification_toy(loss, global_random_seed):
    """Check classification on a toy dataset for both classification losses."""
    model = GradientBoostingClassifier(
        loss=loss, n_estimators=10, random_state=global_random_seed
    )
    # Predicting before fitting must raise.
    with pytest.raises(ValueError):
        model.predict(T)
    model.fit(X, y)
    assert_array_equal(model.predict(T), true_result)
    assert len(model.estimators_) == 10
    # The training loss should improve at least once along the boosting path.
    loss_deltas = model.train_score_[:-1] - model.train_score_[1:]
    assert np.any(loss_deltas >= 0.0)
    # apply() yields one leaf index per (sample, stage, class).
    assert model.apply(X).shape == (6, 10, 1)
@pytest.mark.parametrize("loss", ("log_loss", "exponential"))
def test_classification_synthetic(loss, global_random_seed):
    """GradientBoostingClassifier on the Hastie et al. dataset (ESLII Fig. 10.9)."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=global_random_seed)
    X_train, y_train = X[:2000], y[:2000]
    X_test, y_test = X[2000:], y[2000:]
    # More stumps should lower the test error.
    stump_params = {
        "max_depth": 1,
        "learning_rate": 1.0,
        "loss": loss,
        "random_state": global_random_seed,
    }
    few_stumps = GradientBoostingClassifier(n_estimators=100, **stump_params)
    few_stumps.fit(X_train, y_train)
    many_stumps = GradientBoostingClassifier(n_estimators=200, **stump_params)
    many_stumps.fit(X_train, y_train)
    assert few_stumps.score(X_test, y_test) < many_stumps.score(X_test, y_test)
    # With many estimators, decision stumps are better suited to this dataset
    # than trees with up to 10 leaves.
    shared_params = {
        "n_estimators": 200,
        "learning_rate": 1.0,
        "loss": loss,
        "random_state": global_random_seed,
    }
    stumps = GradientBoostingClassifier(max_depth=1, **shared_params)
    stumps.fit(X_train, y_train)
    ten_leaf_trees = GradientBoostingClassifier(max_leaf_nodes=10, **shared_params)
    ten_leaf_trees.fit(X_train, y_train)
    assert stumps.score(X_test, y_test) > ten_leaf_trees.score(X_test, y_test)
@pytest.mark.parametrize("loss", ("squared_error", "absolute_error", "huber"))
@pytest.mark.parametrize("subsample", (1.0, 0.5))
def test_regression_dataset(loss, subsample, global_random_seed):
    # Check consistency on regression dataset with least squares
    # and least absolute deviation.
    ones = np.ones(len(y_reg))
    last_y_pred = None
    # sample_weight=None, all-ones and all-twos should all be equivalent to an
    # unweighted fit (uniform rescaling of the weights).
    for sample_weight in [None, ones, 2 * ones]:
        # learning_rate, max_depth and n_estimators were adjusted to get a model
        # that is accurate enough to reach a low MSE on the training set while
        # keeping the resource used to execute this test low enough.
        reg = GradientBoostingRegressor(
            n_estimators=30,
            loss=loss,
            max_depth=4,
            subsample=subsample,
            min_samples_split=2,
            random_state=global_random_seed,
            learning_rate=0.5,
        )
        reg.fit(X_reg, y_reg, sample_weight=sample_weight)
        # one leaf index per (sample, stage)
        leaves = reg.apply(X_reg)
        assert leaves.shape == (100, 30)
        y_pred = reg.predict(X_reg)
        mse = mean_squared_error(y_reg, y_pred)
        assert mse < 0.05
        if last_y_pred is not None:
            # FIXME: We temporarily bypass this test. This is due to the fact
            # that GBRT with and without `sample_weight` do not use the same
            # implementation of the median during the initialization with the
            # `DummyRegressor`. In the future, we should make sure that both
            # implementations should be the same. See PR #17377 for more.
            # assert_allclose(last_y_pred, y_pred)
            pass
        last_y_pred = y_pred
@pytest.mark.parametrize("subsample", (1.0, 0.5))
@pytest.mark.parametrize("sample_weight", (None, 1))
def test_iris(subsample, sample_weight, global_random_seed):
    """Check consistency on the iris dataset with and without subsampling."""
    if sample_weight == 1:
        # Uniform weights must behave like no weights at all.
        sample_weight = np.ones(len(iris.target))
    model = GradientBoostingClassifier(
        n_estimators=100,
        loss="log_loss",
        random_state=global_random_seed,
        subsample=subsample,
    )
    model.fit(iris.data, iris.target, sample_weight=sample_weight)
    assert model.score(iris.data, iris.target) > 0.9
    # apply() yields one leaf index per (sample, stage, class).
    assert model.apply(iris.data).shape == (150, 100, 3)
def test_regression_synthetic(global_random_seed):
    # Test on the synthetic regression datasets used in Leo Breiman,
    # "Bagging Predictors". Machine Learning 24(2): 123-140 (1996).
    random_state = check_random_state(global_random_seed)
    regression_params = {
        "n_estimators": 100,
        "max_depth": 4,
        "min_samples_split": 2,
        "learning_rate": 0.1,
        "loss": "squared_error",
        "random_state": global_random_seed,
    }
    # Friedman1: MSE threshold chosen loosely enough to pass for any seed.
    X, y = datasets.make_friedman1(n_samples=1200, random_state=random_state, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 6.5
    # Friedman2
    X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 2500.0
    # Friedman3
    X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingRegressor(**regression_params)
    clf.fit(X_train, y_train)
    mse = mean_squared_error(y_test, clf.predict(X_test))
    assert mse < 0.025
@pytest.mark.parametrize(
    "GradientBoosting, X, y",
    [
        (GradientBoostingRegressor, X_reg, y_reg),
        (GradientBoostingClassifier, iris.data, iris.target),
    ],
)
def test_feature_importances(GradientBoosting, X, y):
    """Smoke test: ``feature_importances_`` exists only after fitting."""
    model = GradientBoosting()
    assert not hasattr(model, "feature_importances_")
    model.fit(X, y)
    assert hasattr(model, "feature_importances_")
def test_probability_log(global_random_seed):
    """predict_proba returns valid probabilities consistent with predict."""
    model = GradientBoostingClassifier(n_estimators=100, random_state=global_random_seed)
    # Not fitted yet: probability queries must raise.
    with pytest.raises(ValueError):
        model.predict_proba(T)
    model.fit(X, y)
    assert_array_equal(model.predict(T), true_result)
    # Probabilities must lie in [0, 1].
    y_proba = model.predict_proba(T)
    assert np.all((y_proba >= 0.0) & (y_proba <= 1.0))
    # The argmax over the probabilities must match the hard predictions.
    proba_argmax = model.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(proba_argmax, true_result)
def test_single_class_with_sample_weight():
    """Zero weights that leave only one class must raise a helpful error."""
    # The three negative-class samples are trimmed away by zero weights.
    sample_weight = [0, 0, 0, 1, 1, 1]
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    msg = (
        "y contains 1 class after sample_weight trimmed classes with "
        "zero weights, while a minimum of 2 classes are required."
    )
    with pytest.raises(ValueError, match=msg):
        model.fit(X, y, sample_weight=sample_weight)
def test_check_inputs_predict_stages():
    # check that predict_stages raises an error if the type of X is not
    # supported: only C-ordered dense arrays and CSR matrices are accepted.
    x, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    x_sparse_csc = csc_matrix(x)
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(x, y)
    # output buffer: one decision value column for the binary problem
    score = np.zeros((y.shape)).reshape(-1, 1)
    err_msg = "When X is a sparse matrix, a CSR format is expected"
    with pytest.raises(ValueError, match=err_msg):
        predict_stages(clf.estimators_, x_sparse_csc, clf.learning_rate, score)
    # Fortran-ordered dense input is rejected as well.
    x_fortran = np.asfortranarray(x)
    with pytest.raises(ValueError, match="X should be C-ordered np.ndarray"):
        predict_stages(clf.estimators_, x_fortran, clf.learning_rate, score)
def test_max_feature_regression(global_random_seed):
    """Regression test: the random state is set properly under max_features."""
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=global_random_seed)
    X_train, y_train = X[:2000], y[:2000]
    X_test, y_test = X[2000:], y[2000:]
    gbrt = GradientBoostingClassifier(
        n_estimators=100,
        min_samples_split=5,
        max_depth=2,
        learning_rate=0.1,
        max_features=2,
        random_state=global_random_seed,
    )
    gbrt.fit(X_train, y_train)
    # The held-out log loss should stay below a loose threshold.
    log_loss = gbrt._loss(y_test, gbrt.decision_function(X_test))
    assert log_loss < 0.5, "GB failed with deviance %.4f" % log_loss
def test_feature_importance_regression(
    fetch_california_housing_fxt, global_random_seed
):
    """Test that Gini importance is calculated correctly.
    This test follows the example from [1]_ (pg. 373).
    .. [1] Friedman, J., Hastie, T., & Tibshirani, R. (2001). The elements
        of statistical learning. New York: Springer series in statistics.
    """
    california = fetch_california_housing_fxt()
    X, y = california.data, california.target
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, random_state=global_random_seed
    )
    reg = GradientBoostingRegressor(
        loss="huber",
        learning_rate=0.1,
        max_leaf_nodes=6,
        n_estimators=100,
        random_state=global_random_seed,
    )
    reg.fit(X_train, y_train)
    # feature names sorted by decreasing importance
    sorted_idx = np.argsort(reg.feature_importances_)[::-1]
    sorted_features = [california.feature_names[s] for s in sorted_idx]
    # The most important feature is the median income by far.
    assert sorted_features[0] == "MedInc"
    # The three subsequent features are the following. Their relative ordering
    # might change a bit depending on the randomness of the trees and the
    # train / test split.
    assert set(sorted_features[1:4]) == {"Longitude", "AveOccup", "Latitude"}
# TODO(1.3): Remove warning filter
@pytest.mark.filterwarnings("ignore:`max_features='auto'` has been deprecated in 1.1")
def test_max_feature_auto():
    # Test if max features is set properly for floats and str.
    X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
    _, n_features = X.shape
    X_train = X[:2000]
    y_train = y[:2000]
    # "auto" for a classifier -> sqrt(n_features)
    gbrt = GradientBoostingClassifier(n_estimators=1, max_features="auto")
    gbrt.fit(X_train, y_train)
    assert gbrt.max_features_ == int(np.sqrt(n_features))
    # "auto" for a regressor -> all features
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features="auto")
    gbrt.fit(X_train, y_train)
    assert gbrt.max_features_ == n_features
    # a float -> that fraction of n_features
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
    gbrt.fit(X_train, y_train)
    assert gbrt.max_features_ == int(n_features * 0.3)
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features="sqrt")
    gbrt.fit(X_train, y_train)
    assert gbrt.max_features_ == int(np.sqrt(n_features))
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features="log2")
    gbrt.fit(X_train, y_train)
    assert gbrt.max_features_ == int(np.log2(n_features))
    # tiny fractions are clipped to at least one feature
    gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.01 / X.shape[1])
    gbrt.fit(X_train, y_train)
    assert gbrt.max_features_ == 1
def test_staged_predict():
    # Test whether staged decision function eventually gives
    # the same prediction.
    X, y = datasets.make_friedman1(n_samples=1200, random_state=1, noise=1.0)
    X_train, y_train = X[:200], y[:200]
    X_test = X[200:]
    clf = GradientBoostingRegressor()
    # test raise ValueError if not fitted
    with pytest.raises(ValueError):
        np.fromiter(clf.staged_predict(X_test), dtype=np.float64)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # every staged prediction has the final shape; after the loop ``y`` holds
    # the last stage, which must equal ``predict``
    for y in clf.staged_predict(X_test):
        assert y.shape == y_pred.shape
    assert_array_almost_equal(y_pred, y)
def test_staged_predict_proba():
    # Test whether staged predict proba eventually gives
    # the same prediction.
    X, y = datasets.make_hastie_10_2(n_samples=1200, random_state=1)
    X_train, y_train = X[:200], y[:200]
    X_test, y_test = X[200:], y[200:]
    clf = GradientBoostingClassifier(n_estimators=20)
    # test raise NotFittedError if not fitted
    with pytest.raises(NotFittedError):
        np.fromiter(clf.staged_predict_proba(X_test), dtype=np.float64)
    clf.fit(X_train, y_train)
    # test if prediction for last stage equals ``predict``
    for y_pred in clf.staged_predict(X_test):
        assert y_test.shape == y_pred.shape
    assert_array_equal(clf.predict(X_test), y_pred)
    # test if prediction for last stage equals ``predict_proba``
    for staged_proba in clf.staged_predict_proba(X_test):
        # one probability row per sample, two columns for the binary problem
        assert y_test.shape[0] == staged_proba.shape[0]
        assert 2 == staged_proba.shape[1]
    assert_array_almost_equal(clf.predict_proba(X_test), staged_proba)
@pytest.mark.parametrize("Estimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_staged_functions_defensive(Estimator, global_random_seed):
    """Staged methods must yield defensive copies, not a reused buffer."""
    rng = np.random.RandomState(global_random_seed)
    X = rng.uniform(size=(10, 3))
    y = (4 * X[:, 0]).astype(int) + 1  # don't predict zeros
    estimator = Estimator()
    estimator.fit(X, y)
    for func_name in ("predict", "decision_function", "predict_proba"):
        staged_func = getattr(estimator, "staged_" + func_name, None)
        if staged_func is None:
            # the regressor has no staged_predict_proba
            continue
        with warnings.catch_warnings(record=True):
            staged_result = list(staged_func(X))
        # Zeroing one stage's output must not corrupt another stage's output.
        staged_result[1][:] = 0
        assert np.all(staged_result[0] != 0)
def test_serialization():
    """A fitted model survives a pickle round-trip with identical predictions."""
    # The historical ``import cPickle`` fallback was Python-2-only dead code;
    # plain ``pickle`` is always available on Python 3.
    import pickle

    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    assert 100 == len(clf.estimators_)
    serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
    clf = None
    clf = pickle.loads(serialized_clf)
    # The deserialized model must behave exactly like the original.
    assert_array_equal(clf.predict(T), true_result)
    assert 100 == len(clf.estimators_)
def test_degenerate_targets():
    # Check if we can fit even though all targets are equal.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
    # classifier should raise exception: a single class cannot be learned
    with pytest.raises(ValueError):
        clf.fit(X, np.ones(len(X)))
    # the regressor accepts a constant target and predicts it everywhere
    clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
    clf.fit(X, np.ones(len(X)))
    # smoke call on a random point (also advances the shared module ``rng``)
    clf.predict([rng.rand(2)])
    assert_array_equal(np.ones((1,), dtype=np.float64), clf.predict([rng.rand(2)]))
def test_quantile_loss(global_random_seed):
    """Quantile loss with alpha=0.5 must coincide with absolute_error."""
    median_model = GradientBoostingRegressor(
        n_estimators=100,
        loss="quantile",
        max_depth=4,
        alpha=0.5,
        random_state=global_random_seed,
    )
    median_model.fit(X_reg, y_reg)
    y_quantile = median_model.predict(X_reg)
    ae_model = GradientBoostingRegressor(
        n_estimators=100,
        loss="absolute_error",
        max_depth=4,
        random_state=global_random_seed,
    )
    ae_model.fit(X_reg, y_reg)
    # The median (0.5-quantile) is exactly what absolute error optimizes.
    assert_allclose(y_quantile, ae_model.predict(X_reg))
def test_symbol_labels():
    """Non-integer (string) class labels are supported."""
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    string_labels = tosequence(map(str, y))
    model.fit(X, string_labels)
    assert_array_equal(model.predict(T), tosequence(map(str, true_result)))
    assert len(model.estimators_) == 100
def test_float_class_labels():
    """Float class labels are supported."""
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    float_labels = np.asarray(y, dtype=np.float32)
    model.fit(X, float_labels)
    assert_array_equal(model.predict(T), np.asarray(true_result, dtype=np.float32))
    assert len(model.estimators_) == 100
def test_shape_y():
    """A column-vector y triggers a DataConversionWarning but still fits."""
    model = GradientBoostingClassifier(n_estimators=100, random_state=1)
    y_column = np.asarray(y, dtype=np.int32)[:, np.newaxis]
    # This warning must be raised "always": elsewhere the warnings get ignored
    # in the later tests, and the tests that check for this warning fail.
    warn_msg = (
        "A column-vector y was passed when a 1d array was expected. "
        "Please change the shape of y to \\(n_samples, \\), for "
        "example using ravel()."
    )
    with pytest.warns(DataConversionWarning, match=warn_msg):
        model.fit(X, y_column)
    assert_array_equal(model.predict(T), true_result)
    assert len(model.estimators_) == 100
def test_mem_layout():
    """Fitting works for any memory layout of X and y."""
    # Fortran- and C-ordered feature matrices.
    for X_layout in (np.asfortranarray(X), np.ascontiguousarray(X)):
        model = GradientBoostingClassifier(n_estimators=100, random_state=1)
        model.fit(X_layout, y)
        assert_array_equal(model.predict(T), true_result)
        assert len(model.estimators_) == 100
    # C- and Fortran-ordered int32 targets.
    y_int32 = np.asarray(y, dtype=np.int32)
    for y_layout in (np.ascontiguousarray(y_int32), np.asfortranarray(y_int32)):
        model = GradientBoostingClassifier(n_estimators=100, random_state=1)
        model.fit(X, y_layout)
        assert_array_equal(model.predict(T), true_result)
        assert len(model.estimators_) == 100
def test_oob_improvement():
    # Test if oob improvement has correct shape and regression test.
    clf = GradientBoostingClassifier(n_estimators=100, random_state=1, subsample=0.5)
    clf.fit(X, y)
    # one OOB improvement estimate per boosting stage
    assert clf.oob_improvement_.shape[0] == 100
    # hard-coded regression test - change if modification in OOB computation
    assert_array_almost_equal(
        clf.oob_improvement_[:5], np.array([0.19, 0.15, 0.12, -0.12, -0.11]), decimal=2
    )
def test_oob_improvement_raise():
    """Without subsampling there are no OOB samples, so the attribute is absent."""
    model = GradientBoostingClassifier(n_estimators=100, random_state=1, subsample=1.0)
    model.fit(X, y)
    with pytest.raises(AttributeError):
        model.oob_improvement_
def test_oob_multilcass_iris():
    """Check OOB improvement on a multi-class dataset.

    (The historical typo in the test name is kept: it is the public identifier
    used by test selection.)
    """
    model = GradientBoostingClassifier(
        n_estimators=100, loss="log_loss", random_state=1, subsample=0.5
    )
    model.fit(iris.data, iris.target)
    assert model.score(iris.data, iris.target) > 0.9
    assert model.oob_improvement_.shape[0] == model.n_estimators
    # hard-coded regression test - change if modification in OOB computation
    # FIXME: the following snippet does not yield the same results on 32 bits
    # assert_array_almost_equal(clf.oob_improvement_[:5],
    #                           np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
    #                           decimal=2)
def test_verbose_output():
    """verbose=1 prints a header with an OOB column and a summarized log."""
    import sys
    from io import StringIO

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    model = GradientBoostingClassifier(
        n_estimators=100, random_state=1, verbose=1, subsample=0.8
    )
    model.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # Inspect the captured log from the top.
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # Header includes the OOB column because subsample < 1.
    true_header = " ".join(["%10s"] + ["%16s"] * 3) % (
        "Iter",
        "Train Loss",
        "OOB Improve",
        "Remaining Time",
    )
    assert header == true_header
    # one line each for iterations 1-10, then every 10th up to 100
    n_lines = len(verbose_output.readlines())
    assert n_lines == 10 + 9
def test_more_verbose_output():
    """verbose=2 logs every iteration and omits the OOB column."""
    import sys
    from io import StringIO

    old_stdout = sys.stdout
    sys.stdout = StringIO()
    model = GradientBoostingClassifier(n_estimators=100, random_state=1, verbose=2)
    model.fit(X, y)
    verbose_output = sys.stdout
    sys.stdout = old_stdout
    # Inspect the captured log from the top.
    verbose_output.seek(0)
    header = verbose_output.readline().rstrip()
    # No OOB column: subsample defaults to 1.0.
    true_header = " ".join(["%10s"] + ["%16s"] * 2) % (
        "Iter",
        "Train Loss",
        "Remaining Time",
    )
    assert header == true_header
    # one log line per estimator
    assert len(verbose_output.readlines()) == 100
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start(Cls, global_random_seed):
    """Warm-starting from 100 to 200 estimators equals a direct 200-tree fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)
    direct = Cls(n_estimators=200, max_depth=1, random_state=global_random_seed)
    direct.fit(X, y)
    resumed = Cls(
        n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed
    )
    resumed.fit(X, y)
    resumed.set_params(n_estimators=200)
    resumed.fit(X, y)
    if Cls is GradientBoostingRegressor:
        assert_allclose(resumed.predict(X), direct.predict(X))
    else:
        # The random state is preserved across the warm start, so hard
        # predictions and probabilities must both match.
        assert_array_equal(resumed.predict(X), direct.predict(X))
        assert_allclose(resumed.predict_proba(X), direct.predict_proba(X))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_n_estimators(Cls, global_random_seed):
    """Growing a warm-started ensemble to 300 trees matches a direct fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)
    direct = Cls(n_estimators=300, max_depth=1, random_state=global_random_seed)
    direct.fit(X, y)
    resumed = Cls(
        n_estimators=100, max_depth=1, warm_start=True, random_state=global_random_seed
    )
    resumed.fit(X, y)
    resumed.set_params(n_estimators=300)
    resumed.fit(X, y)
    assert_allclose(resumed.predict(X), direct.predict(X))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_max_depth(Cls):
    """Trees of different depths can coexist in one warm-started ensemble."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1, warm_start=True)
    model.fit(X, y)
    model.set_params(n_estimators=110, max_depth=2)
    model.fit(X, y)
    # The original trees keep depth 1; the 10 newly added ones use depth 2.
    assert model.estimators_[0, 0].max_depth == 1
    for offset in range(1, 11):
        assert model.estimators_[-offset, 0].max_depth == 2
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_clear(Cls):
    """Disabling warm_start clears previously fitted state on refit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    reference = Cls(n_estimators=100, max_depth=1)
    reference.fit(X, y)
    candidate = Cls(n_estimators=100, max_depth=1, warm_start=True)
    candidate.fit(X, y)  # initializes state
    candidate.set_params(warm_start=False)
    candidate.fit(X, y)  # clears the old state; must now equal ``reference``
    assert_array_almost_equal(candidate.predict(X), reference.predict(X))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_smaller_n_estimators(Cls):
    """Shrinking n_estimators on a warm-started model must raise."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1, warm_start=True)
    model.fit(X, y)
    model.set_params(n_estimators=99)
    with pytest.raises(ValueError):
        model.fit(X, y)
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_equal_n_estimators(Cls):
    """Warm start with unchanged n_estimators leaves predictions unchanged."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1)
    model.fit(X, y)
    refit_model = clone(model)
    refit_model.set_params(n_estimators=model.n_estimators, warm_start=True)
    refit_model.fit(X, y)
    assert_array_almost_equal(refit_model.predict(X), model.predict(X))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_oob_switch(Cls):
    """OOB tracking can be enabled mid-way through a warm start."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = Cls(n_estimators=100, max_depth=1, warm_start=True)
    model.fit(X, y)
    # Resume with subsampling so OOB estimates exist for the new stages.
    model.set_params(n_estimators=110, subsample=0.5)
    model.fit(X, y)
    # The first 100 stages were fit without subsampling: improvement is zero.
    assert_array_equal(model.oob_improvement_[:100], np.zeros(100))
    # All of the 10 new stages have a non-zero OOB improvement.
    assert_array_equal(model.oob_improvement_[-10:] == 0.0, np.zeros(10, dtype=bool))
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_oob(Cls):
    """Warm-started OOB improvements match those of a direct fit."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    direct = Cls(n_estimators=200, max_depth=1, subsample=0.5, random_state=1)
    direct.fit(X, y)
    resumed = Cls(
        n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True
    )
    resumed.fit(X, y)
    resumed.set_params(n_estimators=200)
    resumed.fit(X, y)
    # The first 100 stages should carry identical OOB estimates.
    assert_array_almost_equal(
        resumed.oob_improvement_[:100], direct.oob_improvement_[:100]
    )
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_sparse(Cls):
    # Test that all sparse matrix types are supported and that warm starting
    # on sparse input matches the dense reference.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    sparse_matrix_type = [csr_matrix, csc_matrix, coo_matrix]
    # dense reference: fit 100 trees, then resume to 200
    est_dense = Cls(
        n_estimators=100, max_depth=1, subsample=0.5, random_state=1, warm_start=True
    )
    est_dense.fit(X, y)
    est_dense.predict(X)
    est_dense.set_params(n_estimators=200)
    est_dense.fit(X, y)
    y_pred_dense = est_dense.predict(X)
    for sparse_constructor in sparse_matrix_type:
        X_sparse = sparse_constructor(X)
        # same warm-start sequence on the sparse representation
        est_sparse = Cls(
            n_estimators=100,
            max_depth=1,
            subsample=0.5,
            random_state=1,
            warm_start=True,
        )
        est_sparse.fit(X_sparse, y)
        est_sparse.predict(X)
        est_sparse.set_params(n_estimators=200)
        est_sparse.fit(X_sparse, y)
        y_pred_sparse = est_sparse.predict(X)
        # OOB estimates and predictions must match the dense reference
        assert_array_almost_equal(
            est_dense.oob_improvement_[:100], est_sparse.oob_improvement_[:100]
        )
        assert_array_almost_equal(y_pred_dense, y_pred_sparse)
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_warm_start_fortran(Cls, global_random_seed):
    """Warm starting on Fortran-ordered X matches the C-ordered result."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=global_random_seed)

    def fit_resumed(estimator, data):
        # Fit one tree, then warm-start up to 11 trees.
        estimator.fit(data, y)
        estimator.set_params(n_estimators=11)
        estimator.fit(data, y)
        return estimator

    est_c = fit_resumed(
        Cls(n_estimators=1, random_state=global_random_seed, warm_start=True), X
    )
    est_fortran = fit_resumed(
        Cls(n_estimators=1, random_state=global_random_seed, warm_start=True),
        np.asfortranarray(X),
    )
    assert_allclose(est_c.predict(X), est_fortran.predict(X))
def early_stopping_monitor(i, est, locals):
    """Monitor callback that requests early stopping on the 10th iteration."""
    return i == 9
@pytest.mark.parametrize("Cls", GRADIENT_BOOSTING_ESTIMATORS)
def test_monitor_early_stopping(Cls):
    # Test if monitor return value works: a True return stops boosting.
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
    est.fit(X, y, monitor=early_stopping_monitor)
    assert est.n_estimators == 20  # this is not altered
    # the monitor stopped training after 10 fitted stages
    assert est.estimators_.shape[0] == 10
    assert est.train_score_.shape[0] == 10
    assert est.oob_improvement_.shape[0] == 10
    # try refit: without the monitor, all 30 stages are fitted from scratch
    est.set_params(n_estimators=30)
    est.fit(X, y)
    assert est.n_estimators == 30
    assert est.estimators_.shape[0] == 30
    assert est.train_score_.shape[0] == 30
    # same scenario with warm_start enabled
    est = Cls(
        n_estimators=20, max_depth=1, random_state=1, subsample=0.5, warm_start=True
    )
    est.fit(X, y, monitor=early_stopping_monitor)
    assert est.n_estimators == 20
    assert est.estimators_.shape[0] == 10
    assert est.train_score_.shape[0] == 10
    assert est.oob_improvement_.shape[0] == 10
    # try refit: warm_start=False discards the early-stopped state
    est.set_params(n_estimators=30, warm_start=False)
    est.fit(X, y)
    assert est.n_estimators == 30
    assert est.train_score_.shape[0] == 30
    assert est.estimators_.shape[0] == 30
    assert est.oob_improvement_.shape[0] == 30
def test_complete_classification():
    """With max_leaf_nodes = k + 1, greedy trees grow to depth k with k+1 leaves."""
    from sklearn.tree._tree import TREE_LEAF

    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    model = GradientBoostingClassifier(
        n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1
    )
    model.fit(X, y)
    tree = model.estimators_[0, 0].tree_
    assert tree.max_depth == k
    # Count leaves: nodes whose left child is the TREE_LEAF sentinel.
    n_leaves = tree.children_left[tree.children_left == TREE_LEAF].shape[0]
    assert n_leaves == k + 1
def test_complete_regression():
    """With max_leaf_nodes = k + 1 and unbounded depth, trees have k+1 leaves."""
    from sklearn.tree._tree import TREE_LEAF

    k = 4
    model = GradientBoostingRegressor(
        n_estimators=20, max_depth=None, random_state=1, max_leaf_nodes=k + 1
    )
    model.fit(X_reg, y_reg)
    # Count leaves of the last tree: left child equals the TREE_LEAF sentinel.
    tree = model.estimators_[-1, 0].tree_
    n_leaves = tree.children_left[tree.children_left == TREE_LEAF].shape[0]
    assert n_leaves == k + 1
def test_zero_estimator_reg(global_random_seed):
    """init='zero' for regression beats a mean-predicting baseline."""
    baseline = DummyRegressor(strategy="mean").fit(X_reg, y_reg)
    mse_baseline = mean_squared_error(baseline.predict(X_reg), y_reg)
    model = GradientBoostingRegressor(
        n_estimators=5,
        max_depth=1,
        random_state=global_random_seed,
        init="zero",
        learning_rate=0.5,
    )
    model.fit(X_reg, y_reg)
    mse_gbdt = mean_squared_error(y_reg, model.predict(X_reg))
    assert mse_gbdt < mse_baseline
def test_zero_estimator_clf(global_random_seed):
    """init='zero' works for multiclass and binary classification alike."""
    X = iris.data
    y = np.array(iris.target)
    model = GradientBoostingClassifier(
        n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero"
    )
    model.fit(X, y)
    assert model.score(X, y) > 0.96
    # Collapse to a binary problem: class 0 vs. the rest.
    mask = y != 0
    y[mask] = 1
    y[~mask] = 0
    model = GradientBoostingClassifier(
        n_estimators=20, max_depth=1, random_state=global_random_seed, init="zero"
    )
    model.fit(X, y)
    assert model.score(X, y) > 0.96
@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_max_leaf_nodes_max_depth(GBEstimator):
    """max_depth is honoured whether or not max_leaf_nodes is also set."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    k = 4
    # With both constraints set, the depth cap still applies.
    model = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
    assert model.estimators_[0, 0].tree_.max_depth == 1
    # With max_depth alone, the trees are stumps as well.
    model = GBEstimator(max_depth=1).fit(X, y)
    assert model.estimators_[0, 0].tree_.max_depth == 1
@pytest.mark.parametrize("GBEstimator", GRADIENT_BOOSTING_ESTIMATORS)
def test_min_impurity_decrease(GBEstimator):
    """min_impurity_decrease is forwarded verbatim to every underlying tree."""
    X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
    model = GBEstimator(min_impurity_decrease=0.1)
    model.fit(X, y)
    # Only parameter plumbing is verified here; the tree tests cover the
    # actual effect of the parameter.
    assert all(tree.min_impurity_decrease == 0.1 for tree in model.estimators_.flat)
def test_warm_start_wo_nestimators_change():
    """warm_start with unchanged n_estimators is a no-op.

    Regression test for #3513.
    """
    model = GradientBoostingClassifier(n_estimators=10, warm_start=True)
    for _ in range(2):
        model.fit([[0, 1], [2, 3]], [0, 1])
        assert model.estimators_.shape[0] == 10
def test_probability_exponential(global_random_seed):
    """Probability predictions with loss='exponential' are well-formed."""
    # Predict probabilities.
    clf = GradientBoostingClassifier(
        loss="exponential", n_estimators=100, random_state=global_random_seed
    )
    # Calling predict_proba before fit must raise (not-fitted estimator).
    with pytest.raises(ValueError):
        clf.predict_proba(T)
    clf.fit(X, y)
    assert_array_equal(clf.predict(T), true_result)
    # check if probabilities are in [0, 1].
    y_proba = clf.predict_proba(T)
    assert np.all(y_proba >= 0.0)
    assert np.all(y_proba <= 1.0)
    # For the exponential loss, P(y=1|x) == expit(2 * decision_function(x)).
    score = clf.decision_function(T).ravel()
    assert_allclose(y_proba[:, 1], expit(2 * score))
    # derive predictions from probabilities
    y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
    assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
    """Zero sample weights must effectively drop samples (regression losses)."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    weights = [0, 0, 1, 1]
    for loss_name in ("huber", "squared_error", "absolute_error", "quantile"):
        reg = GradientBoostingRegressor(learning_rate=1.0, n_estimators=2, loss=loss_name)
        reg.fit(X, y, sample_weight=weights)
        # With the first two samples ignored, [1, 0] should predict high.
        assert reg.predict([[1, 0]])[0] > 0.5
def test_non_uniform_weights_toy_edge_case_clf():
    """Zero sample weights must effectively drop samples (classification losses)."""
    X = [[1, 0], [1, 0], [1, 0], [0, 1]]
    y = [0, 0, 1, 0]
    # ignore the first 2 training samples by setting their weight to 0
    weights = [0, 0, 1, 1]
    for loss_name in ("log_loss", "exponential"):
        clf = GradientBoostingClassifier(n_estimators=5, loss=loss_name)
        clf.fit(X, y, sample_weight=weights)
        # With the first two samples ignored, [1, 0] must be classified as 1.
        assert_array_equal(clf.predict([[1, 0]]), [1])
@skip_if_32bit
@pytest.mark.parametrize(
    "EstimatorClass", (GradientBoostingClassifier, GradientBoostingRegressor)
)
@pytest.mark.parametrize("sparse_matrix", (csr_matrix, csc_matrix, coo_matrix))
def test_sparse_input(EstimatorClass, sparse_matrix):
    """Fitting/predicting on sparse input must match the dense equivalent."""
    y, X = datasets.make_multilabel_classification(
        random_state=0, n_samples=50, n_features=1, n_classes=20
    )
    # Use only the first label column as a binary target.
    y = y[:, 0]
    X_sparse = sparse_matrix(X)
    dense = EstimatorClass(
        n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7
    ).fit(X, y)
    sparse = EstimatorClass(
        n_estimators=10, random_state=0, max_depth=2, min_impurity_decrease=1e-7
    ).fit(X_sparse, y)
    # Leaf assignments, predictions and feature importances must agree no
    # matter which format was used at fit or predict time.
    assert_array_almost_equal(sparse.apply(X), dense.apply(X))
    assert_array_almost_equal(sparse.predict(X), dense.predict(X))
    assert_array_almost_equal(sparse.feature_importances_, dense.feature_importances_)
    assert_array_almost_equal(sparse.predict(X_sparse), dense.predict(X))
    assert_array_almost_equal(dense.predict(X_sparse), sparse.predict(X))
    if issubclass(EstimatorClass, GradientBoostingClassifier):
        # Classifier-only prediction APIs must agree as well.
        assert_array_almost_equal(sparse.predict_proba(X), dense.predict_proba(X))
        assert_array_almost_equal(
            sparse.predict_log_proba(X), dense.predict_log_proba(X)
        )
        assert_array_almost_equal(
            sparse.decision_function(X_sparse), sparse.decision_function(X)
        )
        assert_array_almost_equal(
            dense.decision_function(X_sparse), sparse.decision_function(X)
        )
        # Staged (per-iteration) decision values must also be format-agnostic.
        for res_sparse, res in zip(
            sparse.staged_decision_function(X_sparse),
            sparse.staged_decision_function(X),
        ):
            assert_array_almost_equal(res_sparse, res)
def test_gradient_boosting_early_stopping():
    """Early stopping (n_iter_no_change) halts at the expected iteration."""
    X, y = make_classification(n_samples=1000, random_state=0)
    gbc = GradientBoostingClassifier(
        n_estimators=1000,
        n_iter_no_change=10,
        learning_rate=0.1,
        max_depth=3,
        random_state=42,
    )
    gbr = GradientBoostingRegressor(
        n_estimators=1000,
        n_iter_no_change=10,
        learning_rate=0.1,
        max_depth=3,
        random_state=42,
    )
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    # Check if early_stopping works as expected
    # NOTE: the expected stopping iterations are hard-coded for these exact
    # seeds and tolerances.
    for est, tol, early_stop_n_estimators in (
        (gbc, 1e-1, 28),
        (gbr, 1e-1, 13),
        (gbc, 1e-3, 70),
        (gbr, 1e-3, 28),
    ):
        est.set_params(tol=tol)
        est.fit(X_train, y_train)
        assert est.n_estimators_ == early_stop_n_estimators
        assert est.score(X_test, y_test) > 0.7
    # Without early stopping
    # With n_iter_no_change unset, all requested estimators are trained.
    gbc = GradientBoostingClassifier(
        n_estimators=50, learning_rate=0.1, max_depth=3, random_state=42
    )
    gbc.fit(X, y)
    gbr = GradientBoostingRegressor(
        n_estimators=30, learning_rate=0.1, max_depth=3, random_state=42
    )
    gbr.fit(X, y)
    assert gbc.n_estimators_ == 50
    assert gbr.n_estimators_ == 30
def test_gradient_boosting_validation_fraction():
    """validation_fraction and n_iter_no_change both affect early stopping."""
    X, y = make_classification(n_samples=1000, random_state=0)
    gbc = GradientBoostingClassifier(
        n_estimators=100,
        n_iter_no_change=10,
        validation_fraction=0.1,
        learning_rate=0.1,
        max_depth=3,
        random_state=42,
    )
    # Same estimator with a larger validation split / more patience.
    gbc2 = clone(gbc).set_params(validation_fraction=0.3)
    gbc3 = clone(gbc).set_params(n_iter_no_change=20)
    gbr = GradientBoostingRegressor(
        n_estimators=100,
        n_iter_no_change=10,
        learning_rate=0.1,
        max_depth=3,
        validation_fraction=0.1,
        random_state=42,
    )
    gbr2 = clone(gbr).set_params(validation_fraction=0.3)
    gbr3 = clone(gbr).set_params(n_iter_no_change=20)
    X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
    # Check if validation_fraction has an effect
    gbc.fit(X_train, y_train)
    gbc2.fit(X_train, y_train)
    assert gbc.n_estimators_ != gbc2.n_estimators_
    gbr.fit(X_train, y_train)
    gbr2.fit(X_train, y_train)
    assert gbr.n_estimators_ != gbr2.n_estimators_
    # Check if n_estimators_ increase monotonically with n_iter_no_change
    # Set validation
    gbc3.fit(X_train, y_train)
    gbr3.fit(X_train, y_train)
    assert gbr.n_estimators_ < gbr3.n_estimators_
    assert gbc.n_estimators_ < gbc3.n_estimators_
def test_early_stopping_stratified():
    """The early-stopping validation split must be stratified by class."""
    X_toy = [[1, 2], [2, 3], [3, 4], [4, 5]]
    y_toy = [0, 0, 0, 1]
    gbc = GradientBoostingClassifier(n_iter_no_change=5)
    # With a single member in class 1, a stratified split is impossible and
    # fitting must raise a clear error.
    expected_msg = "The least populated class in y has only 1 member"
    with pytest.raises(ValueError, match=expected_msg):
        gbc.fit(X_toy, y_toy)
def _make_multiclass():
    """Build a small three-class dataset for the init-estimator tests."""
    return make_classification(n_clusters_per_class=1, n_classes=3)
@pytest.mark.parametrize(
    "gb, dataset_maker, init_estimator",
    [
        (GradientBoostingClassifier, make_classification, DummyClassifier),
        (GradientBoostingClassifier, _make_multiclass, DummyClassifier),
        (GradientBoostingRegressor, make_regression, DummyRegressor),
    ],
    ids=["binary classification", "multiclass classification", "regression"],
)
def test_gradient_boosting_with_init(
    gb, dataset_maker, init_estimator, global_random_seed
):
    # Check that GradientBoostingRegressor works when init is a sklearn
    # estimator.
    # Check that an error is raised if trying to fit with sample weight but
    # initial estimator does not support sample weight
    X, y = dataset_maker()
    # NOTE(review): the weight vector of length 100 assumes the dataset
    # makers return their default of 100 samples -- confirm if that changes.
    sample_weight = np.random.RandomState(global_random_seed).rand(100)
    # init supports sample weights
    init_est = init_estimator()
    gb(init=init_est).fit(X, y, sample_weight=sample_weight)
    # init does not support sample weights
    init_est = NoSampleWeightWrapper(init_estimator())
    gb(init=init_est).fit(X, y)  # ok no sample weights
    with pytest.raises(ValueError, match="estimator.*does not support sample weights"):
        gb(init=init_est).fit(X, y, sample_weight=sample_weight)
def test_gradient_boosting_with_init_pipeline():
    """A Pipeline is accepted as init, with clear sample_weight errors."""
    # Check that the init estimator can be a pipeline (see issue #13466)
    X, y = make_regression(random_state=0)
    init = make_pipeline(LinearRegression())
    gb = GradientBoostingRegressor(init=init)
    gb.fit(X, y)  # pipeline without sample_weight works fine
    with pytest.raises(
        ValueError,
        match="The initial estimator Pipeline does not support sample weights",
    ):
        gb.fit(X, y, sample_weight=np.ones(X.shape[0]))
    # Passing sample_weight to a pipeline raises a ValueError. This test makes
    # sure we make the distinction between ValueError raised by a pipeline that
    # was passed sample_weight, and a ValueError raised by a regular estimator
    # whose input checking failed.
    invalid_nu = 1.5
    err_msg = (
        "The 'nu' parameter of NuSVR must be a float in the"
        f" range (0.0, 1.0]. Got {invalid_nu} instead."
    )
    with pytest.raises(ValueError, match=re.escape(err_msg)):
        # Note that NuSVR properly supports sample_weight
        init = NuSVR(gamma="auto", nu=invalid_nu)
        gb = GradientBoostingRegressor(init=init)
        gb.fit(X, y, sample_weight=np.ones(X.shape[0]))
def test_early_stopping_n_classes():
    """Early stopping must fail loudly when the train split loses a class.

    When early stopping splits off a validation set
    (_, _, y_train, _ = train_test_split(X, y)), classes present in y can be
    missing from y_train.  As the init estimator is trained on y_train only,
    fitting must raise a clear error in that case -- and succeed once the
    training split is large enough to keep every class.
    """
    X = [[1]] * 10
    y = [0, 0] + [1] * 8  # only 2 negative class over 10 samples
    gb = GradientBoostingClassifier(
        n_iter_no_change=5, random_state=0, validation_fraction=0.8
    )
    with pytest.raises(
        ValueError, match="The training data after the early stopping split"
    ):
        gb.fit(X, y)
    # No error if we let training data be big enough
    gb = GradientBoostingClassifier(
        n_iter_no_change=5, random_state=0, validation_fraction=0.4
    )
    # Bug fix: the original never called fit here, so the "big enough split"
    # happy path was not actually exercised.
    gb.fit(X, y)  # no error
def test_gbr_degenerate_feature_importances():
    """All-constant X yields an ensemble of no-split stumps and zero
    feature importances.  Non-regression test for #13620."""
    n = 10
    X_const = np.zeros((n, n))
    y_const = np.ones((n,))
    model = GradientBoostingRegressor().fit(X_const, y_const)
    assert_array_equal(model.feature_importances_, np.zeros(n, dtype=np.float64))
# TODO(1.3): Remove
def test_loss_deprecated():
    """loss='deviance' warns and fits identically to loss='log_loss'."""
    est1 = GradientBoostingClassifier(loss="deviance", random_state=0)
    with pytest.warns(FutureWarning, match=r"The loss.* 'deviance' was deprecated"):
        est1.fit(X, y)
    est2 = GradientBoostingClassifier(loss="log_loss", random_state=0)
    est2.fit(X, y)
    # Both spellings must produce the same fitted model.
    assert_allclose(est1.predict(X), est2.predict(X))
# TODO(1.3): remove
@pytest.mark.parametrize(
    "Estimator", [GradientBoostingClassifier, GradientBoostingRegressor]
)
def test_loss_attribute_deprecation(Estimator):
    """Accessing the deprecated ``loss_`` attribute emits a FutureWarning."""
    X_toy = np.array([[1, 2], [3, 4]])
    y_toy = np.array([1, 0])
    fitted = Estimator().fit(X_toy, y_toy)
    with pytest.warns(FutureWarning, match="`loss_` was deprecated"):
        fitted.loss_
| {
"content_hash": "52b6c5de6c514f5c678be7d825c71a55",
"timestamp": "",
"source": "github",
"line_count": 1325,
"max_line_length": 88,
"avg_line_length": 34.16075471698113,
"alnum_prop": 0.6601197446037602,
"repo_name": "TomDLT/scikit-learn",
"id": "cbe56578e45c540ca64f6d565ffbd87ded564527",
"size": "45263",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "sklearn/ensemble/tests/test_gradient_boosting.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "42335"
},
{
"name": "C++",
"bytes": "147316"
},
{
"name": "Cython",
"bytes": "667491"
},
{
"name": "Makefile",
"bytes": "1644"
},
{
"name": "Python",
"bytes": "10429796"
},
{
"name": "Shell",
"bytes": "43325"
}
],
"symlink_target": ""
} |
class MufCompileError(Exception):
    """Raised when MUF source code fails to compile."""
class MufRuntimeError(Exception):
    """Raised when a MUF program fails during execution."""
class MufBreakExecution(Exception):
    """Control-flow exception used to break out of MUF execution."""
class ReloadAsMuvException(Exception):
    """Signals that the current program should be reloaded as MUV source."""
# vim: expandtab tabstop=4 shiftwidth=4 softtabstop=4 nowrap
| {
"content_hash": "7afd290de1fe9df9b907b626e037b6aa",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 60,
"avg_line_length": 14.588235294117647,
"alnum_prop": 0.7580645161290323,
"repo_name": "revarbat/mufsim",
"id": "3827809dece471ba89b8cacda9a98861fd727180",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mufsim/errors.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Forth",
"bytes": "30297"
},
{
"name": "Makefile",
"bytes": "1027"
},
{
"name": "Python",
"bytes": "471958"
},
{
"name": "Shell",
"bytes": "606"
}
],
"symlink_target": ""
} |
"""This file is only retained for backwards compatibility.
It will be removed in the future. sre was moved to re in version 2.5.
"""
import warnings
warnings.warn("The sre module is deprecated, please import re.",
DeprecationWarning, 2)
from re import *
from re import __all__
# old pickles expect the _compile() reconstructor in this module
from re import _compile
| {
"content_hash": "2f385280170dffa6820f5befa972fbce",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 70,
"avg_line_length": 27.5,
"alnum_prop": 0.7246753246753247,
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"id": "14c484b9c098564bfd880aff3ad23dbf09add94f",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/src/virtualenv/Lib/sre.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "427445"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "280650"
},
{
"name": "D",
"bytes": "9679"
},
{
"name": "HTML",
"bytes": "37335"
},
{
"name": "Java",
"bytes": "740594"
},
{
"name": "JavaScript",
"bytes": "1801741"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "2631176"
},
{
"name": "Shell",
"bytes": "12283"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import datetime
from django.conf import settings
from django.http import HttpResponse
from django.test import TestCase
from django.utils import timezone
from mock import patch, MagicMock
from zerver.lib.test_helpers import MockLDAP
from confirmation.models import Confirmation
from zilencer.models import Deployment
from zerver.forms import HomepageForm, WRONG_SUBDOMAIN_ERROR
from zerver.lib.actions import do_change_password
from zerver.views.invite import get_invitee_emails_set
from zerver.views.registration import confirmation_key
from zerver.models import (
get_realm, get_prereg_user_by_email, get_user_profile_by_email,
get_unique_open_realm, completely_open,
PreregistrationUser, Realm, RealmAlias, Recipient,
Referral, ScheduledJob, UserProfile, UserMessage,
Stream, Subscription, ScheduledJob
)
from zerver.management.commands.deliver_email import send_email_job
from zerver.lib.actions import (
set_default_streams,
do_change_is_admin,
get_stream
)
from zerver.lib.initial_password import initial_password
from zerver.lib.actions import do_deactivate_realm, do_set_realm_default_language, \
add_new_user_history
from zerver.lib.digest import send_digest_email
from zerver.lib.notifications import (
enqueue_welcome_emails, one_click_unsubscribe_link, send_local_email_template_with_delay)
from zerver.lib.test_helpers import find_pattern_in_email, find_key_by_email, queries_captured, \
HostRequestMock
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zerver.lib.test_runner import slow
from zerver.lib.sessions import get_session_dict_user
from zerver.context_processors import common_context
import re
import ujson
from typing import Dict, List, Set, Optional
from six.moves import urllib
from six.moves import range
from typing import Any, Text
import os
class AddNewUserHistoryTest(ZulipTestCase):
    def test_add_new_user_history_race(self):
        # type: () -> None
        """Sends a message during user creation"""
        # Create a user who hasn't had historical messages added
        stream_dict = {
            "Denmark": {"description": "A Scandinavian country", "invite_only": False},
            "Verona": {"description": "A city in Italy", "invite_only": False}
        } # type: Dict[Text, Dict[Text, Any]]
        set_default_streams(get_realm("zulip"), stream_dict)
        # Patch out the history backfill so registration completes without it,
        # reproducing the race where a message arrives before the backfill.
        with patch("zerver.lib.actions.add_new_user_history"):
            self.register("test@zulip.com", "test")
        user_profile = get_user_profile_by_email("test@zulip.com")
        subs = Subscription.objects.select_related("recipient").filter(
            user_profile=user_profile, recipient__type=Recipient.STREAM)
        streams = Stream.objects.filter(id__in=[sub.recipient.type_id for sub in subs])
        # A message lands on a subscribed stream before history is added...
        self.send_message("hamlet@zulip.com", streams[0].name, Recipient.STREAM, "test")
        # ...and the deferred backfill must still succeed.
        add_new_user_history(user_profile, streams)
class PasswordResetTest(ZulipTestCase):
    """
    Log in, reset password, log out, log in with new password.
    """
    def test_password_reset(self):
        # type: () -> None
        """End-to-end reset: request the email, follow the link, set a new
        password, and confirm the old one stops working."""
        email = 'hamlet@zulip.com'
        old_password = initial_password(email)
        self.login(email)
        # test password reset template
        result = self.client_get('/accounts/password/reset/')
        self.assert_in_response('Reset your password.', result)
        # start the password reset process by supplying an email address
        result = self.client_post('/accounts/password/reset/', {'email': email})
        # check the redirect link telling you to check mail for password reset link
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/password/reset/done/"))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email to finish the process.", result)
        # Visit the password reset link.
        password_reset_url = self.get_confirmation_url_from_outbox(email, "(\S+)")
        result = self.client_get(password_reset_url)
        self.assertEqual(result.status_code, 200)
        # Reset your password
        result = self.client_post(password_reset_url,
                                  {'new_password1': 'new_password',
                                   'new_password2': 'new_password'})
        # password reset succeeded
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith("/password/done/"))
        # log back in with new password
        self.login(email, password='new_password')
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        # make sure old password no longer works
        self.login(email, password=old_password, fails=True)
    def test_redirect_endpoints(self):
        # type: () -> None
        '''
        These tests are mostly designed to give us 100% URL coverage
        in our URL coverage reports.  Our mechanism for finding URL
        coverage doesn't handle redirects, so we just have a few quick
        tests here.
        '''
        result = self.client_get('/accounts/password/reset/done/')
        self.assert_in_success_response(["Check your email"], result)
        result = self.client_get('/accounts/password/done/')
        self.assert_in_success_response(["We've reset your password!"], result)
        result = self.client_get('/accounts/send_confirm/alice@example.com')
        self.assert_in_success_response(["Still no email?"], result)
class LoginTest(ZulipTestCase):
    """
    Logging in, registration, and logging out.
    """
    def test_login(self):
        # type: () -> None
        """A valid login attaches the user to the session."""
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_login_bad_password(self):
        # type: () -> None
        """A wrong password leaves the session unauthenticated."""
        self.login("hamlet@zulip.com", password="wrongpassword", fails=True)
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_login_nonexist_user(self):
        # type: () -> None
        """Logging in with an unknown email shows the standard error."""
        result = self.login_with_return("xxx@zulip.com", "xxx")
        self.assert_in_response("Please enter a correct email and password", result)
    def test_register(self):
        # type: () -> None
        """Registration succeeds and does not issue O(streams) queries."""
        realm = get_realm("zulip")
        stream_dict = {"stream_"+str(i): {"description": "stream_%s_description" % i, "invite_only": False}
                       for i in range(40)} # type: Dict[Text, Dict[Text, Any]]
        for stream_name in stream_dict.keys():
            self.make_stream(stream_name, realm=realm)
        set_default_streams(realm, stream_dict)
        with queries_captured() as queries:
            self.register("test@zulip.com", "test")
        # Ensure the number of queries we make is not O(streams)
        self.assert_max_length(queries, 69)
        user_profile = get_user_profile_by_email('test@zulip.com')
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        self.assertFalse(user_profile.enable_stream_desktop_notifications)
    def test_register_deactivated(self):
        # type: () -> None
        """
        If you try to register for a deactivated realm, you get a clear error
        page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])
        result = self.register("test@zulip.com", "test")
        self.assert_in_response("has been deactivated", result)
        # No account may be created in a deactivated realm.
        with self.assertRaises(UserProfile.DoesNotExist):
            get_user_profile_by_email('test@zulip.com')
    def test_login_deactivated(self):
        # type: () -> None
        """
        If you try to log in to a deactivated realm, you get a clear error page.
        """
        realm = get_realm("zulip")
        realm.deactivated = True
        realm.save(update_fields=["deactivated"])
        result = self.login_with_return("hamlet@zulip.com")
        self.assert_in_response("has been deactivated", result)
    def test_logout(self):
        # type: () -> None
        """Logging out clears the session's user."""
        self.login("hamlet@zulip.com")
        self.client_post('/accounts/logout/')
        self.assertIsNone(get_session_dict_user(self.client.session))
    def test_non_ascii_login(self):
        # type: () -> None
        """
        You can log in even if your password contain non-ASCII characters.
        """
        email = "test@zulip.com"
        password = u"hümbüǵ"
        # Registering succeeds.
        self.register("test@zulip.com", password)
        user_profile = get_user_profile_by_email(email)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
        self.client_post('/accounts/logout/')
        self.assertIsNone(get_session_dict_user(self.client.session))
        # Logging in succeeds.
        self.client_post('/accounts/logout/')
        self.login(email, password)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
    def test_login_page_redirects_logged_in_user(self):
        # type: () -> None
        """You will be redirected to the app's main page if you land on the
        login page when already logged in.
        """
        self.login("cordelia@zulip.com")
        response = self.client_get("/login/")
        self.assertEqual(response["Location"], "/")
class InviteUserTest(ZulipTestCase):
    def invite(self, users, streams, body=''):
        # type: (str, List[Text], str) -> HttpResponse
        """
        Invites the specified users to Zulip with the specified streams.
        users should be a string containing the users to invite, comma or
        newline separated.
        streams should be a list of strings.
        """
        return self.client_post("/json/invite_users",
                                {"invitee_emails": users,
                                 "stream": streams,
                                 "custom_body": body})
    def check_sent_emails(self, correct_recipients, custom_body=None):
        # type: (List[str], Optional[str]) -> None
        """Assert exactly the given addresses were emailed, and that the
        optional custom body was (or was not) included in the first email."""
        from django.core.mail import outbox
        self.assertEqual(len(outbox), len(correct_recipients))
        email_recipients = [email.recipients()[0] for email in outbox]
        self.assertEqual(sorted(email_recipients), sorted(correct_recipients))
        if len(outbox) == 0:
            return
        if custom_body is None:
            self.assertNotIn("Message from", outbox[0].body)
        else:
            self.assertIn("Message from ", outbox[0].body)
            self.assertIn(custom_body, outbox[0].body)
    def test_bulk_invite_users(self):
        # type: () -> None
        """The bulk_invite_users code path is for the first user in a realm."""
        self.login('hamlet@zulip.com')
        invitees = ['alice@zulip.com', 'bob@zulip.com']
        params = {
            'invitee_emails': ujson.dumps(invitees),
        }
        result = self.client_post('/json/invite/bulk', params)
        self.assert_json_success(result)
        self.check_sent_emails(invitees)
    def test_bulk_invite_users_invalid_emails(self):
        # type: () -> None
        """One invalid address aborts the whole bulk invite; nothing is sent."""
        self.login('hamlet@zulip.com')
        invitees = ['alice@zulip.com', 'bobnoatzulip.com']
        params = {
            'invitee_emails': ujson.dumps(invitees),
        }
        self.assert_json_error(
            self.client_post('/json/invite/bulk', params),
            'Some emails did not validate, so we didn\'t send any invitations.')
        self.check_sent_emails([])
    def test_successful_invite_user(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])
    def test_successful_invite_user_with_custom_body(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"
        body = "Custom Text."
        self.assert_json_success(self.invite(invitee, ["Denmark"], body))
        self.assertTrue(find_pattern_in_email(invitee, body))
        self.check_sent_emails([invitee], custom_body=body)
    def test_successful_invite_user_with_name(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        email = "alice-test@zulip.com"
        invitee = "Alice Test <{}>".format(email)
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.check_sent_emails([email])
    def test_successful_invite_user_with_name_and_normal_one(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters causes an invitation
        email to be sent.
        """
        self.login("hamlet@zulip.com")
        email = "alice-test@zulip.com"
        email2 = "bob-test@zulip.com"
        invitee = "Alice Test <{}>, {}".format(email, email2)
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(email))
        self.assertTrue(find_key_by_email(email2))
        self.check_sent_emails([email, email2])
    def test_successful_invite_user_with_notifications_stream(self):
        # type: () -> None
        """
        A call to /json/invite_users with valid parameters unconditionally
        subscribes the invitee to the notifications stream if it exists and is
        public.
        """
        realm = get_realm('zulip')
        notifications_stream = get_stream('Verona', realm)
        realm.notifications_stream = notifications_stream
        realm.save()
        self.login('hamlet@zulip.com')
        invitee = 'alice-test@zulip.com'
        self.assert_json_success(self.invite(invitee, ['Denmark']))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])
        # The notifications stream must be among the pre-registered streams.
        prereg_user = get_prereg_user_by_email(invitee)
        streams = list(prereg_user.streams.all())
        self.assertTrue(notifications_stream in streams)
    def test_invite_user_signup_initial_history(self):
        # type: () -> None
        """
        Test that a new user invited to a stream receives some initial
        history but only from public streams.
        """
        self.login("hamlet@zulip.com")
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        private_stream_name = "Secret"
        self.make_stream(private_stream_name, invite_only=True)
        self.subscribe_to_stream(user_profile.email, private_stream_name)
        public_msg_id = self.send_message("hamlet@zulip.com", "Denmark", Recipient.STREAM,
                                          "Public topic", "Public message")
        secret_msg_id = self.send_message("hamlet@zulip.com", private_stream_name, Recipient.STREAM,
                                          "Secret topic", "Secret message")
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, [private_stream_name, "Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.submit_reg_form_for_user("alice-test@zulip.com", "password")
        invitee_profile = get_user_profile_by_email(invitee)
        invitee_msg_ids = [um.message_id for um in
                           UserMessage.objects.filter(user_profile=invitee_profile)]
        # Public history is backfilled; private history must not leak.
        self.assertTrue(public_msg_id in invitee_msg_ids)
        self.assertFalse(secret_msg_id in invitee_msg_ids)
    def test_multi_user_invite(self):
        # type: () -> None
        """
        Invites multiple users with a variety of delimiters.
        """
        self.login("hamlet@zulip.com")
        # Intentionally use a weird string.
        self.assert_json_success(self.invite(
            """bob-test@zulip.com, carol-test@zulip.com,
            dave-test@zulip.com
            earl-test@zulip.com""", ["Denmark"]))
        for user in ("bob", "carol", "dave", "earl"):
            self.assertTrue(find_key_by_email("%s-test@zulip.com" % (user,)))
        self.check_sent_emails(["bob-test@zulip.com", "carol-test@zulip.com",
                                "dave-test@zulip.com", "earl-test@zulip.com"])
    def test_missing_or_invalid_params(self):
        # type: () -> None
        """
        Tests inviting with various missing or invalid parameters.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(
            self.client_post("/json/invite_users", {"invitee_emails": "foo@zulip.com",
                                                    "custom_body": ''}),
            "You must specify at least one stream for invitees to join.")
        for address in ("noatsign.com", "outsideyourdomain@example.net"):
            self.assert_json_error(
                self.invite(address, ["Denmark"]),
                "Some emails did not validate, so we didn't send any invitations.")
        self.check_sent_emails([])
        self.assert_json_error(
            self.invite("", ["Denmark"]),
            "You must specify at least one email address.")
        self.check_sent_emails([])
    def test_invalid_stream(self):
        # type: () -> None
        """
        Tests inviting to a non-existent stream.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(self.invite("iago-test@zulip.com", ["NotARealStream"]),
                               "Stream does not exist: NotARealStream. No invites were sent.")
        self.check_sent_emails([])
    def test_invite_existing_user(self):
        # type: () -> None
        """
        If you invite an address already using Zulip, no invitation is sent.
        """
        self.login("hamlet@zulip.com")
        self.assert_json_error(
            self.client_post("/json/invite_users",
                             {"invitee_emails": "hamlet@zulip.com",
                              "stream": ["Denmark"],
                              "custom_body": ''}),
            "We weren't able to invite anyone.")
        self.assertRaises(PreregistrationUser.DoesNotExist,
                          lambda: PreregistrationUser.objects.get(
                              email="hamlet@zulip.com"))
        self.check_sent_emails([])
    def test_invite_some_existing_some_new(self):
        # type: () -> None
        """
        If you invite a mix of already existing and new users, invitations are
        only sent to the new users.
        """
        self.login("hamlet@zulip.com")
        existing = ["hamlet@zulip.com", "othello@zulip.com"]
        new = ["foo-test@zulip.com", "bar-test@zulip.com"]
        result = self.client_post("/json/invite_users",
                                  {"invitee_emails": "\n".join(existing + new),
                                   "stream": ["Denmark"],
                                   "custom_body": ''})
        self.assert_json_error(result,
                               "Some of those addresses are already using Zulip, \
so we didn't send them an invitation. We did send invitations to everyone else!")
        # We only created accounts for the new users.
        for email in existing:
            self.assertRaises(PreregistrationUser.DoesNotExist,
                              lambda: PreregistrationUser.objects.get(
                                  email=email))
        for email in new:
            self.assertTrue(PreregistrationUser.objects.get(email=email))
        # We only sent emails to the new users.
        self.check_sent_emails(new)
        prereg_user = get_prereg_user_by_email('foo-test@zulip.com')
        self.assertEqual(prereg_user.email, 'foo-test@zulip.com')
    def test_invite_outside_domain_in_closed_realm(self):
        # type: () -> None
        """
        In a realm with `restricted_to_domain = True`, you can't invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.restricted_to_domain = True
        zulip_realm.save()
        self.login("hamlet@zulip.com")
        external_address = "foo@example.com"
        self.assert_json_error(
            self.invite(external_address, ["Denmark"]),
            "Some emails did not validate, so we didn't send any invitations.")
    def test_invite_outside_domain_in_open_realm(self):
        # type: () -> None
        """
        In a realm with `restricted_to_domain = False`, you can invite people
        with a different domain from that of the realm or your e-mail address.
        """
        zulip_realm = get_realm("zulip")
        zulip_realm.restricted_to_domain = False
        zulip_realm.save()
        self.login("hamlet@zulip.com")
        external_address = "foo@example.com"
        self.assert_json_success(self.invite(external_address, ["Denmark"]))
        self.check_sent_emails([external_address])
    def test_invite_with_non_ascii_streams(self):
        # type: () -> None
        """
        Inviting someone to streams with non-ASCII characters succeeds.
        """
        self.login("hamlet@zulip.com")
        invitee = "alice-test@zulip.com"
        stream_name = u"hümbüǵ"
        # Make sure we're subscribed before inviting someone.
        self.subscribe_to_stream("hamlet@zulip.com", stream_name)
        self.assert_json_success(self.invite(invitee, [stream_name]))
    def test_refer_friend(self):
        # type: () -> None
        """Referring a friend records a Referral and spends one invite."""
        self.login("hamlet@zulip.com")
        user = get_user_profile_by_email('hamlet@zulip.com')
        user.invites_granted = 1
        user.invites_used = 0
        user.save()
        invitee = "alice-test@zulip.com"
        result = self.client_post('/json/refer_friend', dict(email=invitee))
        self.assert_json_success(result)
        # verify this works
        Referral.objects.get(user_profile=user, email=invitee)
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.invites_used, 1)
    def test_refer_friend_no_email(self):
        # type: () -> None
        """An empty email is rejected and no invite is consumed."""
        self.login("hamlet@zulip.com")
        user = get_user_profile_by_email('hamlet@zulip.com')
        user.invites_granted = 1
        user.invites_used = 0
        user.save()
        self.assert_json_error(
            self.client_post('/json/refer_friend', dict(email='')),
            "No email address specified")
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.invites_used, 0)
    def test_refer_friend_no_invites(self):
        # type: () -> None
        """Referrals fail once the user's invite quota is exhausted."""
        self.login("hamlet@zulip.com")
        user = get_user_profile_by_email('hamlet@zulip.com')
        user.invites_granted = 1
        user.invites_used = 1
        user.save()
        invitee = "alice-test@zulip.com"
        self.assert_json_error(
            self.client_post('/json/refer_friend', dict(email=invitee)),
            "Insufficient invites")
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertEqual(user.invites_used, 1)
    def test_invitation_reminder_email(self):
        # type: () -> None
        """A scheduled invitation reminder job is created and delivered."""
        from django.core.mail import outbox
        current_user_email = "hamlet@zulip.com"
        self.login(current_user_email)
        invitee = "alice-test@zulip.com"
        self.assert_json_success(self.invite(invitee, ["Denmark"]))
        self.assertTrue(find_key_by_email(invitee))
        self.check_sent_emails([invitee])
        data = {"email": invitee, "referrer_email": current_user_email}
        invitee = get_prereg_user_by_email(data["email"])
        referrer = get_user_profile_by_email(data["referrer_email"])
        link = Confirmation.objects.get_link_for_object(invitee, host=referrer.realm.host)
        context = common_context(referrer)
        context.update({
            'activate_url': link,
            'referrer': referrer,
            'verbose_support_offers': settings.VERBOSE_SUPPORT_OFFERS,
            'support_email': settings.ZULIP_ADMINISTRATOR
        })
        # Queue the reminder with zero delay so it is due immediately.
        with self.settings(EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend'):
            send_local_email_template_with_delay(
                [{'email': data["email"], 'name': ""}],
                "zerver/emails/invitation/invitation_reminder_email",
                context,
                datetime.timedelta(days=0),
                tags=["invitation-reminders"],
                sender={'email': settings.ZULIP_ADMINISTRATOR, 'name': 'Zulip'})
        email_jobs_to_deliver = ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL,
            scheduled_timestamp__lte=timezone.now())
        self.assertEqual(len(email_jobs_to_deliver), 1)
        email_count = len(outbox)
        # Delivering the job must add exactly one email to the outbox.
        for job in email_jobs_to_deliver:
            self.assertTrue(send_email_job(job))
        self.assertEqual(len(outbox), email_count + 1)
class InviteeEmailsParserTests(TestCase):
    """Tests for get_invitee_emails_set, which parses a raw invitee string
    into a set of bare email addresses."""

    def setUp(self):
        # type: () -> None
        self.email1 = "email1@zulip.com"
        self.email2 = "email2@zulip.com"
        self.email3 = "email3@zulip.com"

    def _assert_parses_to_all_three(self, emails_raw):
        # type: (str) -> None
        # Shared assertion: parsing must yield exactly the three setUp emails.
        expected_set = {self.email1, self.email2, self.email3}
        self.assertEqual(get_invitee_emails_set(emails_raw), expected_set)

    def test_if_emails_separated_by_commas_are_parsed_and_striped_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "{} ,{}, {}".format(self.email1, self.email2, self.email3))

    def test_if_emails_separated_by_newlines_are_parsed_and_striped_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "{}\n {}\n {} ".format(self.email1, self.email2, self.email3))

    def test_if_emails_from_email_client_separated_by_newlines_are_parsed_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "Email One <{}>\nEmailTwo<{}>\nEmail Three<{}>".format(self.email1, self.email2, self.email3))

    def test_if_emails_in_mixed_style_are_parsed_correctly(self):
        # type: () -> None
        self._assert_parses_to_all_three(
            "Email One <{}>,EmailTwo<{}>\n{}".format(self.email1, self.email2, self.email3))
class EmailUnsubscribeTests(ZulipTestCase):
    """Tests for the one-click email unsubscribe endpoints, which must work
    even for logged-out users."""
    def test_error_unsubscribe(self):
        # type: () -> None
        # An invalid unsubscribe token "test123" produces an error.
        result = self.client_get('/accounts/unsubscribe/missed_messages/test123')
        self.assert_in_response('Unknown email unsubscribe request', result)
        # An unknown message type "fake" produces an error.
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "fake")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assert_in_response('Unknown email unsubscribe request', result)
    def test_missedmessage_unsubscribe(self):
        # type: () -> None
        """
        We provide one-click unsubscribe links in missed message
        e-mails that you can click even when logged out to update your
        email notification settings.
        """
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        user_profile.enable_offline_email_notifications = True
        user_profile.save()
        unsubscribe_link = one_click_unsubscribe_link(user_profile,
                                                      "missed_messages")
        # Only the URL path is requested -- no login session is needed.
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
        self.assertFalse(user_profile.enable_offline_email_notifications)
    def test_welcome_unsubscribe(self):
        # type: () -> None
        """
        We provide one-click unsubscribe links in welcome e-mails that you can
        click even when logged out to stop receiving them.
        """
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        # Simulate a new user signing up, which enqueues 2 welcome e-mails.
        enqueue_welcome_emails(email, "King Hamlet")
        self.assertEqual(2, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))
        # Simulate unsubscribing from the welcome e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "welcome")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        # The welcome email jobs are no longer scheduled.
        self.assertEqual(result.status_code, 200)
        self.assertEqual(0, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))
    def test_digest_unsubscribe(self):
        # type: () -> None
        """
        We provide one-click unsubscribe links in digest e-mails that you can
        click even when logged out to stop receiving them.
        Unsubscribing from these emails also dequeues any digest email jobs that
        have been queued.
        """
        email = "hamlet@zulip.com"
        user_profile = get_user_profile_by_email("hamlet@zulip.com")
        self.assertTrue(user_profile.enable_digest_emails)
        # Enqueue a fake digest email.
        send_digest_email(user_profile, "", "", "")
        self.assertEqual(1, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))
        # Simulate unsubscribing from digest e-mails.
        unsubscribe_link = one_click_unsubscribe_link(user_profile, "digest")
        result = self.client_get(urllib.parse.urlparse(unsubscribe_link).path)
        # The setting is toggled off, and scheduled jobs have been removed.
        self.assertEqual(result.status_code, 200)
        # Circumvent user_profile caching.
        user_profile = UserProfile.objects.get(email="hamlet@zulip.com")
        self.assertFalse(user_profile.enable_digest_emails)
        self.assertEqual(0, len(ScheduledJob.objects.filter(
            type=ScheduledJob.EMAIL, filter_string__iexact=email)))
class RealmCreationTest(ZulipTestCase):
    """Tests for the /create_realm/ flow (open realm creation)."""
    def test_create_realm(self):
        # type: () -> None
        """Happy path: email confirmation followed by registration creates a
        realm with the expected defaults."""
        password = "test"
        string_id = "zuliptest"
        email = "user1@test.com"
        realm = get_realm('test')
        # Make sure the realm does not exist
        self.assertIsNone(realm)
        with self.settings(OPEN_REALM_CREATION=True):
            # Create new realm with the email
            result = self.client_post('/create_realm/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertTrue(result["Location"].endswith(
                "/accounts/send_confirm/%s" % (email,)))
            result = self.client_get(result["Location"])
            self.assert_in_response("Check your email so we can get started.", result)
            # Visit the confirmation link.
            confirmation_url = self.get_confirmation_url_from_outbox(email)
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email, password, realm_subdomain=string_id)
            self.assertEqual(result.status_code, 302)
            # Make sure the realm is created
            realm = get_realm(string_id)
            self.assertIsNotNone(realm)
            self.assertEqual(realm.string_id, string_id)
            self.assertEqual(get_user_profile_by_email(email).realm, realm)
            # Check defaults
            self.assertEqual(realm.org_type, Realm.COMMUNITY)
            self.assertEqual(realm.restricted_to_domain, False)
            self.assertEqual(realm.invite_required, True)
            self.assertTrue(result["Location"].endswith("/"))
    def test_create_realm_existing_email(self):
        # type: () -> None
        """
        Trying to create a realm with an existing email should just redirect to
        a login page.
        """
        with self.settings(OPEN_REALM_CREATION=True):
            email = 'hamlet@zulip.com'
            result = self.client_post('/create_realm/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertIn('login', result['Location'])
    def test_create_realm_no_creation_key(self):
        # type: () -> None
        """
        Trying to create a realm without a creation_key should fail when
        OPEN_REALM_CREATION is false.
        """
        email = "user1@test.com"
        realm = get_realm('test')
        # Make sure the realm does not exist
        self.assertIsNone(realm)
        with self.settings(OPEN_REALM_CREATION=False):
            # Create new realm with the email, but no creation key.
            result = self.client_post('/create_realm/', {'email': email})
            self.assertEqual(result.status_code, 200)
            self.assert_in_response('New organization creation disabled.', result)
    def test_create_realm_with_subdomain(self):
        # type: () -> None
        """Realm creation with REALMS_HAVE_SUBDOMAINS: the realm gets the
        requested name and subdomain."""
        password = "test"
        string_id = "zuliptest"
        email = "user1@test.com"
        realm_name = "Test"
        # Make sure the realm does not exist
        self.assertIsNone(get_realm('test'))
        with self.settings(REALMS_HAVE_SUBDOMAINS=True), self.settings(OPEN_REALM_CREATION=True):
            # Create new realm with the email
            result = self.client_post('/create_realm/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertTrue(result["Location"].endswith(
                "/accounts/send_confirm/%s" % (email,)))
            result = self.client_get(result["Location"])
            self.assert_in_response("Check your email so we can get started.", result)
            # Visit the confirmation link.
            confirmation_url = self.get_confirmation_url_from_outbox(email)
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = string_id,
                                                   realm_name=realm_name,
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=string_id + ".testserver")
            self.assertEqual(result.status_code, 302)
            # Make sure the realm is created
            realm = get_realm(string_id)
            self.assertIsNotNone(realm)
            self.assertEqual(realm.string_id, string_id)
            self.assertEqual(get_user_profile_by_email(email).realm, realm)
            self.assertEqual(realm.name, realm_name)
            self.assertEqual(realm.subdomain, string_id)
    def test_mailinator_signup(self):
        # type: () -> None
        # Disposable-email domains are rejected at realm creation.
        with self.settings(OPEN_REALM_CREATION=True):
            result = self.client_post('/create_realm/', {'email': "hi@mailinator.com"})
            self.assert_in_response('Please use your real email address.', result)
    def test_subdomain_restrictions(self):
        # type: () -> None
        """Each invalid subdomain in `errors` must produce its specific
        validation message; a well-formed subdomain succeeds."""
        password = "test"
        email = "user1@test.com"
        realm_name = "Test"
        with self.settings(REALMS_HAVE_SUBDOMAINS=False), self.settings(OPEN_REALM_CREATION=True):
            result = self.client_post('/create_realm/', {'email': email})
            self.client_get(result["Location"])
            confirmation_url = self.get_confirmation_url_from_outbox(email)
            self.client_get(confirmation_url)
            # Map of bad subdomain -> expected error-message fragment.
            errors = {'id': "at least 3 characters",
                      '-id': "cannot start or end with a",
                      'string-ID': "lowercase letters",
                      'string_id': "lowercase letters",
                      'stream': "unavailable",
                      'streams': "unavailable",
                      'about': "unavailable",
                      'abouts': "unavailable",
                      'zephyr': "unavailable"}
            for string_id, error_msg in errors.items():
                result = self.submit_reg_form_for_user(email, password,
                                                       realm_subdomain = string_id,
                                                       realm_name = realm_name)
                self.assert_in_response(error_msg, result)
            # test valid subdomain
            result = self.submit_reg_form_for_user(email, password,
                                                   realm_subdomain = 'a-0',
                                                   realm_name = realm_name)
            self.assertEqual(result.status_code, 302)
class UserSignUpTest(ZulipTestCase):
    """Tests for the user signup/registration flow (/accounts/home/,
    /register/), including LDAP-backed and mirror-dummy registration."""

    def _find_confirmation_url(self, email):
        # type: (str) -> str
        """Scrape the confirmation URL for `email` from the most recent
        matching message in Django's test outbox.

        Raises AssertionError if no message addressed to `email` exists.
        This replaces four verbatim copies of the same scraping loop.
        """
        from django.core.mail import outbox
        for message in reversed(outbox):
            if email in message.to:
                # Raw string: same pattern as before, without relying on
                # "\S" passing through a non-raw literal unchanged.
                confirmation_link_pattern = re.compile(settings.EXTERNAL_HOST + r"(\S+)>")
                return confirmation_link_pattern.search(message.body).groups()[0]
        raise AssertionError("Couldn't find a confirmation email.")

    def test_user_default_language(self):
        # type: () -> None
        """
        Check if the default language of new user is the default language
        of the realm.
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        realm = get_realm('zulip')
        do_set_realm_default_language(realm, "de")
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(email, password)
        self.assertEqual(result.status_code, 302)
        user_profile = get_user_profile_by_email(email)
        self.assertEqual(user_profile.default_language, realm.default_language)
        # Pop the signup email so it does not leak into later outbox checks.
        from django.core.mail import outbox
        outbox.pop()

    def test_signup_already_active(self):
        # type: () -> None
        """
        Check if signing up with an active email redirects to a login page.
        """
        email = 'hamlet@zulip.com'
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertIn('login', result['Location'])

    def test_signup_invalid_name(self):
        # type: () -> None
        """
        Check if an invalid name during signup is handled properly.
        """
        email = "newguy@zulip.com"
        password = "newpassword"
        result = self.client_post('/accounts/home/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self.get_confirmation_url_from_outbox(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # Pick a password and agree to the ToS.
        result = self.submit_reg_form_for_user(email, password, full_name="<invalid>")
        # Fix: pass a list of substrings, matching every other call site of
        # assert_in_success_response; a bare str would be iterated per-character.
        self.assert_in_success_response(["Invalid characters in name!"], result)

    def test_unique_completely_open_domain(self):
        # type: () -> None
        """Signup via /register/ when only one completely open realm exists."""
        password = "test"
        email = "user1@acme.com"
        subdomain = "zulip"
        realm_name = "Zulip"
        realm = get_realm('zulip')
        realm.restricted_to_domain = False
        realm.invite_required = False
        realm.save()
        # Deactivate the other realms so 'zulip' is the unique open one.
        for string_id in ('simple', 'zephyr'):
            realm = get_realm(string_id)
            do_deactivate_realm(realm)
            realm.save()
        result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self._find_confirmation_url(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assert_in_success_response(["You're almost there."], result)

    def test_completely_open_domain_success(self):
        # type: () -> None
        """Signup via /register/zulip/ for a completely open realm."""
        password = "test"
        email = "user1@acme.com"
        subdomain = "zulip"
        realm_name = "Zulip"
        realm = get_realm('zulip')
        realm.restricted_to_domain = False
        realm.invite_required = False
        realm.save()
        result = self.client_post('/register/zulip/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self._find_confirmation_url(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assert_in_success_response(["You're almost there."], result)

    def test_failed_signup_with_realm_str(self):
        # type: () -> None
        """
        Signing up with the special accounts_home_with_realm_str endpoint should
        fail (i.e. redirect to the standard accounts_home) if
        settings.REALMS_HAVE_SUBDOMAINS is true, or if the realm is not
        completely open.
        """
        realm = get_realm('zulip')
        realm.restricted_to_domain = False
        realm.invite_required = False
        realm.save()
        with self.settings(REALMS_HAVE_SUBDOMAINS=True):
            email = 'user1@acme.com'
            result = self.client_post('/register/zulip/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertIn('accounts/home', result['Location'])
        realm = get_realm('zulip')
        realm.invite_required = True
        realm.save()
        with self.settings(REALMS_HAVE_SUBDOMAINS=False):
            email = 'user1@acme.com'
            result = self.client_post('/register/zulip/', {'email': email})
            self.assertEqual(result.status_code, 302)
            self.assertIn('accounts/home', result['Location'])

    def test_failed_signup_due_to_restricted_domain(self):
        # type: () -> None
        # A realm restricted to its domain rejects outside email addresses.
        realm = get_realm('zulip')
        realm.invite_required = False
        realm.save()
        with self.settings(REALMS_HAVE_SUBDOMAINS = True):
            request = HostRequestMock(host = realm.host)
            request.session = {}  # type: ignore
            form = HomepageForm({'email': 'user@acme.com'}, realm=realm)
            self.assertIn("trying to join, zulip, only allows users with e-mail", form.errors['email'][0])

    def test_failed_signup_due_to_invite_required(self):
        # type: () -> None
        # An invite-required realm rejects direct signup.
        realm = get_realm('zulip')
        realm.invite_required = True
        realm.save()
        request = HostRequestMock(host = realm.host)
        request.session = {}  # type: ignore
        form = HomepageForm({'email': 'user@zulip.com'}, realm=realm)
        self.assertIn("Please request an invite from", form.errors['email'][0])

    def test_failed_signup_due_to_nonexistent_realm(self):
        # type: () -> None
        with self.settings(REALMS_HAVE_SUBDOMAINS = True):
            request = HostRequestMock(host = 'acme.' + settings.EXTERNAL_HOST)
            request.session = {}  # type: ignore
            form = HomepageForm({'email': 'user@acme.com'}, realm=None)
            self.assertIn("organization you are trying to join does not exist", form.errors['email'][0])

    def test_registration_through_ldap(self):
        # type: () -> None
        """End-to-end signup with POPULATE_PROFILE_VIA_LDAP: the full name is
        prefilled from the mocked LDAP directory; a directory entry whose
        'fn' is None exercises the TypeError handler."""
        password = "testing"
        email = "newuser@zulip.com"
        subdomain = "zulip"
        realm_name = "Zulip"
        ldap_user_attr_map = {'full_name': 'fn', 'short_name': 'sn'}
        ldap_patcher = patch('django_auth_ldap.config.ldap.initialize')
        mock_initialize = ldap_patcher.start()
        mock_ldap = MockLDAP()
        mock_initialize.return_value = mock_ldap
        mock_ldap.directory = {
            'uid=newuser,ou=users,dc=zulip,dc=com': {
                'userPassword': 'testing',
                'fn': ['New User Name']
            }
        }
        with patch('zerver.views.registration.get_subdomain', return_value=subdomain):
            result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self._find_confirmation_url(email)
        with self.settings(
                POPULATE_PROFILE_VIA_LDAP=True,
                LDAP_APPEND_DOMAIN='zulip.com',
                AUTH_LDAP_BIND_PASSWORD='',
                AUTH_LDAP_USER_ATTR_MAP=ldap_user_attr_map,
                AUTHENTICATION_BACKENDS=('zproject.backends.ZulipLDAPAuthBackend',),
                AUTH_LDAP_USER_DN_TEMPLATE='uid=%(user)s,ou=users,dc=zulip,dc=com'):
            result = self.client_get(confirmation_url)
            self.assertEqual(result.status_code, 200)
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   realm_name=realm_name,
                                                   realm_subdomain=subdomain,
                                                   from_confirmation='1',
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["You're almost there.",
                                             "New User Name",
                                             "newuser@zulip.com"],
                                            result)
            # Test the TypeError exception handler
            mock_ldap.directory = {
                'uid=newuser,ou=users,dc=zulip,dc=com': {
                    'userPassword': 'testing',
                    'fn': None  # This will raise TypeError
                }
            }
            result = self.submit_reg_form_for_user(email,
                                                   password,
                                                   realm_name=realm_name,
                                                   realm_subdomain=subdomain,
                                                   from_confirmation='1',
                                                   # Pass HTTP_HOST for the target subdomain
                                                   HTTP_HOST=subdomain + ".testserver")
            self.assert_in_success_response(["You're almost there.",
                                             "newuser@zulip.com"],
                                            result)
        mock_ldap.reset()
        mock_initialize.stop()

    @patch('DNS.dnslookup', return_value=[['sipbtest:*:20922:101:Fred Sipb,,,:/mit/sipbtest:/bin/athena/tcsh']])
    def test_registration_of_mirror_dummy_user(self, ignored):
        # type: (Any) -> None
        """A mirror-dummy (inactive placeholder) user can register and claim
        the existing account."""
        password = "test"
        email = "sipbtest@mit.edu"
        subdomain = "sipb"
        realm_name = "MIT"
        user_profile = get_user_profile_by_email(email)
        user_profile.is_mirror_dummy = True
        user_profile.is_active = False
        user_profile.save()
        result = self.client_post('/register/', {'email': email})
        self.assertEqual(result.status_code, 302)
        self.assertTrue(result["Location"].endswith(
            "/accounts/send_confirm/%s" % (email,)))
        result = self.client_get(result["Location"])
        self.assert_in_response("Check your email so we can get started.", result)
        # Visit the confirmation link.
        confirmation_url = self._find_confirmation_url(email)
        result = self.client_get(confirmation_url)
        self.assertEqual(result.status_code, 200)
        # First submission renders the confirmation form (200)...
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               from_confirmation='1',
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 200)
        # ...the second completes registration and logs the user in (302).
        result = self.submit_reg_form_for_user(email,
                                               password,
                                               realm_name=realm_name,
                                               realm_subdomain=subdomain,
                                               # Pass HTTP_HOST for the target subdomain
                                               HTTP_HOST=subdomain + ".testserver")
        self.assertEqual(result.status_code, 302)
        self.assertEqual(get_session_dict_user(self.client.session), user_profile.id)
class TestOpenRealms(ZulipTestCase):
    def test_open_realm_logic(self):
        # type: () -> None
        """get_unique_open_realm returns a realm only when, after excluding
        SYSTEM_ONLY_REALMS, exactly one completely open realm remains."""
        simple_realm = get_realm('simple')
        do_deactivate_realm(simple_realm)
        zephyr_realm = get_realm("zephyr")
        self.assertEqual(get_unique_open_realm(), None)
        zephyr_realm.restricted_to_domain = False
        zephyr_realm.save()
        self.assertTrue(completely_open(zephyr_realm))
        self.assertEqual(get_unique_open_realm(), None)
        with self.settings(SYSTEM_ONLY_REALMS={"zulip"}):
            self.assertEqual(get_unique_open_realm(), zephyr_realm)
        # Restore the restricted_to_domain flag we flipped above.
        zephyr_realm.restricted_to_domain = True
        zephyr_realm.save()
class DeactivateUserTest(ZulipTestCase):
    """Tests for self-deactivation via DELETE /json/users/me."""
    def test_deactivate_user(self):
        # type: () -> None
        """Deactivating oneself succeeds and invalidates future logins."""
        email = 'hamlet@zulip.com'
        self.login(email)
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        user = get_user_profile_by_email('hamlet@zulip.com')
        self.assertFalse(user.is_active)
        self.login(email, fails=True)
    def test_do_not_deactivate_final_admin(self):
        # type: () -> None
        """The last realm administrator cannot deactivate themselves; once a
        second admin exists, self-deactivation succeeds."""
        email = 'iago@zulip.com'
        self.login(email)
        user = get_user_profile_by_email('iago@zulip.com')
        self.assertTrue(user.is_active)
        result = self.client_delete('/json/users/me')
        self.assert_json_error(result, "Cannot deactivate the only organization administrator")
        user = get_user_profile_by_email('iago@zulip.com')
        self.assertTrue(user.is_active)
        self.assertTrue(user.is_realm_admin)
        # Promote a second admin; deactivating the first should now work.
        # (Removed a dead `email = 'hamlet@zulip.com'` assignment that was
        # never read afterwards.)
        user_2 = get_user_profile_by_email('hamlet@zulip.com')
        do_change_is_admin(user_2, True)
        self.assertTrue(user_2.is_realm_admin)
        result = self.client_delete('/json/users/me')
        self.assert_json_success(result)
        # Re-grant the admin bit to the now-deactivated iago.
        do_change_is_admin(user, True)
class TestLoginPage(ZulipTestCase):
    """Tests for /login/ routing, particularly root-domain/subdomain handling."""
    def test_login_page_wrong_subdomain_error(self):
        # type: () -> None
        result = self.client_get("/login/?subdomain=1")
        self.assertIn(WRONG_SUBDOMAIN_ERROR, result.content.decode('utf8'))
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_alias(self, mock_get_host):
        # type: (MagicMock) -> None
        # A host matching a ROOT_SUBDOMAIN_ALIASES entry redirects to /find_my_team/.
        mock_get_host.return_value = 'www.testserver'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/find_my_team/')
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_redirects_for_root_domain(self, mock_get_host):
        # type: (MagicMock) -> None
        mock_get_host.return_value = 'testserver'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/find_my_team/')
        # Same redirect when the host equals EXTERNAL_HOST itself.
        mock_get_host.return_value = 'www.testserver.com'
        with self.settings(REALMS_HAVE_SUBDOMAINS=True,
                           EXTERNAL_HOST='www.testserver.com',
                           ROOT_SUBDOMAIN_ALIASES=['test']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 302)
            self.assertEqual(result.url, '/find_my_team/')
    @patch('django.http.HttpRequest.get_host')
    def test_login_page_works_without_subdomains(self, mock_get_host):
        # type: (MagicMock) -> None
        # With REALMS_HAVE_SUBDOMAINS unset, /login/ renders normally (200).
        mock_get_host.return_value = 'www.testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
        mock_get_host.return_value = 'testserver'
        with self.settings(ROOT_SUBDOMAIN_ALIASES=['www']):
            result = self.client_get("/en/login/")
            self.assertEqual(result.status_code, 200)
class TestFindMyTeam(ZulipTestCase):
    """Tests for the /find_my_team/ page that emails users their realms."""

    def test_template(self):
        # type: () -> None
        response = self.client_get('/find_my_team/')
        self.assertIn("Find your team", response.content.decode('utf8'))

    def test_result(self):
        # type: () -> None
        response = self.client_get(
            '/find_my_team/?emails=iago@zulip.com,cordelia@zulip.com')
        body = response.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", body)
        self.assertIn("iago@zulip.com", body)
        self.assertIn("cordelia@zulip.com", body)

    def test_find_team_ignore_invalid_email(self):
        # type: () -> None
        response = self.client_get(
            '/find_my_team/?emails=iago@zulip.com,invalid_email')
        body = response.content.decode('utf8')
        self.assertIn("Emails sent! You will only receive emails", body)
        self.assertIn("iago@zulip.com", body)
        # The malformed address must be silently dropped, not echoed.
        self.assertNotIn("invalid_email", body)

    def test_find_team_zero_emails(self):
        # type: () -> None
        response = self.client_post('/find_my_team/', {'emails': ''})
        self.assertIn('This field is required', response.content.decode('utf8'))
        self.assertEqual(response.status_code, 200)

    def test_find_team_one_email(self):
        # type: () -> None
        response = self.client_post('/find_my_team/', {'emails': 'hamlet@zulip.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.url, '/find_my_team/?emails=hamlet%40zulip.com')

    def test_find_team_multiple_emails(self):
        # type: () -> None
        response = self.client_post(
            '/find_my_team/', {'emails': 'hamlet@zulip.com,iago@zulip.com'})
        self.assertEqual(response.status_code, 302)
        self.assertEqual(
            response.url, '/find_my_team/?emails=hamlet%40zulip.com%2Ciago%40zulip.com')

    def test_find_team_more_than_ten_emails(self):
        # type: () -> None
        too_many = ','.join('hamlet-{}@zulip.com'.format(i) for i in range(11))
        response = self.client_post('/find_my_team/', {'emails': too_many})
        self.assertEqual(response.status_code, 200)
        self.assertIn("Please enter at most 10", response.content.decode('utf8'))
class ConfirmationKeyTest(ZulipTestCase):
    def test_confirmation_key(self):
        # type: () -> None
        """The confirmation_key view echoes the key stored in the session."""
        request = MagicMock()
        request.session = {'confirmation_key': {'confirmation_key': 'xyzzy'}}
        response = confirmation_key(request)
        self.assert_json_success(response)
        self.assert_in_response('xyzzy', response)
| {
"content_hash": "79793549edfcbbd48763810a66884cd2",
"timestamp": "",
"source": "github",
"line_count": 1410,
"max_line_length": 114,
"avg_line_length": 42.65673758865248,
"alnum_prop": 0.5904798324078077,
"repo_name": "dattatreya303/zulip",
"id": "a54d65f57c8d4680bc6fee53d30145f9d3c230ed",
"size": "60182",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "zerver/tests/test_signup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "298684"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "541221"
},
{
"name": "JavaScript",
"bytes": "1601573"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3506780"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
} |
# Legacy-path shim: re-export everything from the relocated module so old
# imports of this path keep working.
from rally.plugins.task.sla.iteration_time import *  # noqa: F401,F403
from rally.plugins.task.sla import iteration_time as _new
# import it as last item to be sure that we use the right module
from rally.common import logging
# Warn anyone importing this legacy module path that it moved in 3.0.0.
logging.log_deprecated_module(
    target=__name__, new_module=_new.__name__, release="3.0.0"
)
| {
"content_hash": "9474c441bf55f33e03b3bdc968605164",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 70,
"avg_line_length": 32.6,
"alnum_prop": 0.7361963190184049,
"repo_name": "openstack/rally",
"id": "dc66970ed658bef306655f76b893e909cc99f4f7",
"size": "924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rally/plugins/common/sla/iteration_time.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1245"
},
{
"name": "HTML",
"bytes": "70138"
},
{
"name": "JavaScript",
"bytes": "10234"
},
{
"name": "Mako",
"bytes": "1182"
},
{
"name": "Python",
"bytes": "2254050"
},
{
"name": "Shell",
"bytes": "6966"
}
],
"symlink_target": ""
} |
from __future__ import annotations
from typing import Optional, Sequence
from pydantic import BaseModel, root_validator, validator
from datadog_checks.base.utils.functions import identity
from datadog_checks.base.utils.models import validation
from . import defaults, validators
class SharedConfig(BaseModel):
    """Pydantic model for the shared (`init_config`) options of the disk check.

    NOTE(review): this follows the layout of datadog_checks' config models,
    with defaults and per-field validation delegated to the sibling
    `defaults` and `validators` modules -- confirm against the generator spec.
    """
    class Config:
        # Instances are read-only once constructed.
        allow_mutation = False
    device_global_exclude: Optional[Sequence[str]]
    file_system_global_exclude: Optional[Sequence[str]]
    mount_point_global_exclude: Optional[Sequence[str]]
    service: Optional[str]
    @root_validator(pre=True)
    def _initial_validation(cls, values):
        # Give `validators.initialize_shared` (if defined) first crack at the
        # raw mapping, then run the framework's standard initialization.
        return validation.core.initialize_config(getattr(validators, 'initialize_shared', identity)(values))
    @validator('*', pre=True, always=True)
    def _ensure_defaults(cls, v, field):
        # Fill unset, non-required fields from the `defaults` module.
        if v is not None or field.required:
            return v
        return getattr(defaults, f'shared_{field.name}')(field, v)
    @validator('*')
    def _run_validations(cls, v, field):
        # Per-field hook `validators.shared_<field>`; only truthy values are checked.
        if not v:
            return v
        return getattr(validators, f'shared_{field.name}', identity)(v, field=field)
    @root_validator(pre=False)
    def _final_validation(cls, values):
        # Final pass after all field-level validators have run.
        return validation.core.finalize_config(getattr(validators, 'finalize_shared', identity)(values))
| {
"content_hash": "23d6e4fc940234bf57c96d47c96e42d1",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 108,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.6984848484848485,
"repo_name": "DataDog/integrations-core",
"id": "db701e907545595e119bd23ed31182f81450ae9e",
"size": "1677",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "disk/datadog_checks/disk/config_models/shared.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 7, transform = "Difference", sigma = 0.0, exog_count = 20, ar_order = 12); | {
"content_hash": "4fa3df53ae3b3ae16168fcc485ea6944",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 167,
"avg_line_length": 38.142857142857146,
"alnum_prop": 0.7078651685393258,
"repo_name": "antoinecarme/pyaf",
"id": "a5ddfbf21e8f86dcb2c921afbd223eb846647811",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Difference/trend_Lag1Trend/cycle_7/ar_12/test_artificial_1024_Difference_Lag1Trend_7_12_20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
"""Tests for ops which manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np # pylint: disable=unused-import
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_list_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ListOpsTest(test_util.TensorFlowTestCase, parameterized.TestCase):
def _testPushPop(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testPushPop(self, max_num_elements):
self._testPushPop(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testPushPopGPU(self, max_num_elements):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testPushPop(max_num_elements)
  @test_util.run_deprecated_v1
  def testPushInFullListFails(self):
    """Pushing into a list already at max_num_elements raises."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[], max_num_elements=1)
    l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Tried to push item into a full list"):
      l = list_ops.tensor_list_push_back(l, 2.)
      self.evaluate(l)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_deprecated_v1
def testPopFromEmptyTensorListFails(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Trying to pop from an empty list"):
l = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.evaluate(l)
def testPopUninitializedTensorUseListElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[2, 3], num_elements=3)
_, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(e, np.zeros((2, 3)))
  def testPopUninitializedTensorUseSpecifiedElementShape(self):
    """An explicit element_shape fills in dims the list leaves unknown."""
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)
    _, e = gen_list_ops.tensor_list_pop_back(
        l, element_dtype=dtypes.float32, element_shape=[4, 3])
    self.assertAllEqual(e, np.zeros((4, 3)))
  def testPopUninitializedTensorWithInvalidElementShapeFails(self):
    """Popping an unset element fails without a fully-defined shape."""
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=None, num_elements=3)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Trying to read an uninitialized tensor but "
        "element_shape is not fully defined"):
      _, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
      self.evaluate(e)
    # A shape incompatible with the list's element_shape is also rejected.
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=[None, 2], num_elements=3)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r"Incompatible shapes during merge: \[1,3\] vs. \[\?,2\]"):
      _, e = gen_list_ops.tensor_list_pop_back(
          l, element_dtype=dtypes.float32, element_shape=[1, 3])
      self.evaluate(e)
  def testPushGetGrad(self):
    """Gradient flows through push_back into get_item."""
    with backprop.GradientTape() as tape:
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32, element_shape=None)
      c0 = constant_op.constant(5.0)
      c1 = constant_op.constant([10.0, 20.0])
      tape.watch(c0)
      tape.watch(c1)
      l = list_ops.tensor_list_push_back(l, c0)
      l = list_ops.tensor_list_push_back(l, c1)
      t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(t1), [10.0, 20.0])
    # t1 == c1 so the gradient should be [0., [1., 1.]]
    # This tests that the gradient of push_back correctly converts DT_INVALID
    # tensors to zeros. The list returned by the gradient of GetItem will
    # have only have tensor at index 1 set and others set to DT_INVALID.
    dt0, dt1 = tape.gradient(t1, [c0, c1])
    self.assertAllEqual(self.evaluate(dt1), [1.0, 1.0])
    self.assertEqual(self.evaluate(dt0), 0.0)
def _testStack(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
if not context.executing_eagerly():
self.assertAllEqual(t.shape.as_list(), [None])
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testStack(self, max_num_elements):
self._testStack(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testStackGPU(self, max_num_elements):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testStack(max_num_elements)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testStackWithUnknownElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
# Should raise an error when the element tensors do not all have the same
# shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Incompatible ranks during merge: 0 vs. 1"):
l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testStackWithPartiallyDefinedElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[1.0], [2.0]])
# Should raise an error when the element tensors do not all have the same
# shape.
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Incompatible shapes during merge: \[1\] vs. \[2\]"):
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
@test_util.run_deprecated_v1
def testStackEmptyList(self, max_num_elements):
# Should be able to stack empty lists with fully defined element_shape.
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[1, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t).shape, (0, 1, 2))
# Should not be able to stack empty lists with partially defined
# element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
# Should not be able to stack empty lists with undefined element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.evaluate(t)
def _testStackWithUninitializedTensors(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [0., 0., 0.])
  def testStackWithUninitializedTensors(self):
    """CPU variant of the uninitialized-stack check."""
    self._testStackWithUninitializedTensors()
def testStackWithUninitializedTensorsGpu(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testStackWithUninitializedTensors()
def _testStackWithUninitializedTensorsInferShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
l = list_ops.tensor_list_set_item(l, 1, [1., 2.])
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[0., 0.], [1., 2.], [0., 0.]])
  def testStackWithUninitializedTensorsInferShape(self):
    """CPU variant of the inferred-shape uninitialized-stack check."""
    self._testStackWithUninitializedTensorsInferShape()
def testStackWithUninitializedTensorsInferShapeGpu(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testStackWithUninitializedTensorsInferShape()
  def testStackReservedListWithNoElementsAndPartialElementShapeFails(self):
    """Stacking all-uninitialized entries needs a fully-defined shape."""
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=None, num_elements=3)
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Tried to stack list which only contains "
                                 "uninitialized tensors and has a "
                                 "non-fully-defined element_shape: <unknown>"):
      t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
      self.evaluate(t)
def testStackUsingSpecifiedElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
t = gen_list_ops.tensor_list_stack(
l, element_dtype=dtypes.float32, element_shape=[])
self.assertAllEqual(self.evaluate(t), np.zeros((3,)))
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 2))
def testGatherGrad(self, max_num_elements):
with backprop.GradientTape() as tape:
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[],
max_num_elements=max_num_elements)
c0 = constant_op.constant(1.0)
tape.watch(c0)
l = list_ops.tensor_list_push_back(l, c0)
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [2.0, 1.0])
s = (t[0] + t[1]) * (t[0] + t[1])
dt = tape.gradient(s, c0)
self.assertAllEqual(self.evaluate(dt), 6.0)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testGatherWithUnknownElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant([3.0, 4.0]))
t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [2.0, 1.0])
t = list_ops.tensor_list_gather(l, [2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[3.0, 4.0]])
# Should raise an error when the requested tensors do not all have the same
# shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Incompatible ranks during merge: 0 vs. 1"):
t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testGatherWithPartiallyDefinedElementShape(self, max_num_elements):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None],
max_num_elements=max_num_elements)
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0]))
l = list_ops.tensor_list_push_back(l, constant_op.constant([2.0, 3.0]))
l = list_ops.tensor_list_push_back(l, constant_op.constant([4.0, 5.0]))
t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[1.0]])
t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[2.0, 3.0], [4.0, 5.0]])
# Should raise an error when the requested tensors do not all have the same
# shape.
with self.assertRaisesRegexp(
errors.InvalidArgumentError,
r"Incompatible shapes during merge: \[1\] vs. \[2\]"):
t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
self.evaluate(t)
@parameterized.named_parameters(("NoMaxNumElements", None),
("WithMaxNumElements", 3))
@test_util.run_deprecated_v1
def testGatherEmptyList(self, max_num_elements):
# Should be able to gather from empty lists with fully defined
# element_shape.
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[1, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
self.assertAllEqual((0, 1, 2), self.evaluate(t).shape)
# Should not be able to gather from empty lists with partially defined
# element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=[None, 2],
max_num_elements=max_num_elements)
t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
self.evaluate(t)
# Should not be able to gather from empty lists with undefined
# element_shape.
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"non-fully-defined"):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32,
element_shape=None,
max_num_elements=max_num_elements)
t = list_ops.tensor_list_gather(l, [], element_dtype=dtypes.float32)
self.evaluate(t)
  def testGatherGradWithNonContiguousIndices(self):
    """Gradient w.r.t. the list itself keeps the full list length."""
    with backprop.GradientTape(persistent=True) as tape:
      t = constant_op.constant([1.0, 2.0, 3.0])
      l = list_ops.tensor_list_from_tensor(t, element_shape=[])
      c = constant_op.constant(5.0)
      tape.watch(c)
      l = list_ops.tensor_list_set_item(l, 1, c)
      t = list_ops.tensor_list_gather(l, [1], element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(t), [5.0])
      s = t[0] * t[0]
    dt = tape.gradient(s, c)
    self.assertAllEqual(self.evaluate(dt), 10.0)
    dl = tape.gradient(t, l)
    dl_length = list_ops.tensor_list_length(dl)
    self.assertAllEqual(self.evaluate(dl_length), 3)
def _testGatherWithUninitializedTensors(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
t = list_ops.tensor_list_gather(l, [0, 2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [0., 0.])
  def testGatherWithUninitializedTensors(self):
    """CPU variant of the uninitialized-gather check."""
    self._testGatherWithUninitializedTensors()
def testGatherWithUninitializedTensorsGpu(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testGatherWithUninitializedTensors()
def _testGatherWithUninitializedTensorsInferShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
l = list_ops.tensor_list_set_item(l, 1, [1., 2.])
t = list_ops.tensor_list_gather(l, [1, 2], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [[1., 2.], [0., 0.]])
  def testGatherWithUninitializedTensorsInferShape(self):
    """CPU variant of the inferred-shape uninitialized-gather check."""
    self._testGatherWithUninitializedTensorsInferShape()
def testGatherWithUninitializedTensorsInferShapeGpu(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self._testGatherWithUninitializedTensorsInferShape()
  def testGatherReservedListWithNoElementsAndPartialElementShapeFails(self):
    """Gathering uninitialized entries needs a fully-defined shape."""
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=None, num_elements=3)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Tried to gather uninitialized tensors from a"
        " list with non-fully-defined element_shape"):
      t = list_ops.tensor_list_gather(l, [0], element_dtype=dtypes.float32)
      self.evaluate(t)
def testGatherUsingSpecifiedElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=None, num_elements=3)
t = gen_list_ops.tensor_list_gather(
l, [0, 1, 2], element_dtype=dtypes.float32, element_shape=[])
self.assertAllEqual(self.evaluate(t), np.zeros((3,)))
def testScatterOutputListSize(self):
c0 = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_scatter(
c0, [1, 3], ops.convert_to_tensor([], dtype=dtypes.int32))
# TensorListScatter should return a list with size largest index + 1.
self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 4)
  def testScatterWithInvalidRowsInInputTensorFails(self):
    """Scatter requires one input row per index."""
    c0 = constant_op.constant([1.0, 2.0])
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Invalid number of rows in input tensor. Expected: 3 Actual: 2"):
      l = list_ops.tensor_list_scatter(
          c0, [1, 0, 2], ops.convert_to_tensor([], dtype=dtypes.int32))
      self.evaluate(l)
  def testScatterWithNegativeIndicesFails(self):
    """Negative scatter indices are rejected."""
    c0 = constant_op.constant([1.0, 2.0])
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Indices in TensorListScatter must all be positive."):
      l = list_ops.tensor_list_scatter(
          c0, [-1, -2], ops.convert_to_tensor([], dtype=dtypes.int32))
      self.evaluate(l)
  def testScatterGrad(self):
    """Gradient of scatter routes per-index grads back to the source rows."""
    with backprop.GradientTape() as tape:
      c0 = constant_op.constant([1.0, 2.0])
      tape.watch(c0)
      l = list_ops.tensor_list_scatter(
          c0, [1, 0], ops.convert_to_tensor([], dtype=dtypes.int32))
      t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(t0), 2.0)
      self.assertAllEqual(self.evaluate(t1), 1.0)
      loss = t0 * t0 + t1 * t1
    dt = tape.gradient(loss, c0)
    self.assertAllEqual(self.evaluate(dt), [2., 4.])
  def testScatterWithPartialReadGrad(self):
    """Only the read index contributes to the scatter gradient."""
    with backprop.GradientTape() as tape:
      c0 = constant_op.constant([1.0, 2.0])
      tape.watch(c0)
      l = list_ops.tensor_list_scatter(
          c0, [1, 0], ops.convert_to_tensor([], dtype=dtypes.int32))
      t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(t0), 2.0)
      loss = t0 * t0
    dt = tape.gradient(loss, c0)
    self.assertAllEqual(self.evaluate(dt), [0., 4.])
def testTensorListFromTensor(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 2.0)
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0)
def testFromTensorGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testTensorListFromTensor()
def testGetSetItem(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=[])
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e0), 1.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [3.0, 2.0])
def testGetSetGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testGetSetItem()
  def testSetGetGrad(self):
    """Gradient flows through set_item into get_item."""
    with backprop.GradientTape() as tape:
      t = constant_op.constant(5.)
      tape.watch(t)
      l = list_ops.tensor_list_reserve(
          element_dtype=dtypes.float32, element_shape=[], num_elements=3)
      l = list_ops.tensor_list_set_item(l, 1, 2. * t)
      e = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
      self.assertAllEqual(self.evaluate(e), 10.0)
    self.assertAllEqual(self.evaluate(tape.gradient(e, t)), 2.0)
def testGetUninitializedTensorUseListElementShape(self):
l = list_ops.tensor_list_reserve(
element_dtype=dtypes.float32, element_shape=[], num_elements=3)
l = list_ops.tensor_list_set_item(l, 0, 5.)
e1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
e2 = list_ops.tensor_list_get_item(l, 2, element_dtype=dtypes.float32)
self.assertEqual(self.evaluate(e1), 0.)
self.assertEqual(self.evaluate(e2), 0.)
  def testGetUninitializedTensorUseSpecifiedElementShape(self):
    """get_item's element_shape argument fills in unknown dims."""
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=None, num_elements=3)
    e0 = gen_list_ops.tensor_list_get_item(
        l, 0, element_shape=[], element_dtype=dtypes.float32)
    e1 = gen_list_ops.tensor_list_get_item(
        l, 1, element_shape=[2, 3], element_dtype=dtypes.float32)
    self.assertEqual(self.evaluate(e0), 0.)
    self.assertAllEqual(self.evaluate(e1), np.zeros((2, 3)))
    # A partially-known list shape can be narrowed by the argument too.
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=[None, 3], num_elements=3)
    e1 = gen_list_ops.tensor_list_get_item(
        l, 1, element_shape=[2, 3], element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(e1), np.zeros((2, 3)))
  def testGetUninitializedTensorWithInvalidElementShapeFails(self):
    """get_item on an unset element fails without a fully-defined shape."""
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=None, num_elements=3)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Trying to read an uninitialized tensor but "
        "element_shape is not fully defined"):
      e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      self.evaluate(e0)
    # An element_shape incompatible with the list's shape is also rejected.
    l = list_ops.tensor_list_reserve(
        element_dtype=dtypes.float32, element_shape=[None, 2], num_elements=3)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r"Incompatible shapes during merge: \[1,3\] vs. \[\?,2\]"):
      e0 = gen_list_ops.tensor_list_get_item(
          l, 0, element_dtype=dtypes.float32, element_shape=[1, 3])
      self.evaluate(e0)
  @test_util.run_deprecated_v1
  @test_util.enable_control_flow_v2
  def testSkipEagerSetItemIndexOutOfBounds(self):
    """resize_if_index_out_of_bounds grows the list on write; grads flow."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[])
    e0 = constant_op.constant(5.)
    l = list_ops.tensor_list_set_item(
        l, 0, 2. * e0, resize_if_index_out_of_bounds=True)
    l = list_ops.tensor_list_set_item(
        l, 1, 1., resize_if_index_out_of_bounds=True)
    t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    grad = gradients_impl.gradients(t, e0)[0]
    self.assertAllEqual(self.evaluate(grad), 2.)
  @test_util.run_deprecated_v1
  def testSetOnEmptyListWithMaxNumElementsFails(self):
    """set_item cannot write past the current length, even with capacity."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[], max_num_elements=3)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Trying to modify element 0 in a list with 0 elements."):
      l = list_ops.tensor_list_set_item(l, 0, 1.)
      self.evaluate(l)
def testUnknownShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), [1.0, 2.0])
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
  def testCPUGPUCopy(self):
    """Lists survive CPU->GPU and GPU->CPU identity copies."""
    if not context.num_gpus():
      return
    t = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    with context.device("gpu:0"):
      l_gpu = array_ops.identity(l)
      self.assertAllEqual(
          self.evaluate(
              list_ops.tensor_list_pop_back(
                  l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
    l_cpu = array_ops.identity(l_gpu)
    self.assertAllEqual(
        self.evaluate(
            list_ops.tensor_list_pop_back(
                l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
  def testCPUGPUCopyNested(self):
    """Nested (variant-of-variant) lists survive CPU<->GPU copies."""
    if not context.num_gpus():
      return
    t = constant_op.constant([1.0, 2.0])
    child_l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    l = list_ops.empty_tensor_list(
        element_shape=constant_op.constant([], dtype=dtypes.int32),
        element_dtype=dtypes.variant)
    l = list_ops.tensor_list_push_back(l, child_l)
    with context.device("gpu:0"):
      l_gpu = array_ops.identity(l)
      _, child_l_gpu = list_ops.tensor_list_pop_back(
          l_gpu, element_dtype=dtypes.variant)
      self.assertAllEqual(
          self.evaluate(
              list_ops.tensor_list_pop_back(
                  child_l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
    l_cpu = array_ops.identity(l_gpu)
    _, child_l_cpu = list_ops.tensor_list_pop_back(
        l_cpu, element_dtype=dtypes.variant)
    self.assertAllEqual(
        self.evaluate(
            list_ops.tensor_list_pop_back(
                child_l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
def testGraphStack(self):
with self.cached_session():
tl = list_ops.empty_tensor_list(
element_shape=constant_op.constant([1], dtype=dtypes.int32),
element_dtype=dtypes.int32)
tl = list_ops.tensor_list_push_back(tl, [1])
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_stack(tl, element_dtype=dtypes.int32)),
[[1]])
  def testSkipEagerStackInLoop(self):
    """Push inside a while_loop, then stack the accumulated list."""
    with self.cached_session():
      t1 = list_ops.empty_tensor_list(
          element_shape=constant_op.constant([], dtype=dtypes.int32),
          element_dtype=dtypes.int32)
      i = constant_op.constant(0, dtype=dtypes.int32)

      def body(i, t1):
        t1 = list_ops.tensor_list_push_back(t1, i)
        i += 1
        return i, t1

      i, t1 = control_flow_ops.while_loop(lambda i, t1: math_ops.less(i, 4),
                                          body, [i, t1])
      s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.int32)
      self.assertAllEqual(self.evaluate(s1), [0, 1, 2, 3])
  def testSkipEagerStackSwitchDtype(self):
    """A cond can swap in a list of a different dtype before pushing."""
    with self.cached_session():
      list_ = list_ops.empty_tensor_list(
          element_shape=constant_op.constant([], dtype=dtypes.int32),
          element_dtype=dtypes.int32)
      m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)

      def body(list_, m):
        list_ = control_flow_ops.cond(
            math_ops.equal(list_ops.tensor_list_length(list_), 0),
            lambda: list_ops.empty_tensor_list(m.shape, m.dtype),
            lambda: list_)
        list_ = list_ops.tensor_list_push_back(list_, m)
        return list_, m

      for _ in range(2):
        list_, m = body(list_, m)

      s1 = list_ops.tensor_list_stack(list_, element_dtype=dtypes.float32)
      np_s1 = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
      self.assertAllEqual(self.evaluate(s1), np_s1)
  def testSkipEagerStackInLoopSwitchDtype(self):
    """Dtype switch inside a while_loop body still accumulates correctly."""
    with self.cached_session():
      t1 = list_ops.empty_tensor_list(
          element_shape=constant_op.constant([], dtype=dtypes.int32),
          element_dtype=dtypes.int32)
      i = constant_op.constant(0, dtype=dtypes.float32)
      m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)

      def body(i, m, t1):
        t1 = control_flow_ops.cond(
            math_ops.equal(list_ops.tensor_list_length(t1), 0),
            lambda: list_ops.empty_tensor_list(m.shape, m.dtype),
            lambda: t1)
        t1 = list_ops.tensor_list_push_back(t1, m * i)
        i += 1.0
        return i, m, t1

      i, m, t1 = control_flow_ops.while_loop(
          lambda i, m, t1: math_ops.less(i, 4), body, [i, m, t1])
      s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.float32)
      np_s1 = np.vstack([np.arange(1, 4) * i for i in range(4)])
      self.assertAllEqual(self.evaluate(s1), np_s1)
  def testSerialize(self):
    """Lists serialize across a worker/ps device boundary."""
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        t = constant_op.constant([[1.0], [2.0]])
        l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        l_ps, e = list_ops.tensor_list_pop_back(
            l_ps, element_dtype=dtypes.float32)
      with ops.device("/job:worker"):
        worker_e = array_ops.identity(e)
      self.assertAllEqual(self.evaluate(worker_e), [2.0])
  def testSerializeListWithInvalidTensors(self):
    """Lists holding uninitialized entries serialize across devices."""
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        l = list_ops.tensor_list_reserve(
            element_dtype=dtypes.float32, element_shape=[], num_elements=2)
        l = list_ops.tensor_list_set_item(l, 0, 1.)
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        l_ps = list_ops.tensor_list_set_item(l_ps, 1, 2.)
        t = list_ops.tensor_list_stack(l_ps, element_dtype=dtypes.float32)
      with ops.device("/job:worker"):
        worker_t = array_ops.identity(t)
      self.assertAllEqual(self.evaluate(worker_t), [1.0, 2.0])
  def testSerializeListWithUnknownRank(self):
    """An unknown element_shape (reported as -1) survives serialization."""
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        t = constant_op.constant([[1.0], [2.0]])
        l = list_ops.tensor_list_from_tensor(t, element_shape=None)
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        element_shape = list_ops.tensor_list_element_shape(
            l_ps, shape_type=dtypes.int32)
      with ops.device("/job:worker"):
        element_shape = array_ops.identity(element_shape)
      self.assertEqual(self.evaluate(element_shape), -1)
  def testSerializeListWithMaxNumElements(self):
    """max_num_elements is preserved across serialization."""
    if context.num_gpus():
      # TODO(b/119151861): Enable on GPU.
      return
    worker = test_util.create_local_cluster(num_workers=1, num_ps=1)[0][0]
    with ops.Graph().as_default(), session.Session(target=worker.target):
      with ops.device("/job:worker"):
        l = list_ops.empty_tensor_list(
            element_shape=None,
            element_dtype=dtypes.float32,
            max_num_elements=2)
        l = list_ops.tensor_list_push_back(l, 1.)
      with ops.device("/job:ps"):
        l_ps = array_ops.identity(l)
        l_ps = list_ops.tensor_list_push_back(l_ps, 2.)
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "Tried to push item into a full list"):
        with ops.device("/job:worker"):
          l_worker = array_ops.identity(l_ps)
          l_worker = list_ops.tensor_list_push_back(l_worker, 3.0)
          self.evaluate(l_worker)
  def testPushPopGradients(self):
    """Gradient flows through a push/pop pair."""
    with backprop.GradientTape() as tape:
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32, element_shape=[])
      c = constant_op.constant(1.0)
      tape.watch(c)
      l = list_ops.tensor_list_push_back(l, c)
      l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
      e = 2 * e
    self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0)
  def testStackFromTensorGradients(self):
    """Gradient flows from stack back through from_tensor."""
    with backprop.GradientTape() as tape:
      c = constant_op.constant([1.0, 2.0])
      tape.watch(c)
      l = list_ops.tensor_list_from_tensor(c, element_shape=[])
      c2 = list_ops.tensor_list_stack(
          l, element_dtype=dtypes.float32, num_elements=2)
      result = c2 * 2.0
    grad = tape.gradient(result, [c])[0]
    self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])
  def testGetSetGradients(self):
    """Gradients route around elements overwritten by set_item."""
    with backprop.GradientTape() as tape:
      c = constant_op.constant([1.0, 2.0])
      tape.watch(c)
      l = list_ops.tensor_list_from_tensor(c, element_shape=[])
      c2 = constant_op.constant(3.0)
      tape.watch(c2)
      l = list_ops.tensor_list_set_item(l, 0, c2)
      e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
      y = e * e + ee * ee
    grad_c, grad_c2 = tape.gradient(y, [c, c2])
    # c[0] was replaced by c2, so it gets zero gradient; c[1] gets 2*2.0 and
    # c2 gets 2*3.0 from the squared terms.
    self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])
    self.assertAllEqual(self.evaluate(grad_c2), 6.0)
  @test_util.run_deprecated_v1
  def testSetOutOfBounds(self):
    """set_item with an out-of-range index raises InvalidArgumentError."""
    c = constant_op.constant([1.0, 2.0])
    l = list_ops.tensor_list_from_tensor(c, element_shape=[])
    with self.assertRaises(errors.InvalidArgumentError):
      self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0))
  @test_util.run_deprecated_v1
  def testSkipEagerSetItemWithMismatchedShapeFails(self):
    """set_item rejects a runtime value whose shape mismatches the list."""
    with self.cached_session() as sess:
      ph = array_ops.placeholder(dtypes.float32)
      c = constant_op.constant([1.0, 2.0])
      l = list_ops.tensor_list_from_tensor(c, element_shape=[])
      # Set a placeholder with unknown shape to satisfy the shape inference
      # at graph building time.
      l = list_ops.tensor_list_set_item(l, 0, ph)
      l_0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
      # Feeding a vector where the list expects a scalar must fail at runtime.
      with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                   "incompatible shape"):
        sess.run(l_0, {ph: [3.0]})
  def testResourceVariableScatterGather(self):
    """TensorLists held in a resource variable support read and scatter."""
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l = list_ops.tensor_list_from_tensor(c, element_shape=[])
    v = vs.get_variable("var", initializer=[l] * 10, use_resource=True)
    v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32)
    self.evaluate(v.initializer)
    self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked))
    v_r_sparse_stacked = list_ops.tensor_list_stack(
        v.sparse_read(0), dtypes.float32)
    self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked))
    l_new_0 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
    l_new_1 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
    updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1])
    updated_v_elems = array_ops.unstack(updated_v)
    updated_v_stacked = [
        list_ops.tensor_list_stack(el, dtypes.float32) for el in updated_v_elems
    ]
    # Only indices 3 and 5 are replaced by the scatter; all other slots keep
    # the initializer's [1.0, 2.0] list.
    expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] +
                [[1.0, 2.0]] * 4)
    self.assertAllEqual(self.evaluate(updated_v_stacked), expected)
  @test_util.run_deprecated_v1
  def testConcat(self):
    """tensor_list_concat_lists concatenates batched lists pairwise.

    Checks all four orderings of two batched lists, then the failure modes:
    mismatched batch size, mismatched element shapes, and mismatched dtypes.
    """
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])
    l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])
    l_batch_0 = array_ops.stack([l0, l1])
    l_batch_1 = array_ops.stack([l1, l0])
    l_concat_01 = list_ops.tensor_list_concat_lists(
        l_batch_0, l_batch_1, element_dtype=dtypes.float32)
    l_concat_10 = list_ops.tensor_list_concat_lists(
        l_batch_1, l_batch_0, element_dtype=dtypes.float32)
    l_concat_00 = list_ops.tensor_list_concat_lists(
        l_batch_0, l_batch_0, element_dtype=dtypes.float32)
    l_concat_11 = list_ops.tensor_list_concat_lists(
        l_batch_1, l_batch_1, element_dtype=dtypes.float32)
    expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]
    expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]
    expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]
    expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]
    for i, (concat, expected) in enumerate(zip(
        [l_concat_00, l_concat_01, l_concat_10, l_concat_11],
        [expected_00, expected_01, expected_10, expected_11])):
      splitted = array_ops.unstack(concat)
      splitted_stacked_ret = self.evaluate(
          (list_ops.tensor_list_stack(splitted[0], dtypes.float32),
           list_ops.tensor_list_stack(splitted[1], dtypes.float32)))
      print("Test concat %d: %s, %s, %s, %s"
            % (i, expected[0], splitted_stacked_ret[0],
               expected[1], splitted_stacked_ret[1]))
      self.assertAllClose(expected[0], splitted_stacked_ret[0])
      self.assertAllClose(expected[1], splitted_stacked_ret[1])
    # Concatenating mismatched shapes fails.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      self.evaluate(
          list_ops.tensor_list_concat_lists(
              l_batch_0,
              list_ops.empty_tensor_list([], dtypes.float32),
              element_dtype=dtypes.float32))
    # Scalar-element lists cannot be concatenated with vector-element lists.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "element shapes are not identical at index 0"):
      l_batch_of_vec_tls = array_ops.stack(
          [list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)
      self.evaluate(
          list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls,
                                            element_dtype=dtypes.float32))
    # Mixing int and float element dtypes is rejected.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r"input_b\[0\].dtype != element_dtype."):
      l_batch_of_int_tls = array_ops.stack(
          [list_ops.tensor_list_from_tensor([1], element_shape=[])] * 2)
      self.evaluate(
          list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,
                                            element_dtype=dtypes.float32))
  @test_util.run_deprecated_v1
  def testPushBackBatch(self):
    """push_back_batch appends per-list values without mutating its input."""
    c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
    l0 = list_ops.tensor_list_from_tensor(c, element_shape=[])
    l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=[])
    l_batch = array_ops.stack([l0, l1])
    l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])
    l_unstack = array_ops.unstack(l_push)
    l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32)
    l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32)
    self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret))
    self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret))
    with ops.control_dependencies([l_push]):
      l_unstack_orig = array_ops.unstack(l_batch)
      l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0],
                                               dtypes.float32)
      l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1],
                                               dtypes.float32)
    # Check that without aliasing, push_back_batch still works; and
    # that it doesn't modify the input.
    l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate(
        (l0_ret, l1_ret, l0_orig_ret, l1_orig_ret))
    self.assertAllClose([1.0, 2.0, 3.0], l0_r_v)
    self.assertAllClose([-1.0, 4.0], l1_r_v)
    self.assertAllClose([1.0, 2.0], l0_orig_v)
    self.assertAllClose([-1.0], l1_orig_v)
    # Pushing back mismatched shapes fails.
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "incompatible shape to a list at index 0"):
      self.evaluate(
          list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 "Invalid data type at index 0"):
      self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))
  def testZerosLike(self):
    """zeros_like of a TensorList zeros every element, for all dtypes."""
    for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
                  dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
                  dtypes.float64, dtypes.complex64, dtypes.complex128,
                  dtypes.bool):
      l_empty = list_ops.empty_tensor_list(
          element_dtype=dtype, element_shape=[])
      l_empty_zeros = array_ops.zeros_like(l_empty)
      t_empty_zeros = list_ops.tensor_list_stack(
          l_empty_zeros, element_dtype=dtype)
      l_full = list_ops.tensor_list_push_back(l_empty,
                                              math_ops.cast(0, dtype=dtype))
      l_full = list_ops.tensor_list_push_back(l_full,
                                              math_ops.cast(1, dtype=dtype))
      l_full_zeros = array_ops.zeros_like(l_full)
      t_full_zeros = list_ops.tensor_list_stack(
          l_full_zeros, element_dtype=dtype)
      # zeros_like of an empty list is empty; of a 2-element list it is two
      # zeros of the same dtype.
      self.assertAllEqual(self.evaluate(t_empty_zeros), [])
      self.assertAllEqual(
          self.evaluate(t_full_zeros), np.zeros(
              (2,), dtype=dtype.as_numpy_dtype))
  def testZerosLikeNested(self):
    """zeros_like recurses into lists of lists, preserving inner lengths."""
    for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
                  dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
                  dtypes.float64, dtypes.complex64, dtypes.complex128,
                  dtypes.bool):
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.variant, element_shape=[])
      sub_l = list_ops.empty_tensor_list(element_dtype=dtype, element_shape=[])
      l = list_ops.tensor_list_push_back(l, sub_l)
      sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
          1, dtype=dtype))
      l = list_ops.tensor_list_push_back(l, sub_l)
      sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
          2, dtype=dtype))
      l = list_ops.tensor_list_push_back(l, sub_l)
      # l : [[],
      #      [1],
      #      [1, 2]]
      #
      # l_zeros : [[],
      #            [0],
      #            [0, 0]]
      l_zeros = array_ops.zeros_like(l)
      outputs = []
      for _ in range(3):
        l_zeros, out = list_ops.tensor_list_pop_back(
            l_zeros, element_dtype=dtypes.variant)
        outputs.append(list_ops.tensor_list_stack(out, element_dtype=dtype))
      # Note: `outputs` contains popped values so the order is reversed.
      self.assertAllEqual(self.evaluate(outputs[2]), [])
      self.assertAllEqual(
          self.evaluate(outputs[1]), np.zeros((1,), dtype=dtype.as_numpy_dtype))
      self.assertAllEqual(
          self.evaluate(outputs[0]), np.zeros((2,), dtype=dtype.as_numpy_dtype))
def testElementShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=None)
shape = list_ops.tensor_list_element_shape(l, shape_type=dtypes.int32)
self.assertEqual(self.evaluate(shape), -1)
  def testZerosLikeUninitialized(self):
    """zeros_like zeros only the slots that have been set in a reserved list."""
    l0 = list_ops.tensor_list_reserve([], 3, element_dtype=dtypes.float32)
    l1 = list_ops.tensor_list_set_item(l0, 0, 1.)  # [1., _, _]
    zeros_1 = array_ops.zeros_like(l1)  # [0., _, _]
    l2 = list_ops.tensor_list_set_item(l1, 2, 2.)  # [1., _, 2.]
    zeros_2 = array_ops.zeros_like(l2)  # [0., _, 0.]
    # Gather indices with zeros in `zeros_1`.
    res_1 = list_ops.tensor_list_gather(
        zeros_1, [0], element_dtype=dtypes.float32)
    # Gather indices with zeros in `zeros_2`.
    res_2 = list_ops.tensor_list_gather(
        zeros_2, [0, 2], element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(res_1), [0.])
    self.assertAllEqual(self.evaluate(res_2), [0., 0.])
  @test_util.run_deprecated_v1
  def testSkipEagerTensorListGetItemGradAggregation(self):
    """Gradients from two get_item reads of the same slot are summed."""
    l = list_ops.tensor_list_reserve(
        element_shape=[], num_elements=1, element_dtype=dtypes.float32)
    x = constant_op.constant(1.0)
    l = list_ops.tensor_list_set_item(l, 0, x)
    l_read1 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
    l_read2 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
    grad = gradients_impl.gradients([l_read1, l_read2], [x])
    with self.cached_session() as sess:
      # Two unit gradients aggregate to 2.
      self.assertSequenceEqual(self.evaluate(grad), [2.])
  @test_util.run_deprecated_v1
  def testSkipEagerBuildElementShape(self):
    """_build_element_shape normalizes the various shape spellings."""
    fn = list_ops._build_element_shape
    # Unknown shape -> -1.
    self.assertEqual(fn(None), -1)
    self.assertEqual(fn(tensor_shape.unknown_shape()), -1)
    # Scalar shape -> [] with type int32.
    self.assertEqual(fn([]).dtype, dtypes.int32)
    self.assertEqual(fn(tensor_shape.scalar()).dtype, dtypes.int32)
    self.assertAllEqual(self.evaluate(fn([])), np.array([], np.int32))
    self.assertAllEqual(
        self.evaluate(fn(tensor_shape.scalar())), np.array([], np.int32))
    # Tensor -> Tensor
    shape = constant_op.constant(1)
    self.assertIs(fn(shape), shape)
    # Shape with unknown dims -> shape list with -1's.
    shape = [None, 5]
    self.assertAllEqual(fn(shape), [-1, 5])
    self.assertAllEqual(fn(tensor_shape.TensorShape(shape)), [-1, 5])
    # Shape with unknown dims and tensor dims -> shape list with -1's and tensor
    # dims.
    t = array_ops.placeholder(dtypes.int32)
    shape = [None, 5, t]
    result = fn(shape)
    self.assertAllEqual(result[:2], [-1, 5])
    self.assertIs(result[2], t)
def testAddN(self):
l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
result = math_ops.add_n((l1, l2, l3))
result_t = list_ops.tensor_list_stack(result, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(result_t), [9., 12.])
  def testAddNNestedList(self):
    """add_n recurses into variant lists-of-lists, summing inner lists."""
    l1 = list_ops.tensor_list_from_tensor([1.0, 2.0], element_shape=[])
    l2 = list_ops.tensor_list_from_tensor([3.0, 4.0], element_shape=[])
    l3 = list_ops.tensor_list_from_tensor([5.0, 6.0], element_shape=[])
    l4 = list_ops.tensor_list_from_tensor([7.0, 8.0], element_shape=[])
    a = list_ops.empty_tensor_list(
        element_dtype=dtypes.variant, element_shape=[])
    a = list_ops.tensor_list_push_back(a, l1)
    a = list_ops.tensor_list_push_back(a, l2)
    b = list_ops.empty_tensor_list(
        element_dtype=dtypes.variant, element_shape=[])
    b = list_ops.tensor_list_push_back(b, l3)
    b = list_ops.tensor_list_push_back(b, l4)
    result = math_ops.add_n((a, b))
    result_0 = list_ops.tensor_list_stack(
        list_ops.tensor_list_get_item(result, 0, element_dtype=dtypes.variant),
        element_dtype=dtypes.float32)
    result_1 = list_ops.tensor_list_stack(
        list_ops.tensor_list_get_item(result, 1, element_dtype=dtypes.variant),
        element_dtype=dtypes.float32)
    # result[0] == l1 + l3 and result[1] == l2 + l4.
    self.assertAllEqual(self.evaluate(result_0), [6., 8.])
    self.assertAllEqual(self.evaluate(result_1), [10., 12.])
  @test_util.run_deprecated_v1
  def testSkipEagerConcatShapeInference(self):
    """Static shape inference of concat: leading dim is always unknown."""
    def BuildTensor(element_shape):
      # One-line helper: concat an empty list declared with `element_shape`.
      l = list_ops.empty_tensor_list(
          element_dtype=dtypes.float32, element_shape=element_shape)
      return list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertIsNone(BuildTensor(None).shape.rank)
    self.assertAllEqual(BuildTensor([None, 2, 3]).shape.as_list(), [None, 2, 3])
    self.assertAllEqual(
        BuildTensor([None, 2, None]).shape.as_list(), [None, 2, None])
    self.assertAllEqual(BuildTensor([1, 2, 3]).shape.as_list(), [None, 2, 3])
def testConcatWithFullyDefinedElementShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=[2, 2])
l = list_ops.tensor_list_push_back(l, [[0., 1.], [2., 3.]])
l = list_ops.tensor_list_push_back(l, [[4., 5.], [6., 7.]])
t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
self.assertAllEqual(
self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.], [6., 7.]])
  def testConcatWithNonFullyDefinedElementShape(self):
    """Concat accepts elements with differing sizes along the first dim."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[None, 2])
    l = list_ops.tensor_list_push_back(l, [[0., 1.]])
    l = list_ops.tensor_list_push_back(l, [[2., 3.], [4., 5.]])
    t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t), [[0., 1.], [2., 3.], [4., 5.]])
  def testConcatWithMismatchingTensorShapesFails(self):
    """Concat fails when trailing dims differ between elements."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    l = list_ops.tensor_list_push_back(l, [[0., 1.]])
    l = list_ops.tensor_list_push_back(l, [[2.], [4.]])
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r"Tried to concat tensors with unequal shapes: "
        r"\[2\] vs \[1\]"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)
  def testConcatEmptyListWithFullyDefinedElementShape(self):
    """Concat of an empty list yields a (0, trailing-dims) tensor."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[5, 2])
    t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t).shape, (0, 2))
    # The first dim may be unknown; only the trailing dims must be defined.
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[None, 2])
    t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
    self.assertAllEqual(self.evaluate(t).shape, (0, 2))
  def testConcatEmptyListWithUnknownElementShapeFails(self):
    """Concat of an empty list needs a known element shape to size output."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "All except the first dimension must be fully"
        " defined when concating an empty tensor list"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)
  def testConcatEmptyListWithPartiallyDefinedElementShapeFails(self):
    """Concat of an empty list fails if any trailing dim is unknown."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=[2, None])
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "All except the first dimension must be fully"
        " defined when concating an empty tensor list"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)
  def testConcatListWithScalarElementShapeFails(self):
    """Concat rejects a list declared with a scalar element shape."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=tensor_shape.scalar())
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "Concat requires elements to be at least vectors, "
        "found scalars instead"):
      t = list_ops.tensor_list_concat(l, element_dtype=dtypes.float32)
      self.evaluate(t)
  def testConcatListWithScalarElementsFails(self):
    """Concat rejects lists that contain scalar elements at runtime."""
    l = list_ops.empty_tensor_list(
        element_dtype=dtypes.float32, element_shape=None)
    l1 = list_ops.tensor_list_push_back(l, 1.)
    # Scalar as the very first element.
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError, "Concat saw a scalar shape at index 0"
        " but requires at least vectors"):
      t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
      self.evaluate(t)
    # Scalar after a valid vector element; index in the message reflects it.
    l1 = list_ops.tensor_list_push_back(l, [1.])
    l1 = list_ops.tensor_list_push_back(l1, 2.)
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError, "Concat saw a scalar shape at index 1"
        " but requires at least vectors"):
      t = list_ops.tensor_list_concat(l1, element_dtype=dtypes.float32)
      self.evaluate(t)
def testEvenSplit(self):
def RunTest(input_tensor, lengths, expected_stacked_output):
l = list_ops.tensor_list_split(
input_tensor, element_shape=None, lengths=lengths)
self.assertAllEqual(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32),
expected_stacked_output)
RunTest([1., 2., 3.], [1, 1, 1], [[1.], [2.], [3.]])
RunTest([1., 2., 3., 4.], [2, 2], [[1., 2.], [3., 4.]])
RunTest([[1., 2.], [3., 4.]], [1, 1], [[[1., 2.]], [[3., 4.]]])
  def testUnevenSplit(self):
    """tensor_list_split supports unequal segment lengths."""
    l = list_ops.tensor_list_split([1., 2., 3., 4., 5],
                                   element_shape=None,
                                   lengths=[3, 2])
    self.assertAllEqual(list_ops.tensor_list_length(l), 2)
    self.assertAllEqual(
        list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32),
        [1., 2., 3.])
    self.assertAllEqual(
        list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32),
        [4., 5.])
  @test_util.run_deprecated_v1
  def testSkipEagerSplitWithInvalidTensorShapeFails(self):
    """split rejects a scalar input tensor at runtime."""
    with self.cached_session():
      tensor = array_ops.placeholder(dtype=dtypes.float32)
      l = list_ops.tensor_list_split(tensor, element_shape=None, lengths=[1])
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"Tensor must be at least a vector, but saw shape: \[\]"):
        l.eval({tensor: 1})
  @test_util.run_deprecated_v1
  def testSkipEagerSplitWithInvalidLengthsShapeFails(self):
    """split rejects a scalar `lengths` argument at runtime."""
    with self.cached_session():
      lengths = array_ops.placeholder(dtype=dtypes.int64)
      l = list_ops.tensor_list_split([1., 2.],
                                     element_shape=None,
                                     lengths=lengths)
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"Expected lengths to be a vector, received shape: \[\]"):
        l.eval({lengths: 1})
  def testSplitWithInvalidLengthsFails(self):
    """split validates `lengths`: no negatives, no overrun, no leftovers."""
    # Negative length entries are invalid.
    with self.assertRaisesRegexp(errors.InvalidArgumentError,
                                 r"Invalid value in lengths: -1"):
      l = list_ops.tensor_list_split([1., 2.],
                                     element_shape=None,
                                     lengths=[1, -1])
      self.evaluate(l)
    # Lengths must not exceed the tensor's size.
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r"Attempting to slice \[0, 3\] from tensor with length 2"):
      l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[3])
      self.evaluate(l)
    # Lengths must consume the whole tensor.
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        r"Unused values in tensor. Length of tensor: 2 Values used: 1"):
      l = list_ops.tensor_list_split([1., 2.], element_shape=None, lengths=[1])
      self.evaluate(l)
  @test_util.run_deprecated_v1
  def testSkipEagerSplitWithScalarElementShapeFails(self):
    """split rejects a scalar element_shape, statically and at runtime."""
    # Statically-known scalar shape fails during shape inference.
    with self.assertRaisesRegexp(ValueError,
                                 r"Shapes must be equal rank, but are 1 and 0"):
      l = list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1])
    # A fed scalar shape fails at runtime instead.
    with self.cached_session():
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"TensorListSplit requires element_shape to be at least of rank 1, "
          r"but saw: \[\]"):
        element_shape = array_ops.placeholder(dtype=dtypes.int32)
        l = list_ops.tensor_list_split([1., 2.],
                                       element_shape=element_shape,
                                       lengths=[1, 1])
        l.eval({element_shape: []})
  def testEagerOnlySplitWithScalarElementShapeFails(self):
    """Eager-mode counterpart: scalar element_shape raises immediately."""
    if context.executing_eagerly():
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"TensorListSplit requires element_shape to be at least of rank 1, "
          r"but saw: \[\]"):
        list_ops.tensor_list_split([1., 2.], element_shape=[], lengths=[1, 1])
  @test_util.run_deprecated_v1
  def testSkipEagerSplitWithIncompatibleTensorShapeAndElementShapeFails(self):
    """split rejects element_shape incompatible with the tensor's shape."""
    # Statically-known mismatch fails during shape inference.
    with self.assertRaisesRegexp(ValueError,
                                 r"Shapes must be equal rank, but are 2 and 1"):
      l = list_ops.tensor_list_split([[1.], [2.]],
                                     element_shape=[1],
                                     lengths=[1, 1])
    # A fed mismatching shape fails at runtime instead.
    with self.cached_session():
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"):
        element_shape = array_ops.placeholder(dtype=dtypes.int32)
        l = list_ops.tensor_list_split([[1.], [2.]],
                                       element_shape=element_shape,
                                       lengths=[1, 1])
        l.eval({element_shape: [1]})
  def testEagerOnlySplitWithIncompatibleTensorShapeAndElementShapeFails(self):
    """Eager-mode counterpart: incompatible element_shape raises immediately."""
    if context.executing_eagerly():
      with self.assertRaisesRegexp(
          errors.InvalidArgumentError,
          r"tensor shape \[2,1\] is not compatible with element_shape \[1\]"):
        list_ops.tensor_list_split([[1.], [2.]],
                                   element_shape=[1],
                                   lengths=[1, 1])
def testResizeGrow(self):
l = list_ops.tensor_list_from_tensor([1., 2.], element_shape=[])
l = list_ops.tensor_list_resize(l, 4)
self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 4)
self.assertEqual(
self.evaluate(
list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)),
1.)
self.assertEqual(
self.evaluate(
list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)),
2.)
def testResizeShrink(self):
l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[])
l = list_ops.tensor_list_resize(l, 2)
self.assertEqual(self.evaluate(list_ops.tensor_list_length(l)), 2)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)),
[1., 2.])
  def testResizeWithInvalidSizeFails(self):
    """resize rejects a negative target size."""
    with self.assertRaisesRegexp(
        errors.InvalidArgumentError,
        "TensorListSlice expects size to be non-negative"):
      l = list_ops.tensor_list_from_tensor([1., 2., 3.], element_shape=[])
      l = list_ops.tensor_list_resize(l, -1)
      self.evaluate(l)
  @test_util.run_deprecated_v1
  @test_util.enable_control_flow_v2
  def testSkipEagerResizeGrad(self):
    """Gradients flow through set_item with resize_if_index_out_of_bounds."""
    t = constant_op.constant([1., 2., 3.])
    l = list_ops.tensor_list_from_tensor(t, element_shape=[])
    l = list_ops.tensor_list_set_item(
        l, 3, 4., resize_if_index_out_of_bounds=True)
    t1 = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
    grad = gradients_impl.gradients(t1, t)[0]
    # The original three elements each contribute a unit gradient; the
    # appended element does not depend on t.
    self.assertAllEqual(self.evaluate(grad), [1., 1., 1.])
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| {
"content_hash": "1546be37689a88946527c48a210267db",
"timestamp": "",
"source": "github",
"line_count": 1394,
"max_line_length": 80,
"avg_line_length": 44.21520803443329,
"alnum_prop": 0.6405347524174184,
"repo_name": "apark263/tensorflow",
"id": "3f340aa07d65d014e5f9c117c8ded3779e62aa4e",
"size": "62325",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/list_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "561314"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "54581021"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "39024"
},
{
"name": "Go",
"bytes": "1373561"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "899393"
},
{
"name": "Jupyter Notebook",
"bytes": "2618454"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "75994"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14340"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "44616385"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "504099"
},
{
"name": "Smarty",
"bytes": "10072"
}
],
"symlink_target": ""
} |
"""Provides device triggers for Z-Wave JS."""
from __future__ import annotations
from typing import Any
import voluptuous as vol
from zwave_js_server.const import CommandClass
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event, state
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import (
config_validation as cv,
device_registry,
entity_registry,
)
from homeassistant.helpers.trigger import TriggerActionType, TriggerInfo
from homeassistant.helpers.typing import ConfigType
from . import trigger
from .config_validation import VALUE_SCHEMA
from .const import (
ATTR_COMMAND_CLASS,
ATTR_DATA_TYPE,
ATTR_ENDPOINT,
ATTR_EVENT,
ATTR_EVENT_LABEL,
ATTR_EVENT_TYPE,
ATTR_LABEL,
ATTR_PROPERTY,
ATTR_PROPERTY_KEY,
ATTR_TYPE,
ATTR_VALUE,
ATTR_VALUE_RAW,
DOMAIN,
ZWAVE_JS_NOTIFICATION_EVENT,
ZWAVE_JS_VALUE_NOTIFICATION_EVENT,
)
from .device_automation_helpers import (
CONF_SUBTYPE,
NODE_STATUSES,
async_bypass_dynamic_config_validation,
generate_config_parameter_subtype,
)
from .helpers import (
async_get_node_from_device_id,
async_get_node_status_sensor_entity_id,
check_type_schema_map,
copy_available_params,
get_value_state_schema,
get_zwave_value_from_config,
remove_keys_with_empty_values,
)
from .triggers.value_updated import (
ATTR_FROM,
ATTR_TO,
PLATFORM_TYPE as VALUE_UPDATED_PLATFORM_TYPE,
)
# Trigger types
# Each trigger type is namespaced "<platform>.<name>" so the trigger platform
# can be recovered from the type string (see get_trigger_platform_from_type).
ENTRY_CONTROL_NOTIFICATION = "event.notification.entry_control"
NOTIFICATION_NOTIFICATION = "event.notification.notification"
BASIC_VALUE_NOTIFICATION = "event.value_notification.basic"
CENTRAL_SCENE_VALUE_NOTIFICATION = "event.value_notification.central_scene"
SCENE_ACTIVATION_VALUE_NOTIFICATION = "event.value_notification.scene_activation"
CONFIG_PARAMETER_VALUE_UPDATED = f"{VALUE_UPDATED_PLATFORM_TYPE}.config_parameter"
VALUE_VALUE_UPDATED = f"{VALUE_UPDATED_PLATFORM_TYPE}.value"
NODE_STATUS = "state.node_status"
# Pairs each notification trigger type with the command class whose events
# feed it.
NOTIFICATION_EVENT_CC_MAPPINGS = (
    (ENTRY_CONTROL_NOTIFICATION, CommandClass.ENTRY_CONTROL),
    (NOTIFICATION_NOTIFICATION, CommandClass.NOTIFICATION),
)
# Event based trigger schemas
# All event triggers carry the command class that generated the event.
BASE_EVENT_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(ATTR_COMMAND_CLASS): vol.In([cc.value for cc in CommandClass]),
    }
)
NOTIFICATION_NOTIFICATION_SCHEMA = BASE_EVENT_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): NOTIFICATION_NOTIFICATION,
        # NOTE(review): the trailing "." in this key looks deliberate —
        # presumably ATTR_TYPE is "type", which would otherwise collide with
        # CONF_TYPE as a config key. Confirm against .const before changing.
        vol.Optional(f"{ATTR_TYPE}."): vol.Coerce(int),
        vol.Optional(ATTR_LABEL): cv.string,
        vol.Optional(ATTR_EVENT): vol.Coerce(int),
        vol.Optional(ATTR_EVENT_LABEL): cv.string,
    }
)
ENTRY_CONTROL_NOTIFICATION_SCHEMA = BASE_EVENT_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): ENTRY_CONTROL_NOTIFICATION,
        vol.Optional(ATTR_EVENT_TYPE): vol.Coerce(int),
        vol.Optional(ATTR_DATA_TYPE): vol.Coerce(int),
    }
)
# Value notification triggers identify a specific Z-Wave value by
# property/property key/endpoint and optionally match a value.
BASE_VALUE_NOTIFICATION_EVENT_SCHEMA = BASE_EVENT_SCHEMA.extend(
    {
        vol.Required(ATTR_PROPERTY): vol.Any(int, str),
        vol.Optional(ATTR_PROPERTY_KEY): vol.Any(int, str),
        vol.Required(ATTR_ENDPOINT): vol.Coerce(int),
        vol.Optional(ATTR_VALUE): vol.Coerce(int),
        vol.Required(CONF_SUBTYPE): cv.string,
    }
)
BASIC_VALUE_NOTIFICATION_SCHEMA = BASE_VALUE_NOTIFICATION_EVENT_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): BASIC_VALUE_NOTIFICATION,
    }
)
CENTRAL_SCENE_VALUE_NOTIFICATION_SCHEMA = BASE_VALUE_NOTIFICATION_EVENT_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): CENTRAL_SCENE_VALUE_NOTIFICATION,
    }
)
SCENE_ACTIVATION_VALUE_NOTIFICATION_SCHEMA = (
    BASE_VALUE_NOTIFICATION_EVENT_SCHEMA.extend(
        {
            vol.Required(CONF_TYPE): SCENE_ACTIVATION_VALUE_NOTIFICATION,
        }
    )
)
# State based trigger schemas
# State triggers are backed by an entity (e.g. the node status sensor).
BASE_STATE_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(CONF_ENTITY_ID): cv.entity_id,
    }
)
NODE_STATUS_SCHEMA = BASE_STATE_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): NODE_STATUS,
        vol.Optional(state.CONF_FROM): vol.In(NODE_STATUSES),
        vol.Optional(state.CONF_TO): vol.In(NODE_STATUSES),
        vol.Optional(state.CONF_FOR): cv.positive_time_period_dict,
    }
)
# zwave_js.value_updated based trigger schemas
# These identify a Z-Wave value and optionally constrain the old/new values.
BASE_VALUE_UPDATED_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
    {
        vol.Required(ATTR_COMMAND_CLASS): vol.In([cc.value for cc in CommandClass]),
        vol.Required(ATTR_PROPERTY): vol.Any(int, str),
        vol.Optional(ATTR_PROPERTY_KEY): vol.Any(None, vol.Coerce(int), str),
        vol.Optional(ATTR_ENDPOINT): vol.Any(None, vol.Coerce(int)),
        vol.Optional(ATTR_FROM): VALUE_SCHEMA,
        vol.Optional(ATTR_TO): VALUE_SCHEMA,
    }
)
CONFIG_PARAMETER_VALUE_UPDATED_SCHEMA = BASE_VALUE_UPDATED_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): CONFIG_PARAMETER_VALUE_UPDATED,
        vol.Required(CONF_SUBTYPE): cv.string,
    }
)
VALUE_VALUE_UPDATED_SCHEMA = BASE_VALUE_UPDATED_SCHEMA.extend(
    {
        vol.Required(CONF_TYPE): VALUE_VALUE_UPDATED,
    }
)
# Maps every trigger type to the schema that validates its config.
TYPE_SCHEMA_MAP = {
    ENTRY_CONTROL_NOTIFICATION: ENTRY_CONTROL_NOTIFICATION_SCHEMA,
    NOTIFICATION_NOTIFICATION: NOTIFICATION_NOTIFICATION_SCHEMA,
    BASIC_VALUE_NOTIFICATION: BASIC_VALUE_NOTIFICATION_SCHEMA,
    CENTRAL_SCENE_VALUE_NOTIFICATION: CENTRAL_SCENE_VALUE_NOTIFICATION_SCHEMA,
    SCENE_ACTIVATION_VALUE_NOTIFICATION: SCENE_ACTIVATION_VALUE_NOTIFICATION_SCHEMA,
    CONFIG_PARAMETER_VALUE_UPDATED: CONFIG_PARAMETER_VALUE_UPDATED_SCHEMA,
    VALUE_VALUE_UPDATED: VALUE_VALUE_UPDATED_SCHEMA,
    NODE_STATUS: NODE_STATUS_SCHEMA,
}
# First pass: only check that CONF_TYPE is known; the per-type schema is then
# applied by check_type_schema_map.
TRIGGER_TYPE_SCHEMA = vol.Schema(
    {vol.Required(CONF_TYPE): vol.In(TYPE_SCHEMA_MAP)}, extra=vol.ALLOW_EXTRA
)
TRIGGER_SCHEMA = vol.All(
    remove_keys_with_empty_values,
    TRIGGER_TYPE_SCHEMA,
    check_type_schema_map(TYPE_SCHEMA_MAP),
)
async def async_validate_trigger_config(
    hass: HomeAssistant, config: ConfigType
) -> ConfigType:
    """Validate config.

    Applies TRIGGER_SCHEMA, then — when the device's config entry is loaded —
    additionally resolves the referenced Z-Wave value for value_updated
    triggers to catch configs pointing at values the node does not have.

    Raises InvalidDeviceAutomationConfig if the device is unknown or the
    value lookup fails.
    """
    config = TRIGGER_SCHEMA(config)
    # We return early if the config entry for this device is not ready because we can't
    # validate the value without knowing the state of the device
    try:
        bypass_dynamic_config_validation = async_bypass_dynamic_config_validation(
            hass, config[CONF_DEVICE_ID]
        )
    except ValueError as err:
        raise InvalidDeviceAutomationConfig(
            f"Device {config[CONF_DEVICE_ID]} not found"
        ) from err
    if bypass_dynamic_config_validation:
        return config
    trigger_type = config[CONF_TYPE]
    if get_trigger_platform_from_type(trigger_type) == VALUE_UPDATED_PLATFORM_TYPE:
        try:
            node = async_get_node_from_device_id(hass, config[CONF_DEVICE_ID])
            get_zwave_value_from_config(node, config)
        except vol.Invalid as err:
            raise InvalidDeviceAutomationConfig(err.msg) from err
    return config
def get_trigger_platform_from_type(trigger_type: str) -> str:
    """Get trigger platform from Z-Wave JS trigger type."""
    parts = trigger_type.split(".")
    # Trigger types start with their platform, "."-delimited. The zwave_js
    # platform names themselves contain a ".", so they span two segments.
    if parts[0] == DOMAIN:
        return f"{parts[0]}.{parts[1]}"
    return parts[0]
async def async_get_triggers(
    hass: HomeAssistant, device_id: str
) -> list[dict[str, Any]]:
    """List device triggers for Z-Wave JS devices.

    Builds one trigger description dict per supported event/state source on
    the node behind ``device_id``.
    """
    dev_reg = device_registry.async_get(hass)
    node = async_get_node_from_device_id(hass, device_id, dev_reg)
    triggers: list[dict] = []
    # Keys shared by every trigger dict appended below.
    base_trigger = {
        CONF_PLATFORM: "device",
        CONF_DEVICE_ID: device_id,
        CONF_DOMAIN: DOMAIN,
    }
    # We can add a node status trigger if the node status sensor is enabled
    ent_reg = entity_registry.async_get(hass)
    entity_id = async_get_node_status_sensor_entity_id(
        hass, device_id, ent_reg, dev_reg
    )
    if (
        entity_id
        and (entity := ent_reg.async_get(entity_id)) is not None
        and not entity.disabled
    ):
        triggers.append(
            {**base_trigger, CONF_TYPE: NODE_STATUS, CONF_ENTITY_ID: entity_id}
        )
    # Handle notification event triggers: one per mapped command class that
    # the node actually implements.
    triggers.extend(
        [
            {**base_trigger, CONF_TYPE: event_type, ATTR_COMMAND_CLASS: command_class}
            for event_type, command_class in NOTIFICATION_EVENT_CC_MAPPINGS
            if any(cc.id == command_class for cc in node.command_classes)
        ]
    )
    # Handle central scene value notification event triggers
    triggers.extend(
        [
            {
                **base_trigger,
                CONF_TYPE: CENTRAL_SCENE_VALUE_NOTIFICATION,
                ATTR_PROPERTY: value.property_,
                ATTR_PROPERTY_KEY: value.property_key,
                ATTR_ENDPOINT: value.endpoint,
                ATTR_COMMAND_CLASS: CommandClass.CENTRAL_SCENE,
                CONF_SUBTYPE: f"Endpoint {value.endpoint} Scene {value.property_key}",
            }
            for value in node.get_command_class_values(
                CommandClass.CENTRAL_SCENE
            ).values()
            if value.property_ == "scene"
        ]
    )
    # Handle scene activation value notification event triggers
    triggers.extend(
        [
            {
                **base_trigger,
                CONF_TYPE: SCENE_ACTIVATION_VALUE_NOTIFICATION,
                ATTR_PROPERTY: value.property_,
                ATTR_PROPERTY_KEY: value.property_key,
                ATTR_ENDPOINT: value.endpoint,
                ATTR_COMMAND_CLASS: CommandClass.SCENE_ACTIVATION,
                CONF_SUBTYPE: f"Endpoint {value.endpoint}",
            }
            for value in node.get_command_class_values(
                CommandClass.SCENE_ACTIVATION
            ).values()
            if value.property_ == "sceneId"
        ]
    )
    # Handle basic value notification event triggers
    # Nodes will only send Basic CC value notifications if a compatibility flag is set
    if node.device_config.compat.get("treatBasicSetAsEvent", False):
        triggers.extend(
            [
                {
                    **base_trigger,
                    CONF_TYPE: BASIC_VALUE_NOTIFICATION,
                    ATTR_PROPERTY: value.property_,
                    ATTR_PROPERTY_KEY: value.property_key,
                    ATTR_ENDPOINT: value.endpoint,
                    ATTR_COMMAND_CLASS: CommandClass.BASIC,
                    CONF_SUBTYPE: f"Endpoint {value.endpoint}",
                }
                for value in node.get_command_class_values(CommandClass.BASIC).values()
                if value.property_ == "event"
            ]
        )
    # Generic value update event trigger
    triggers.append({**base_trigger, CONF_TYPE: VALUE_VALUE_UPDATED})
    # Config parameter value update event triggers
    triggers.extend(
        [
            {
                **base_trigger,
                CONF_TYPE: CONFIG_PARAMETER_VALUE_UPDATED,
                ATTR_PROPERTY: config_value.property_,
                ATTR_PROPERTY_KEY: config_value.property_key,
                ATTR_ENDPOINT: config_value.endpoint,
                ATTR_COMMAND_CLASS: config_value.command_class,
                CONF_SUBTYPE: generate_config_parameter_subtype(config_value),
            }
            for config_value in node.get_configuration_values().values()
        ]
    )
    return triggers
async def async_attach_trigger(
    hass: HomeAssistant,
    config: ConfigType,
    action: TriggerActionType,
    trigger_info: TriggerInfo,
) -> CALLBACK_TYPE:
    """Attach a trigger.

    Translates the device trigger config into the underlying event, state,
    or zwave_js value-updated trigger and delegates attachment to it.

    Returns:
        A callback that detaches the trigger.

    Raises:
        HomeAssistantError: if the trigger type is not handled.
    """
    trigger_type = config[CONF_TYPE]
    trigger_platform = get_trigger_platform_from_type(trigger_type)
    # Take input data from automation trigger UI and add it to the trigger we are
    # attaching to
    if trigger_platform == "event":
        event_data = {CONF_DEVICE_ID: config[CONF_DEVICE_ID]}
        event_config = {
            event.CONF_PLATFORM: "event",
            event.CONF_EVENT_DATA: event_data,
        }
        if ATTR_COMMAND_CLASS in config:
            event_data[ATTR_COMMAND_CLASS] = config[ATTR_COMMAND_CLASS]
        if trigger_type == ENTRY_CONTROL_NOTIFICATION:
            event_config[event.CONF_EVENT_TYPE] = ZWAVE_JS_NOTIFICATION_EVENT
            copy_available_params(config, event_data, [ATTR_EVENT_TYPE, ATTR_DATA_TYPE])
        elif trigger_type == NOTIFICATION_NOTIFICATION:
            event_config[event.CONF_EVENT_TYPE] = ZWAVE_JS_NOTIFICATION_EVENT
            copy_available_params(
                config, event_data, [ATTR_LABEL, ATTR_EVENT_LABEL, ATTR_EVENT]
            )
            # The notification "type" field is stored under a trailing-dot key
            # (f"{ATTR_TYPE}.") in the config; copy it only when meaningful.
            if (val := config.get(f"{ATTR_TYPE}.")) not in ("", None):
                event_data[ATTR_TYPE] = val
        elif trigger_type in (
            BASIC_VALUE_NOTIFICATION,
            CENTRAL_SCENE_VALUE_NOTIFICATION,
            SCENE_ACTIVATION_VALUE_NOTIFICATION,
        ):
            event_config[event.CONF_EVENT_TYPE] = ZWAVE_JS_VALUE_NOTIFICATION_EVENT
            copy_available_params(
                config, event_data, [ATTR_PROPERTY, ATTR_PROPERTY_KEY, ATTR_ENDPOINT]
            )
            if ATTR_VALUE in config:
                event_data[ATTR_VALUE_RAW] = config[ATTR_VALUE]
        else:
            raise HomeAssistantError(f"Unhandled trigger type {trigger_type}")
        event_config = event.TRIGGER_SCHEMA(event_config)
        return await event.async_attach_trigger(
            hass, event_config, action, trigger_info, platform_type="device"
        )
    if trigger_platform == "state":
        if trigger_type == NODE_STATUS:
            state_config = {state.CONF_PLATFORM: "state"}
            state_config[state.CONF_ENTITY_ID] = config[CONF_ENTITY_ID]
            copy_available_params(
                config, state_config, [state.CONF_FOR, state.CONF_FROM, state.CONF_TO]
            )
        else:
            raise HomeAssistantError(f"Unhandled trigger type {trigger_type}")
        state_config = await state.async_validate_trigger_config(hass, state_config)
        return await state.async_attach_trigger(
            hass, state_config, action, trigger_info, platform_type="device"
        )
    if trigger_platform == VALUE_UPDATED_PLATFORM_TYPE:
        zwave_js_config = {
            state.CONF_PLATFORM: trigger_platform,
            CONF_DEVICE_ID: config[CONF_DEVICE_ID],
        }
        copy_available_params(
            config,
            zwave_js_config,
            [
                ATTR_COMMAND_CLASS,
                ATTR_PROPERTY,
                ATTR_PROPERTY_KEY,
                ATTR_ENDPOINT,
                ATTR_FROM,
                ATTR_TO,
            ],
        )
        zwave_js_config = await trigger.async_validate_trigger_config(
            hass, zwave_js_config
        )
        return await trigger.async_attach_trigger(
            hass, zwave_js_config, action, trigger_info
        )
    raise HomeAssistantError(f"Unhandled trigger type {trigger_type}")
async def async_get_trigger_capabilities(
    hass: HomeAssistant, config: ConfigType
) -> dict[str, vol.Schema]:
    """List trigger capabilities.

    Returns a dict with an "extra_fields" voluptuous schema describing the
    additional inputs the automation UI should show for the configured
    trigger type, or an empty dict when there are none.
    """
    trigger_type = config[CONF_TYPE]
    node = async_get_node_from_device_id(hass, config[CONF_DEVICE_ID])
    # Add additional fields to the automation trigger UI
    if trigger_type == NOTIFICATION_NOTIFICATION:
        return {
            "extra_fields": vol.Schema(
                {
                    # Trailing-dot key matches the config key read when
                    # attaching this trigger type.
                    vol.Optional(f"{ATTR_TYPE}."): cv.string,
                    vol.Optional(ATTR_LABEL): cv.string,
                    vol.Optional(ATTR_EVENT): cv.string,
                    vol.Optional(ATTR_EVENT_LABEL): cv.string,
                }
            )
        }
    if trigger_type == ENTRY_CONTROL_NOTIFICATION:
        return {
            "extra_fields": vol.Schema(
                {
                    vol.Optional(ATTR_EVENT_TYPE): cv.string,
                    vol.Optional(ATTR_DATA_TYPE): cv.string,
                }
            )
        }
    if trigger_type == NODE_STATUS:
        return {
            "extra_fields": vol.Schema(
                {
                    vol.Optional(state.CONF_FROM): vol.In(NODE_STATUSES),
                    vol.Optional(state.CONF_TO): vol.In(NODE_STATUSES),
                    vol.Optional(state.CONF_FOR): cv.positive_time_period_dict,
                }
            )
        }
    if trigger_type in (
        BASIC_VALUE_NOTIFICATION,
        CENTRAL_SCENE_VALUE_NOTIFICATION,
        SCENE_ACTIVATION_VALUE_NOTIFICATION,
    ):
        value_schema = get_value_state_schema(get_zwave_value_from_config(node, config))
        # We should never get here, but just in case we should add a guard
        if not value_schema:
            return {}
        return {"extra_fields": vol.Schema({vol.Optional(ATTR_VALUE): value_schema})}
    if trigger_type == CONFIG_PARAMETER_VALUE_UPDATED:
        value_schema = get_value_state_schema(get_zwave_value_from_config(node, config))
        if not value_schema:
            return {}
        return {
            "extra_fields": vol.Schema(
                {
                    vol.Optional(ATTR_FROM): value_schema,
                    vol.Optional(ATTR_TO): value_schema,
                }
            )
        }
    if trigger_type == VALUE_VALUE_UPDATED:
        # Only show command classes on this node and exclude Configuration CC since it
        # is already covered
        return {
            "extra_fields": vol.Schema(
                {
                    vol.Required(ATTR_COMMAND_CLASS): vol.In(
                        {
                            CommandClass(cc.id).value: cc.name
                            for cc in sorted(
                                node.command_classes, key=lambda cc: cc.name
                            )
                            if cc.id != CommandClass.CONFIGURATION
                        }
                    ),
                    vol.Required(ATTR_PROPERTY): cv.string,
                    vol.Optional(ATTR_PROPERTY_KEY): cv.string,
                    vol.Optional(ATTR_ENDPOINT): cv.string,
                    vol.Optional(ATTR_FROM): cv.string,
                    vol.Optional(ATTR_TO): cv.string,
                }
            )
        }
    # Unknown / no extra fields for this trigger type.
    return {}
| {
"content_hash": "80d6558c4d6570fb7d1fe60d9ee2072a",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 88,
"avg_line_length": 34.31147540983606,
"alnum_prop": 0.6162340075383553,
"repo_name": "mezz64/home-assistant",
"id": "76a7f134d17a5dfdd4e6d0ccd074acfe73706da8",
"size": "18837",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zwave_js/device_trigger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
# Packaging script for ouimeaux (open source control for Belkin WeMo devices).
import os
import sys
# Resolve paths relative to this file so the script works from any cwd.
here = lambda *a: os.path.join(os.path.dirname(__file__), *a)
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup
# Convenience shortcut: `python setup.py publish` builds an sdist and uploads it.
if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    sys.exit()
# Long description is the README plus the changelog (marker comment removed).
readme = open(here('README.md')).read()
history = open(here('HISTORY.rst')).read().replace('.. :changelog:', '')
# Runtime dependencies are maintained in requirements.txt, one per line.
requirements = [x.strip() for x in open(here('requirements.txt')).readlines()]
setup(
    name='ouimeaux',
    version='0.8.2',
    description='Open source control for Belkin WeMo devices',
    long_description=readme + '\n\n' + history,
    author='Ian McCracken',
    author_email='ian.mccracken@gmail.com',
    url='https://github.com/iancmcc/ouimeaux',
    packages=[
        'ouimeaux',
    ],
    package_dir={'ouimeaux': 'ouimeaux'},
    include_package_data=True,
    install_requires=requirements,
    license="BSD",
    zip_safe=False,
    keywords='ouimeaux',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Topic :: Home Automation',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    entry_points={
        'console_scripts': [
            'wemo = ouimeaux.cli:wemo'
        ]
    },
    # Optional extras: installing ouimeaux[server] pulls in the Flask-based
    # server dependencies (added CORS as dependency).
    extras_require = {
        'server': [
            "flask-restful",
            "flask-basicauth",
            "gevent-socketio",
            "flask-cors",
        ],
    },
    test_suite='tests',
)
| {
"content_hash": "080f66e4712e444ba57c9b0521e05826",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 78,
"avg_line_length": 28.46969696969697,
"alnum_prop": 0.5816923895689197,
"repo_name": "iancmcc/ouimeaux",
"id": "d3ef8b02366a3bb8662b7d98a7f47958b9c840b5",
"size": "1926",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "387"
},
{
"name": "HTML",
"bytes": "3772"
},
{
"name": "JavaScript",
"bytes": "103426"
},
{
"name": "Makefile",
"bytes": "1186"
},
{
"name": "Python",
"bytes": "230246"
}
],
"symlink_target": ""
} |
"""
This module contains helper functions for controlling caching. It does so by
managing the "Vary" header of responses. It includes functions to patch the
header of response objects directly and decorators that change functions to do
that header-patching themselves.
For information on the Vary header, see:
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.44
Essentially, the "Vary" HTTP header defines which headers a cache should take
into account when building its cache key. Requests with the same path but
different header content for headers named in "Vary" need to get different
cache keys to prevent delivery of wrong content.
An example: i18n middleware would need to distinguish caches by the
"Accept-language" header.
"""
import re
import time
from airy.core.conf import settings
from airy.core.cache import get_cache
from airy.utils.encoding import smart_str, iri_to_uri
from airy.utils.http import http_date
from airy.utils.hashcompat import md5_constructor
from airy.utils.translation import get_language
from airy.http import HttpRequest
# Splits comma-separated HTTP header values, tolerating surrounding whitespace.
cc_delim_re = re.compile(r'\s*,\s*')
def patch_cache_control(response, **kwargs):
    """
    This function patches the Cache-Control header by adding all
    keyword arguments to it. The transformation is as follows:

    * All keyword parameter names are turned to lowercase, and underscores
      are converted to hyphens.
    * If the value of a parameter is True (exactly True, not just a
      true value), only the parameter name is added to the header.
    * All other parameters are added with their value, after applying
      str() to it.
    """
    def dictitem(s):
        # Split a "directive" or "directive=value" token into (name, value);
        # valueless directives map to True.
        t = s.split('=', 1)
        if len(t) > 1:
            return (t[0].lower(), t[1])
        else:
            return (t[0].lower(), True)

    def dictvalue(t):
        # Inverse of dictitem: serialize a (name, value) pair back to a token.
        if t[1] is True:
            return t[0]
        else:
            return t[0] + '=' + smart_str(t[1])

    if response.has_header('Cache-Control'):
        cc = cc_delim_re.split(response['Cache-Control'])
        cc = dict([dictitem(el) for el in cc])
    else:
        cc = {}
    # If there's already a max-age header but we're being asked to set a new
    # max-age, use the minimum of the two ages. In practice this happens when
    # a decorator and a piece of middleware both operate on a given view.
    if 'max-age' in cc and 'max_age' in kwargs:
        # The existing value was parsed from the header and is a *string*;
        # convert to int before comparing, otherwise min() compares str
        # against int (TypeError on Python 3, wrong ordering on Python 2).
        try:
            kwargs['max_age'] = min(int(cc['max-age']), kwargs['max_age'])
        except (ValueError, TypeError):
            # Malformed existing max-age: keep the caller-supplied value.
            pass
    # Allow overriding private caching and vice versa
    if 'private' in cc and 'public' in kwargs:
        del cc['private']
    elif 'public' in cc and 'private' in kwargs:
        del cc['public']
    for (k, v) in kwargs.items():
        cc[k.replace('_', '-')] = v
    cc = ', '.join([dictvalue(el) for el in cc.items()])
    response['Cache-Control'] = cc
def get_max_age(response):
    """
    Return the max-age from the response Cache-Control header as an integer,
    or ``None`` if it is absent or not a valid integer.
    """
    if not response.has_header('Cache-Control'):
        return
    directives = dict(
        _to_tuple(token)
        for token in cc_delim_re.split(response['Cache-Control']))
    try:
        return int(directives['max-age'])
    except (KeyError, ValueError, TypeError):
        return None
def patch_response_headers(response, cache_timeout=None):
    """
    Add useful cache headers (ETag, Last-Modified, Expires, Cache-Control)
    to the given HttpResponse object.

    Each header is only added if it isn't already set. ``cache_timeout`` is
    in seconds; settings.CACHE_MIDDLEWARE_SECONDS is used by default.
    """
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    # A negative max-age is meaningless; clamp to zero.
    cache_timeout = max(cache_timeout, 0)
    if settings.USE_ETAGS and not response.has_header('ETag'):
        digest = md5_constructor(response.content).hexdigest()
        response['ETag'] = '"%s"' % digest
    if not response.has_header('Last-Modified'):
        response['Last-Modified'] = http_date()
    if not response.has_header('Expires'):
        response['Expires'] = http_date(time.time() + cache_timeout)
    patch_cache_control(response, max_age=cache_timeout)
def add_never_cache_headers(response):
    """
    Adds headers to a response to indicate that a page should never be cached.
    """
    # The negative timeout is clamped to 0 by patch_response_headers, which
    # yields max-age=0 and an Expires header set to the current time.
    patch_response_headers(response, cache_timeout=-1)
def patch_vary_headers(response, newheaders):
    """
    Add (or update) the "Vary" header in the given HttpResponse object.

    ``newheaders`` is a list of header names that should be in "Vary";
    names already present are kept as-is.
    """
    # Preserve the original ordering: cache implementations may hash the raw
    # Vary string (e.g. with MD5), so reordering would change cache keys.
    if response.has_header('Vary'):
        current = cc_delim_re.split(response['Vary'])
    else:
        current = []
    # Header names are case-insensitive, so compare lowercased.
    seen = set(header.lower() for header in current)
    to_add = [header for header in newheaders if header.lower() not in seen]
    response['Vary'] = ', '.join(current + to_add)
def has_vary_header(response, header_query):
    """
    Return True if the response names ``header_query`` in its Vary header.
    """
    if not response.has_header('Vary'):
        return False
    named = set(
        header.lower() for header in cc_delim_re.split(response['Vary']))
    return header_query.lower() in named
def _i18n_cache_key_suffix(request, cache_key):
    """If i18n is enabled, return ``cache_key`` with the locale appended."""
    if not settings.USE_I18N:
        return cache_key
    # Prefer a LANGUAGE_CODE set on the request (e.g. by LocaleMiddleware),
    # falling back to the active language, which itself can fall back to
    # settings.LANGUAGE_CODE.
    lang = getattr(request, 'LANGUAGE_CODE', get_language())
    return '%s.%s' % (cache_key, lang)
def _generate_cache_key(request, method, headerlist, key_prefix):
    """Returns a cache key from the headers given in the header list.

    Args:
        request: The current HttpRequest.
        method: HTTP method string to key on. Passed explicitly (rather than
            read from the request) so callers such as get_cache_key can look
            up the cached GET response regardless of the actual method.
        headerlist: META keys whose values participate in the key.
        key_prefix: Prefix string namespacing the key.
    """
    ctx = md5_constructor()
    for header in headerlist:
        value = request.META.get(header, None)
        if value is not None:
            ctx.update(value)
    path = md5_constructor(iri_to_uri(request.get_full_path()))
    # Use the explicit ``method`` argument instead of request.method;
    # previously the argument was silently ignored, so the method='GET'
    # default in get_cache_key had no effect.
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, method, path.hexdigest(), ctx.hexdigest())
    return _i18n_cache_key_suffix(request, cache_key)
def _generate_cache_header_key(key_prefix, request):
    """Return the cache key under which the learned header list is stored."""
    url_hash = md5_constructor(iri_to_uri(request.get_full_path()))
    header_key = 'views.decorators.cache.cache_header.%s.%s' % (
        key_prefix, url_hash.hexdigest())
    return _i18n_cache_key_suffix(request, header_key)
def get_cache_key(request, key_prefix=None, method='GET', cache=None):
    """
    Return a cache key based on the request path and query, or None.

    Usable in the request phase: the list of headers to take into account is
    pulled from the registry stored in the cache. If no header list has been
    learned for this path yet, the page needs to be rebuilt, so this function
    returns None.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    header_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
    headerlist = cache.get(header_key, None)
    if headerlist is None:
        return None
    return _generate_cache_key(request, method, headerlist, key_prefix)
def learn_cache_key(request, response, cache_timeout=None, key_prefix=None, cache=None):
    """
    Learns what headers to take into account for some request path from the
    response object. It stores those headers in a global path registry so that
    later access to that path will know what headers to take into account
    without building the response object itself. The headers are named in the
    Vary header of the response, but we want to prevent response generation.

    The list of headers to use for cache key generation is stored in the same
    cache as the pages themselves. If the cache ages some data out of the
    cache, this just means that we have to build the response once to get at
    the Vary header and so at the list of headers to use for the cache key.
    """
    if key_prefix is None:
        key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
    if cache_timeout is None:
        cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS
    cache_key = _generate_cache_header_key(key_prefix, request)
    if cache is None:
        cache = get_cache(settings.CACHE_MIDDLEWARE_ALIAS)
    if response.has_header('Vary'):
        # Convert header names to WSGI META form, e.g.
        # 'Accept-Encoding' -> 'HTTP_ACCEPT_ENCODING'.
        headerlist = ['HTTP_'+header.upper().replace('-', '_')
                      for header in cc_delim_re.split(response['Vary'])]
        cache.set(cache_key, headerlist, cache_timeout)
        return _generate_cache_key(request, request.method, headerlist, key_prefix)
    else:
        # if there is no Vary header, we still need a cache key
        # for the request.get_full_path()
        cache.set(cache_key, [], cache_timeout)
        return _generate_cache_key(request, request.method, [], key_prefix)
def _to_tuple(s):
t = s.split('=',1)
if len(t) == 2:
return t[0].lower(), t[1]
return t[0].lower(), True
| {
"content_hash": "29d0e1c7bb1c5dca17c34eeb7ba0a72b",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 88,
"avg_line_length": 40.65546218487395,
"alnum_prop": 0.6710417527904092,
"repo_name": "letolab/airy",
"id": "8049e4d22eb774149c2f10ab8d32f94f61af5653",
"size": "9676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airy/utils/cache.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "115012"
},
{
"name": "Python",
"bytes": "678842"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
} |
r"""Simple transfer learning with Inception v3 or Mobilenet models.
With support for TensorBoard.
This example shows how to take a Inception v3 or Mobilenet model trained on
ImageNet images, and train a new top layer that can recognize other classes of
images.
The top layer receives as input a 2048-dimensional vector (1001-dimensional for
Mobilenet) for each image. We train a softmax layer on top of this
representation. Assuming the softmax layer contains N labels, this corresponds
to learning N + 2048*N (or 1001*N) model parameters corresponding to the
learned biases and weights.
Here's an example, which assumes you have a folder containing class-named
subfolders, each full of images for each label. The example folder flower_photos
should have a structure like this:
~/flower_photos/daisy/photo1.jpg
~/flower_photos/daisy/photo2.jpg
...
~/flower_photos/rose/anotherphoto77.jpg
...
~/flower_photos/sunflower/somepicture.jpg
The subfolder names are important, since they define what label is applied to
each image, but the filenames themselves don't matter. Once your images are
prepared, you can run the training with a command like this:
```bash
bazel build tensorflow/examples/image_retraining:retrain && \
bazel-bin/tensorflow/examples/image_retraining/retrain \
--image_dir ~/flower_photos
```
Or, if you have a pip installation of tensorflow, `retrain.py` can be run
without bazel:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos
```
You can replace the image_dir argument with any folder containing subfolders of
images. The label for each image is taken from the name of the subfolder it's
in.
This produces a new model file that can be loaded and run by any TensorFlow
program, for example the label_image sample code.
By default this script will use the high accuracy, but comparatively large and
slow Inception v3 model architecture. It's recommended that you start with this
to validate that you have gathered good training data, but if you want to deploy
on resource-limited platforms, you can try the `--architecture` flag with a
Mobilenet model. For example:
Run floating-point version of mobilenet:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos --architecture mobilenet_1.0_224
```
Run quantized version of mobilenet:
```bash
python tensorflow/examples/image_retraining/retrain.py \
--image_dir ~/flower_photos/ --architecture mobilenet_1.0_224_quantized
```
There are 32 different Mobilenet models to choose from, with a variety of file
size and latency options. The first number can be '1.0', '0.75', '0.50', or
'0.25' to control the size, and the second controls the input image size, either
'224', '192', '160', or '128', with smaller sizes running faster. See
https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
for more information on Mobilenet.
To use with TensorBoard:
By default, this script will log summaries to /tmp/retrain_logs directory
Visualize the summaries with this command:
tensorboard --logdir /tmp/retrain_logs
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from datetime import datetime
import hashlib
import os.path
import random
import re
import sys
import tarfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
from tensorflow.contrib.quantize.python import quant_ops
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import gfile
from tensorflow.python.util import compat
FLAGS = None
# These are all parameters that are tied to the particular model architecture
# we're using for Inception v3. These include things like tensor names and their
# sizes. If you want to adapt this script to work with another model, you will
# need to update these to reflect the values in the network you're using.
# Upper bound on images per label: the filename hashing in create_image_lists
# maps names into [0, MAX_NUM_IMAGES_PER_CLASS], so additional images beyond
# this count can never be selected.
MAX_NUM_IMAGES_PER_CLASS = 2 ** 27 - 1  # ~134M
def create_image_lists(image_dir, testing_percentage, validation_percentage):
  """Builds a list of training images from the file system.

  Analyzes the sub folders in the image directory, splits them into stable
  training, testing, and validation sets, and returns a data structure
  describing the lists of images for each label and their paths.

  Args:
    image_dir: String path to a folder containing subfolders of images.
    testing_percentage: Integer percentage of the images to reserve for tests.
    validation_percentage: Integer percentage of images reserved for validation.

  Returns:
    A dictionary containing an entry for each label subfolder, with images split
    into training, testing, and validation sets within each label.
  """
  if not gfile.Exists(image_dir):
    tf.logging.error("Image directory '" + image_dir + "' not found.")
    return None
  result = {}
  sub_dirs = [x[0] for x in gfile.Walk(image_dir)]
  # The root directory comes first, so skip it.
  is_root_dir = True
  for sub_dir in sub_dirs:
    if is_root_dir:
      is_root_dir = False
      continue
    extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
    file_list = []
    dir_name = os.path.basename(sub_dir)
    if dir_name == image_dir:
      continue
    tf.logging.info("Looking for images in '" + dir_name + "'")
    for extension in extensions:
      file_glob = os.path.join(image_dir, dir_name, '*.' + extension)
      file_list.extend(gfile.Glob(file_glob))
    # On case-insensitive file systems (Windows, default macOS) the upper- and
    # lower-case globs match the same files, so de-duplicate while preserving
    # order to avoid counting each image twice.
    seen_files = set()
    unique_file_list = []
    for file_name in file_list:
      if file_name not in seen_files:
        seen_files.add(file_name)
        unique_file_list.append(file_name)
    file_list = unique_file_list
    if not file_list:
      tf.logging.warning('No files found')
      continue
    if len(file_list) < 20:
      tf.logging.warning(
          'WARNING: Folder has less than 20 images, which may cause issues.')
    elif len(file_list) > MAX_NUM_IMAGES_PER_CLASS:
      tf.logging.warning(
          'WARNING: Folder {} has more than {} images. Some images will '
          'never be selected.'.format(dir_name, MAX_NUM_IMAGES_PER_CLASS))
    label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())
    training_images = []
    testing_images = []
    validation_images = []
    for file_name in file_list:
      base_name = os.path.basename(file_name)
      # We want to ignore anything after '_nohash_' in the file name when
      # deciding which set to put an image in, the data set creator has a way of
      # grouping photos that are close variations of each other. For example
      # this is used in the plant disease data set to group multiple pictures of
      # the same leaf.
      hash_name = re.sub(r'_nohash_.*$', '', file_name)
      # This looks a bit magical, but we need to decide whether this file should
      # go into the training, testing, or validation sets, and we want to keep
      # existing files in the same set even if more files are subsequently
      # added.
      # To do that, we need a stable way of deciding based on just the file name
      # itself, so we do a hash of that and then use that to generate a
      # probability value that we use to assign it.
      hash_name_hashed = hashlib.sha1(compat.as_bytes(hash_name)).hexdigest()
      percentage_hash = ((int(hash_name_hashed, 16) %
                          (MAX_NUM_IMAGES_PER_CLASS + 1)) *
                         (100.0 / MAX_NUM_IMAGES_PER_CLASS))
      if percentage_hash < validation_percentage:
        validation_images.append(base_name)
      elif percentage_hash < (testing_percentage + validation_percentage):
        testing_images.append(base_name)
      else:
        training_images.append(base_name)
    result[label_name] = {
        'dir': dir_name,
        'training': training_images,
        'testing': testing_images,
        'validation': validation_images,
    }
  return result
def get_image_path(image_lists, label_name, index, image_dir, category):
  """Return the file system path of one image for a label.

  Args:
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Int offset of the image we want; taken modulo the number of
        available images, so it can be arbitrarily large.
    image_dir: Root folder string of the subfolders containing the training
        images.
    category: Name string of set to pull images from - training, testing, or
        validation.

  Returns:
    File system path string to an image that meets the requested parameters.
  """
  if label_name not in image_lists:
    tf.logging.fatal('Label does not exist %s.', label_name)
  label_lists = image_lists[label_name]
  if category not in label_lists:
    tf.logging.fatal('Category does not exist %s.', category)
  category_list = label_lists[category]
  if not category_list:
    tf.logging.fatal('Label %s has no images in the category %s.',
                     label_name, category)
  chosen_name = category_list[index % len(category_list)]
  return os.path.join(image_dir, label_lists['dir'], chosen_name)
def get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,
                        category, architecture):
  """Return the path of the cached bottleneck file for one image.

  Args:
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want; taken modulo the number of
        available images, so it can be arbitrarily large.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    category: Name string of set to pull images from - training, testing, or
        validation.
    architecture: The name of the model architecture.

  Returns:
    File system path string to the bottleneck cache file.
  """
  image_path = get_image_path(image_lists, label_name, index, bottleneck_dir,
                              category)
  return '%s_%s.txt' % (image_path, architecture)
def create_model_graph(model_info):
  """"Creates a graph from saved GraphDef file and returns a Graph object.

  Args:
    model_info: Dictionary containing information about the model architecture;
        the 'model_file_name', 'bottleneck_tensor_name' and
        'resized_input_tensor_name' keys are used here.

  Returns:
    Graph holding the trained Inception network, and various tensors we'll be
    manipulating.
  """
  with tf.Graph().as_default() as graph:
    # Model file is expected under FLAGS.model_dir (see
    # maybe_download_and_extract).
    model_path = os.path.join(FLAGS.model_dir, model_info['model_file_name'])
    print('Model path: ', model_path)
    with gfile.FastGFile(model_path, 'rb') as f:
      graph_def = tf.GraphDef()
      graph_def.ParseFromString(f.read())
      # Import the frozen GraphDef into the new graph and grab handles to the
      # bottleneck output and the resized-image input tensors by name.
      bottleneck_tensor, resized_input_tensor = (tf.import_graph_def(
          graph_def,
          name='',
          return_elements=[
              model_info['bottleneck_tensor_name'],
              model_info['resized_input_tensor_name'],
          ]))
  return graph, bottleneck_tensor, resized_input_tensor
def run_bottleneck_on_image(sess, image_data, image_data_tensor,
                            decoded_image_tensor, resized_input_tensor,
                            bottleneck_tensor):
  """Runs inference on an image to extract the 'bottleneck' summary layer.

  Args:
    sess: Current active TensorFlow Session.
    image_data: String of raw JPEG data.
    image_data_tensor: Input data layer in the graph.
    decoded_image_tensor: Output of initial image resizing and preprocessing.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: Layer before the final softmax.

  Returns:
    Numpy array of bottleneck values.
  """
  # Stage 1: decode the raw JPEG, resize it and rescale the pixel values.
  preprocessed = sess.run(decoded_image_tensor,
                          {image_data_tensor: image_data})
  # Stage 2: feed the preprocessed image through the recognition network and
  # drop the singleton batch dimension from the result.
  bottleneck = sess.run(bottleneck_tensor,
                        {resized_input_tensor: preprocessed})
  return np.squeeze(bottleneck)
def maybe_download_and_extract(data_url):
  """Download and extract model tar file.

  If the pretrained model we're using doesn't already exist, this function
  downloads it from the TensorFlow.org website and unpacks it into a directory.

  Args:
    data_url: Web location of the tar file containing the pretrained model.
  """
  dest_directory = FLAGS.model_dir
  if not os.path.exists(dest_directory):
    os.makedirs(dest_directory)
  filename = data_url.split('/')[-1]
  filepath = os.path.join(dest_directory, filename)
  if not os.path.exists(filepath):

    def _progress(count, block_size, total_size):
      # Simple carriage-return progress indicator.
      sys.stdout.write('\r>> Downloading %s %.1f%%' %
                       (filename,
                        float(count * block_size) / float(total_size) * 100.0))
      sys.stdout.flush()

    filepath, _ = urllib.request.urlretrieve(data_url, filepath, _progress)
    print()
    statinfo = os.stat(filepath)
    # tf.logging uses printf-style formatting: the previous print-style call
    # passed extra positional args with no matching %-specifiers, which
    # triggers a string-formatting error in the logging machinery.
    tf.logging.info('Successfully downloaded %s %d bytes.',
                    filename, statinfo.st_size)
    print('Extracting file from ', filepath)
    tarfile.open(filepath, 'r:gz').extractall(dest_directory)
  else:
    print('Not extracting or downloading files, model already present in disk')
def ensure_dir_exists(dir_name):
  """Makes sure the folder exists on disk.

  Creation is race-safe: if the directory appears between the attempt to
  create it and now (e.g. a concurrent run caching bottlenecks into the same
  tree), the spurious OSError is swallowed. Any other failure (permissions,
  a regular file blocking the path) is re-raised.

  Args:
    dir_name: Path string to the folder we want to create.
  """
  try:
    os.makedirs(dir_name)
  except OSError:
    # Only ignore the error when the path really is a directory now; this
    # replaces the racy exists()-then-makedirs() check.
    if not os.path.isdir(dir_name):
      raise
# NOTE(review): module-level cache mapping bottleneck file paths to values;
# it appears unused in this part of the file — confirm before removing.
bottleneck_path_2_bottleneck_values = {}
def create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor):
  """Create a single bottleneck file.

  Computes the bottleneck activations for one image and writes them to
  bottleneck_path as a comma-separated list of float values.
  """
  tf.logging.info('Creating bottleneck at ' + bottleneck_path)
  image_path = get_image_path(image_lists, label_name, index,
                              image_dir, category)
  if not gfile.Exists(image_path):
    tf.logging.fatal('File does not exist %s', image_path)
  image_data = gfile.FastGFile(image_path, 'rb').read()
  try:
    values = run_bottleneck_on_image(
        sess, image_data, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor)
  except Exception as e:
    # Surface the failing image path alongside the underlying error.
    raise RuntimeError('Error during processing file %s (%s)' % (image_path,
                                                                 str(e)))
  # Persist as plain text so the cache is human-inspectable.
  serialized = ','.join(str(v) for v in values)
  with open(bottleneck_path, 'w') as cache_file:
    cache_file.write(serialized)
def get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,
                             category, bottleneck_dir, jpeg_data_tensor,
                             decoded_image_tensor, resized_input_tensor,
                             bottleneck_tensor, architecture):
  """Retrieves or calculates bottleneck values for an image.

  Results are cached on disk under bottleneck_dir; a cached file is reused
  when present, and regenerated once if its contents fail to parse.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    label_name: Label string we want to get an image for.
    index: Integer offset of the image we want. This will be modulo-ed by the
        available number of images for the label, so it can be arbitrarily
        large.
    image_dir: Root folder string of the subfolders containing the training
        images.
    category: Name string of which set to pull images from - training,
        testing, or validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: The tensor to feed loaded jpeg data into.
    decoded_image_tensor: The output of decoding and resizing the image.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The output tensor for the bottleneck values.
    architecture: The name of the model architecture.

  Returns:
    List of float values produced by the bottleneck layer for the image.
  """
  sub_dir = image_lists[label_name]['dir']
  ensure_dir_exists(os.path.join(bottleneck_dir, sub_dir))
  bottleneck_path = get_bottleneck_path(image_lists, label_name, index,
                                        bottleneck_dir, category, architecture)

  def _regenerate():
    # (Re)compute the bottleneck values and write them to the cache file.
    create_bottleneck_file(bottleneck_path, image_lists, label_name, index,
                           image_dir, category, sess, jpeg_data_tensor,
                           decoded_image_tensor, resized_input_tensor,
                           bottleneck_tensor)

  def _read_cached():
    with open(bottleneck_path, 'r') as cache_file:
      return cache_file.read()

  if not os.path.exists(bottleneck_path):
    _regenerate()
  serialized = _read_cached()
  try:
    return [float(v) for v in serialized.split(',')]
  except ValueError:
    tf.logging.warning('Invalid float found, recreating bottleneck')
  # The cached file was corrupt: rebuild it once and parse again, letting any
  # remaining exception propagate since a freshly created file should parse.
  _regenerate()
  serialized = _read_cached()
  return [float(v) for v in serialized.split(',')]
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
                      jpeg_data_tensor, decoded_image_tensor,
                      resized_input_tensor, bottleneck_tensor, architecture):
  """Ensures all the training, testing, and validation bottlenecks are cached.

  Because we're likely to read the same image multiple times (if there are no
  distortions applied during training) it can speed things up a lot if we
  calculate the bottleneck layer values once for each image during
  preprocessing, and then just read those cached values repeatedly during
  training.

  Args:
    sess: The current active TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    image_dir: Root folder string of the subfolders containing the training
        images.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    jpeg_data_tensor: Input tensor for jpeg data from file.
    decoded_image_tensor: The output of decoding and resizing the image.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The penultimate output layer of the graph.
    architecture: The name of the model architecture.

  Returns:
    Nothing.
  """
  ensure_dir_exists(bottleneck_dir)
  created = 0
  for label_name, label_lists in image_lists.items():
    for category in ('training', 'testing', 'validation'):
      for index, _ in enumerate(label_lists[category]):
        get_or_create_bottleneck(
            sess, image_lists, label_name, index, image_dir, category,
            bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
            resized_input_tensor, bottleneck_tensor, architecture)
        created += 1
        # Periodic progress logging for long caching runs.
        if created % 100 == 0:
          tf.logging.info(str(created) + ' bottleneck files created.')
def get_random_cached_bottlenecks(sess, image_lists, how_many, category,
                                  bottleneck_dir, image_dir, jpeg_data_tensor,
                                  decoded_image_tensor, resized_input_tensor,
                                  bottleneck_tensor, architecture):
  """Retrieves bottleneck values for cached images.

  If no distortions are being applied, this function can retrieve the cached
  bottleneck values directly from disk for images. It picks a random set of
  images from the specified category.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: If positive, a random sample of this size will be chosen.
        If negative, all bottlenecks will be retrieved.
    category: Name string of which set to pull from - training, testing, or
        validation.
    bottleneck_dir: Folder string holding cached files of bottleneck values.
    image_dir: Root folder string of the subfolders containing the training
        images.
    jpeg_data_tensor: The layer to feed jpeg image data into.
    decoded_image_tensor: The output of decoding and resizing the image.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.
    architecture: The name of the model architecture.

  Returns:
    List of bottleneck arrays, their corresponding ground truths, and the
    relevant filenames.
  """
  labels = list(image_lists.keys())
  class_count = len(labels)
  bottlenecks = []
  ground_truths = []
  filenames = []

  def _collect(label_index, label_name, image_index):
    # Shared bookkeeping for one (label, image) pair: fetch the cached
    # bottleneck and record a one-hot ground-truth vector alongside it.
    image_name = get_image_path(image_lists, label_name, image_index,
                                image_dir, category)
    bottleneck = get_or_create_bottleneck(
        sess, image_lists, label_name, image_index, image_dir, category,
        bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
        resized_input_tensor, bottleneck_tensor, architecture)
    one_hot = np.zeros(class_count, dtype=np.float32)
    one_hot[label_index] = 1.0
    bottlenecks.append(bottleneck)
    ground_truths.append(one_hot)
    filenames.append(image_name)

  if how_many >= 0:
    # Random sample (with replacement) across all classes; the oversized
    # image index gets reduced modulo the class size downstream.
    for _ in range(how_many):
      label_index = random.randrange(class_count)
      label_name = labels[label_index]
      image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
      _collect(label_index, label_name, image_index)
  else:
    # Deterministic sweep over every image in the category.
    for label_index, label_name in enumerate(labels):
      for image_index, _ in enumerate(image_lists[label_name][category]):
        _collect(label_index, label_name, image_index)
  return bottlenecks, ground_truths, filenames
def get_random_distorted_bottlenecks(
    sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,
    distorted_image, resized_input_tensor, bottleneck_tensor):
  """Retrieves bottleneck values for training images, after distortions.

  If we're training with distortions like crops, scales, or flips, we have to
  recalculate the full model for every image, and so we can't use cached
  bottleneck values. Instead we find random images for the requested category,
  run them through the distortion graph, and then the full graph to get the
  bottleneck results for each.

  Args:
    sess: Current TensorFlow Session.
    image_lists: Dictionary of training images for each label.
    how_many: The integer number of bottleneck values to return.
    category: Name string of which set of images to fetch - training,
        testing, or validation.
    image_dir: Root folder string of the subfolders containing the training
        images.
    input_jpeg_tensor: The input layer we feed the image data to.
    distorted_image: The output node of the distortion graph.
    resized_input_tensor: The input node of the recognition graph.
    bottleneck_tensor: The bottleneck output layer of the CNN graph.

  Returns:
    List of bottleneck arrays and their corresponding ground truths.
  """
  labels = list(image_lists.keys())
  class_count = len(labels)
  bottlenecks = []
  ground_truths = []
  for _ in range(how_many):
    label_index = random.randrange(class_count)
    label_name = labels[label_index]
    image_index = random.randrange(MAX_NUM_IMAGES_PER_CLASS + 1)
    image_path = get_image_path(image_lists, label_name, image_index, image_dir,
                                category)
    if not gfile.Exists(image_path):
      tf.logging.fatal('File does not exist %s', image_path)
    jpeg_data = gfile.FastGFile(image_path, 'rb').read()
    # Materialize the distorted pixels as a numpy array first, then feed them
    # to the recognition network. The two sess.run calls imply two host-side
    # copies; other implementations might fuse this.
    distorted_pixels = sess.run(distorted_image,
                                {input_jpeg_tensor: jpeg_data})
    raw_bottleneck = sess.run(bottleneck_tensor,
                              {resized_input_tensor: distorted_pixels})
    one_hot = np.zeros(class_count, dtype=np.float32)
    one_hot[label_index] = 1.0
    bottlenecks.append(np.squeeze(raw_bottleneck))
    ground_truths.append(one_hot)
  return bottlenecks, ground_truths
def should_distort_images(flip_left_right, random_crop, random_scale,
                          random_brightness):
  """Whether any distortions are enabled, from the input flags.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
        crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.

  Returns:
    Boolean value indicating whether any distortions should be applied.
  """
  enabled = (bool(flip_left_right), random_crop != 0, random_scale != 0,
             random_brightness != 0)
  return any(enabled)
def add_input_distortions(flip_left_right, random_crop, random_scale,
                          random_brightness, input_width, input_height,
                          input_depth, input_mean, input_std):
  """Creates the operations to apply the specified distortions.

  During training it can help to improve the results if we run the images
  through simple distortions like crops, scales, and flips. These reflect the
  kind of variations we expect in the real world, and so can help train the
  model to cope with natural data more effectively. Here we take the supplied
  parameters and construct a network of operations to apply them to an image.

  Cropping
  ~~~~~~~~

  Cropping is done by placing a bounding box at a random position in the full
  image. The cropping parameter controls the size of that box relative to the
  input image. If it's zero, then the box is the same size as the input and no
  cropping is performed. If the value is 50%, then the crop box will be half the
  width and height of the input. In a diagram it looks like this:

  <       width         >
  +---------------------+
  |                     |
  |   width - crop%     |
  |    <      >         |
  |    +------+         |
  |    |      |         |
  |    |      |         |
  |    |      |         |
  |    +------+         |
  |                     |
  |                     |
  +---------------------+

  Scaling
  ~~~~~~~

  Scaling is a lot like cropping, except that the bounding box is always
  centered and its size varies randomly within the given range. For example if
  the scale percentage is zero, then the bounding box is the same size as the
  input and no scaling is applied. If it's 50%, then the bounding box will be in
  a random range between half the width and height and full size.

  Args:
    flip_left_right: Boolean whether to randomly mirror images horizontally.
    random_crop: Integer percentage setting the total margin used around the
        crop box.
    random_scale: Integer percentage of how much to vary the scale by.
    random_brightness: Integer range to randomly multiply the pixel values by.
    input_width: Horizontal size of expected input image to model.
    input_height: Vertical size of expected input image to model.
    input_depth: How many channels the expected input image should have.
    input_mean: Pixel value that should be zero in the image for the graph.
    input_std: How much to divide the pixel values by before recognition.

  Returns:
    The jpeg input layer and the distorted result tensor.
  """
  jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  # Add a batch dimension: resize_bilinear below expects 4-D input.
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  # Pre-crop scale combines a fixed margin (leaving room to crop) with a
  # random component; tf.random_uniform draws a fresh value on each sess.run.
  margin_scale = 1.0 + (random_crop / 100.0)
  resize_scale = 1.0 + (random_scale / 100.0)
  margin_scale_value = tf.constant(margin_scale)
  resize_scale_value = tf.random_uniform(tensor_shape.scalar(),
                                         minval=1.0,
                                         maxval=resize_scale)
  scale_value = tf.multiply(margin_scale_value, resize_scale_value)
  precrop_width = tf.multiply(scale_value, input_width)
  precrop_height = tf.multiply(scale_value, input_height)
  precrop_shape = tf.stack([precrop_height, precrop_width])
  precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)
  precropped_image = tf.image.resize_bilinear(decoded_image_4d,
                                              precrop_shape_as_int)
  # Back to 3-D for random_crop / random_flip, which operate on single images.
  # NOTE(review): squeeze_dims= and tensor_shape.scalar() are deprecated
  # spellings in later TF 1.x releases (axis= and [] respectively).
  precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])
  cropped_image = tf.random_crop(precropped_image_3d,
                                 [input_height, input_width, input_depth])
  if flip_left_right:
    flipped_image = tf.image.random_flip_left_right(cropped_image)
  else:
    flipped_image = cropped_image
  # Brightness is a per-image multiplicative factor in [1 - b/100, 1 + b/100].
  brightness_min = 1.0 - (random_brightness / 100.0)
  brightness_max = 1.0 + (random_brightness / 100.0)
  brightness_value = tf.random_uniform(tensor_shape.scalar(),
                                       minval=brightness_min,
                                       maxval=brightness_max)
  brightened_image = tf.multiply(flipped_image, brightness_value)
  # Final normalization matches the model's expected input statistics.
  offset_image = tf.subtract(brightened_image, input_mean)
  mul_image = tf.multiply(offset_image, 1.0 / input_std)
  distort_result = tf.expand_dims(mul_image, 0, name='DistortResult')
  return jpeg_data, distort_result
def variable_summaries(var):
  """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

  Emits scalar summaries for mean/stddev/max/min plus a histogram, all under
  a 'summaries' name scope so they group together in TensorBoard.
  """
  with tf.name_scope('summaries'):
    mean = tf.reduce_mean(var)
    tf.summary.scalar('mean', mean)
    with tf.name_scope('stddev'):
      # Population standard deviation: sqrt(E[(x - mean)^2]).
      stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
    tf.summary.scalar('stddev', stddev)
    tf.summary.scalar('max', tf.reduce_max(var))
    tf.summary.scalar('min', tf.reduce_min(var))
    tf.summary.histogram('histogram', var)
def add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor,
                           bottleneck_tensor_size, quantize_layer):
  """Adds a new softmax and fully-connected layer for training.

  We need to retrain the top layer to identify our new classes, so this function
  adds the right operations to the graph, along with some variables to hold the
  weights, and then sets up all the gradients for the backward pass.

  The set up for the softmax and fully-connected layers is based on:
  https://www.tensorflow.org/versions/master/tutorials/mnist/beginners/index.html

  Args:
    class_count: Integer of how many categories of things we're trying to
        recognize.
    final_tensor_name: Name string for the new final node that produces results.
    bottleneck_tensor: The output of the main CNN graph.
    bottleneck_tensor_size: How many entries in the bottleneck vector.
    quantize_layer: Boolean, specifying whether the newly added layer should be
        quantized.

  Returns:
    The tensors for the training and cross entropy results, and tensors for the
    bottleneck input and ground truth input.
  """
  with tf.name_scope('input'):
    # placeholder_with_default lets callers either feed cached bottleneck
    # values directly, or let them flow in from the CNN graph.
    bottleneck_input = tf.placeholder_with_default(
        bottleneck_tensor,
        shape=[None, bottleneck_tensor_size],
        name='BottleneckInputPlaceholder')
    ground_truth_input = tf.placeholder(tf.float32,
                                        [None, class_count],
                                        name='GroundTruthInput')
  # Organizing the following ops as `final_training_ops` so they're easier
  # to see in TensorBoard
  layer_name = 'final_training_ops'
  with tf.name_scope(layer_name):
    with tf.name_scope('weights'):
      initial_value = tf.truncated_normal(
          [bottleneck_tensor_size, class_count], stddev=0.001)
      layer_weights = tf.Variable(initial_value, name='final_weights')
      if quantize_layer:
        # Fake-quantize the weights so the trained graph can later be
        # converted to a fully-quantized inference graph.
        quantized_layer_weights = quant_ops.MovingAvgQuantize(
            layer_weights, is_training=True)
        variable_summaries(quantized_layer_weights)
      variable_summaries(layer_weights)
    with tf.name_scope('biases'):
      layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')
      if quantize_layer:
        quantized_layer_biases = quant_ops.MovingAvgQuantize(
            layer_biases, is_training=True)
        variable_summaries(quantized_layer_biases)
      variable_summaries(layer_biases)
    with tf.name_scope('Wx_plus_b'):
      if quantize_layer:
        logits = tf.matmul(bottleneck_input,
                           quantized_layer_weights) + quantized_layer_biases
        # Quantize the pre-activation logits to a fixed [-32, 32] range.
        logits = quant_ops.MovingAvgQuantize(
            logits,
            init_min=-32.0,
            init_max=32.0,
            is_training=True,
            num_bits=8,
            narrow_range=False,
            ema_decay=0.5)
        tf.summary.histogram('pre_activations', logits)
      else:
        logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases
        tf.summary.histogram('pre_activations', logits)
  final_tensor = tf.nn.softmax(logits, name=final_tensor_name)
  tf.summary.histogram('activations', final_tensor)
  with tf.name_scope('cross_entropy'):
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
        labels=ground_truth_input, logits=logits)
    with tf.name_scope('total'):
      cross_entropy_mean = tf.reduce_mean(cross_entropy)
  tf.summary.scalar('cross_entropy', cross_entropy_mean)
  with tf.name_scope('train'):
    optimizer = tf.train.GradientDescentOptimizer(FLAGS.learning_rate)
    train_step = optimizer.minimize(cross_entropy_mean)
  return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,
          final_tensor)
def add_evaluation_step(result_tensor, ground_truth_tensor):
  """Inserts the operations we need to evaluate the accuracy of our results.

  Args:
    result_tensor: The new final node that produces results.
    ground_truth_tensor: The node we feed ground truth data into.

  Returns:
    Tuple of (evaluation step, prediction).
  """
  with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
      # argmax over the class axis turns softmax scores into a label id.
      prediction = tf.argmax(result_tensor, 1)
      correct_prediction = tf.equal(
          prediction, tf.argmax(ground_truth_tensor, 1))
    with tf.name_scope('accuracy'):
      # Mean of the 0/1 correctness vector is the batch accuracy.
      evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  tf.summary.scalar('accuracy', evaluation_step)
  return evaluation_step, prediction
def save_graph_to_file(sess, graph, graph_file_name):
  """Freezes variables into constants and writes the GraphDef to disk."""
  frozen_graph_def = graph_util.convert_variables_to_constants(
      sess, graph.as_graph_def(), [FLAGS.final_tensor_name])
  with gfile.FastGFile(graph_file_name, 'wb') as output_file:
    output_file.write(frozen_graph_def.SerializeToString())
def prepare_file_system():
  """Resets the TensorBoard summaries directory and creates any intermediate
  output directory requested via flags."""
  summaries_dir = FLAGS.summaries_dir
  # Start each run with a clean summaries directory.
  if tf.gfile.Exists(summaries_dir):
    tf.gfile.DeleteRecursively(summaries_dir)
  tf.gfile.MakeDirs(summaries_dir)
  if FLAGS.intermediate_store_frequency > 0:
    ensure_dir_exists(FLAGS.intermediate_output_graphs_dir)
def create_model_info(architecture):
  """Given the name of a model architecture, returns information about it.

  There are different base image recognition pretrained models that can be
  retrained using transfer learning, and this function translates from the name
  of a model to the attributes that are needed to download and train with it.

  Args:
    architecture: Name of a model architecture, e.g. 'inception_v3' or
        'mobilenet_1.0_224' (optionally with a '_quantized' suffix).

  Returns:
    Dictionary of information about the model, or None if the name looks like
    a Mobilenet variant but its version/size/suffix parts are invalid.

  Raises:
    ValueError: If the architecture family is unknown.
  """
  architecture = architecture.lower()
  is_quantized = False
  if architecture == 'inception_v3':
    # pylint: disable=line-too-long
    data_url = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
    # pylint: enable=line-too-long
    bottleneck_tensor_name = 'pool_3/_reshape:0'
    bottleneck_tensor_size = 2048
    input_width = 299
    input_height = 299
    input_depth = 3
    resized_input_tensor_name = 'Mul:0'
    model_file_name = 'classify_image_graph_def.pb'
    input_mean = 128
    input_std = 128
  elif architecture.startswith('mobilenet_'):
    # Expected forms: mobilenet_<version>_<size> or
    # mobilenet_<version>_<size>_quantized.
    parts = architecture.split('_')
    if len(parts) not in (3, 4):
      tf.logging.error("Couldn't understand architecture name '%s'",
                       architecture)
      return None
    version_string = parts[1]
    if version_string not in ('1.0', '0.75', '0.50', '0.25'):
      # BUG FIX: the original message started with a stray '"' (quadruple
      # quote typo) and embedded a newline plus source indentation into the
      # log output.
      tf.logging.error(
          "The Mobilenet version should be '1.0', '0.75', '0.50', or '0.25',"
          " but found '%s' for architecture '%s'",
          version_string, architecture)
      return None
    size_string = parts[2]
    if size_string not in ('224', '192', '160', '128'):
      tf.logging.error(
          "The Mobilenet input size should be '224', '192', '160', or '128',"
          " but found '%s' for architecture '%s'",
          size_string, architecture)
      return None
    if len(parts) == 4:
      if parts[3] != 'quantized':
        tf.logging.error(
            "Couldn't understand architecture suffix '%s' for '%s'", parts[3],
            architecture)
        return None
      is_quantized = True
    if is_quantized:
      data_url = ('http://download.tensorflow.org/models/mobilenet_v1_' +
                  version_string + '_' + size_string + '_quantized_frozen.tgz')
      bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'
      resized_input_tensor_name = 'Placeholder:0'
      model_dir_name = ('mobilenet_v1_' + version_string + '_' + size_string +
                        '_quantized_frozen')
      model_base_name = 'quantized_frozen_graph.pb'
    else:
      data_url = ('http://download.tensorflow.org/models/mobilenet_v1_' +
                  version_string + '_' + size_string + '_frozen.tgz')
      bottleneck_tensor_name = 'MobilenetV1/Predictions/Reshape:0'
      resized_input_tensor_name = 'input:0'
      model_dir_name = 'mobilenet_v1_' + version_string + '_' + size_string
      model_base_name = 'frozen_graph.pb'
    bottleneck_tensor_size = 1001
    input_width = int(size_string)
    input_height = int(size_string)
    input_depth = 3
    model_file_name = os.path.join(model_dir_name, model_base_name)
    input_mean = 127.5
    input_std = 127.5
  else:
    tf.logging.error("Couldn't understand architecture name '%s'", architecture)
    raise ValueError('Unknown architecture', architecture)
  return {
      'data_url': data_url,
      'bottleneck_tensor_name': bottleneck_tensor_name,
      'bottleneck_tensor_size': bottleneck_tensor_size,
      'input_width': input_width,
      'input_height': input_height,
      'input_depth': input_depth,
      'resized_input_tensor_name': resized_input_tensor_name,
      'model_file_name': model_file_name,
      'input_mean': input_mean,
      'input_std': input_std,
      'quantize_layer': is_quantized,
  }
def add_jpeg_decoding(input_width, input_height, input_depth, input_mean,
                      input_std):
  """Adds operations that perform JPEG decoding and resizing to the graph.

  Args:
    input_width: Desired width of the image fed into the recognizer graph.
    input_height: Desired height of the image fed into the recognizer graph.
    input_depth: Desired channels of the image fed into the recognizer graph.
    input_mean: Pixel value that should be zero in the image for the graph.
    input_std: How much to divide the pixel values by before recognition.

  Returns:
    Tensors for the node to feed JPEG data into, and the output of the
    preprocessing steps.
  """
  jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
  # Convert uint8 pixels to float so the arithmetic below is exact.
  decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)
  # Add a batch dimension: resize_bilinear expects 4-D input.
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  resize_shape = tf.stack([input_height, input_width])
  resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
  resized_image = tf.image.resize_bilinear(decoded_image_4d,
                                           resize_shape_as_int)
  # Normalize to the range the pretrained model expects:
  # (pixel - input_mean) / input_std.
  offset_image = tf.subtract(resized_image, input_mean)
  mul_image = tf.multiply(offset_image, 1.0 / input_std)
  return jpeg_data, mul_image
def main(_):
  """Script entry point: retrain the final layer of the chosen architecture.

  Builds the pre-trained graph, caches bottleneck values (or sets up image
  distortions), trains a new softmax layer, periodically evaluates on the
  validation set, and writes the final trained graph and label list to disk.
  Returns -1 early when the architecture flag or image directory is unusable.
  """
  # Needed to make sure the logging output is visible.
  # See https://github.com/tensorflow/tensorflow/issues/3047
  tf.logging.set_verbosity(tf.logging.INFO)

  # Prepare necessary directories that can be used during training
  prepare_file_system()

  # Gather information about the model architecture we'll be using.
  model_info = create_model_info(FLAGS.architecture)
  if not model_info:
    tf.logging.error('Did not recognize architecture flag')
    return -1

  # Set up the pre-trained graph.
  maybe_download_and_extract(model_info['data_url'])
  graph, bottleneck_tensor, resized_image_tensor = (
      create_model_graph(model_info))

  # Look at the folder structure, and create lists of all the images.
  image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,
                                   FLAGS.validation_percentage)
  class_count = len(image_lists.keys())
  if class_count == 0:
    tf.logging.error('No valid folders of images found at ' + FLAGS.image_dir)
    return -1
  if class_count == 1:
    tf.logging.error('Only one valid folder of images found at ' +
                     FLAGS.image_dir +
                     ' - multiple classes are needed for classification.')
    return -1

  # See if the command-line flags mean we're applying any distortions.
  do_distort_images = should_distort_images(
      FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
      FLAGS.random_brightness)

  with tf.Session(graph=graph) as sess:
    # Set up the image decoding sub-graph.
    jpeg_data_tensor, decoded_image_tensor = add_jpeg_decoding(
        model_info['input_width'], model_info['input_height'],
        model_info['input_depth'], model_info['input_mean'],
        model_info['input_std'])

    if do_distort_images:
      # We will be applying distortions, so setup the operations we'll need.
      (distorted_jpeg_data_tensor,
       distorted_image_tensor) = add_input_distortions(
           FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,
           FLAGS.random_brightness, model_info['input_width'],
           model_info['input_height'], model_info['input_depth'],
           model_info['input_mean'], model_info['input_std'])
    else:
      # We'll make sure we've calculated the 'bottleneck' image summaries and
      # cached them on disk.
      cache_bottlenecks(sess, image_lists, FLAGS.image_dir,
                        FLAGS.bottleneck_dir, jpeg_data_tensor,
                        decoded_image_tensor, resized_image_tensor,
                        bottleneck_tensor, FLAGS.architecture)

    # Add the new layer that we'll be training.
    (train_step, cross_entropy, bottleneck_input, ground_truth_input,
     final_tensor) = add_final_training_ops(
         len(image_lists.keys()), FLAGS.final_tensor_name, bottleneck_tensor,
         model_info['bottleneck_tensor_size'], model_info['quantize_layer'])

    # Create the operations we need to evaluate the accuracy of our new layer.
    evaluation_step, prediction = add_evaluation_step(
        final_tensor, ground_truth_input)

    # Merge all the summaries and write them out to the summaries_dir
    merged = tf.summary.merge_all()
    train_writer = tf.summary.FileWriter(FLAGS.summaries_dir + '/train',
                                         sess.graph)
    validation_writer = tf.summary.FileWriter(
        FLAGS.summaries_dir + '/validation')

    # Set up all our weights to their initial default values.
    init = tf.global_variables_initializer()
    sess.run(init)

    # Run the training for as many cycles as requested on the command line.
    for i in range(FLAGS.how_many_training_steps):
      # Get a batch of input bottleneck values, either calculated fresh every
      # time with distortions applied, or from the cache stored on disk.
      if do_distort_images:
        (train_bottlenecks,
         train_ground_truth) = get_random_distorted_bottlenecks(
             sess, image_lists, FLAGS.train_batch_size, 'training',
             FLAGS.image_dir, distorted_jpeg_data_tensor,
             distorted_image_tensor, resized_image_tensor, bottleneck_tensor)
      else:
        (train_bottlenecks,
         train_ground_truth, _) = get_random_cached_bottlenecks(
             sess, image_lists, FLAGS.train_batch_size, 'training',
             FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
             decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
             FLAGS.architecture)
      # Feed the bottlenecks and ground truth into the graph, and run a training
      # step. Capture training summaries for TensorBoard with the `merged` op.
      train_summary, _ = sess.run(
          [merged, train_step],
          feed_dict={bottleneck_input: train_bottlenecks,
                     ground_truth_input: train_ground_truth})
      train_writer.add_summary(train_summary, i)

      # Every so often, print out how well the graph is training.
      is_last_step = (i + 1 == FLAGS.how_many_training_steps)
      if (i % FLAGS.eval_step_interval) == 0 or is_last_step:
        train_accuracy, cross_entropy_value = sess.run(
            [evaluation_step, cross_entropy],
            feed_dict={bottleneck_input: train_bottlenecks,
                       ground_truth_input: train_ground_truth})
        tf.logging.info('%s: Step %d: Train accuracy = %.1f%%' %
                        (datetime.now(), i, train_accuracy * 100))
        tf.logging.info('%s: Step %d: Cross entropy = %f' %
                        (datetime.now(), i, cross_entropy_value))
        validation_bottlenecks, validation_ground_truth, _ = (
            get_random_cached_bottlenecks(
                sess, image_lists, FLAGS.validation_batch_size, 'validation',
                FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
                decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
                FLAGS.architecture))
        # Run a validation step and capture training summaries for TensorBoard
        # with the `merged` op.
        validation_summary, validation_accuracy = sess.run(
            [merged, evaluation_step],
            feed_dict={bottleneck_input: validation_bottlenecks,
                       ground_truth_input: validation_ground_truth})
        validation_writer.add_summary(validation_summary, i)
        tf.logging.info('%s: Step %d: Validation accuracy = %.1f%% (N=%d)' %
                        (datetime.now(), i, validation_accuracy * 100,
                         len(validation_bottlenecks)))

      # Store intermediate results
      intermediate_frequency = FLAGS.intermediate_store_frequency

      # i > 0 so we never dump a checkpoint before any training has happened.
      if (intermediate_frequency > 0 and (i % intermediate_frequency == 0)
          and i > 0):
        intermediate_file_name = (FLAGS.intermediate_output_graphs_dir +
                                  'intermediate_' + str(i) + '.pb')
        tf.logging.info('Save intermediate result to : ' +
                        intermediate_file_name)
        save_graph_to_file(sess, graph, intermediate_file_name)

    # We've completed all our training, so run a final test evaluation on
    # some new images we haven't used before.
    test_bottlenecks, test_ground_truth, test_filenames = (
        get_random_cached_bottlenecks(
            sess, image_lists, FLAGS.test_batch_size, 'testing',
            FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,
            decoded_image_tensor, resized_image_tensor, bottleneck_tensor,
            FLAGS.architecture))
    test_accuracy, predictions = sess.run(
        [evaluation_step, prediction],
        feed_dict={bottleneck_input: test_bottlenecks,
                   ground_truth_input: test_ground_truth})
    tf.logging.info('Final test accuracy = %.1f%% (N=%d)' %
                    (test_accuracy * 100, len(test_bottlenecks)))

    if FLAGS.print_misclassified_test_images:
      tf.logging.info('=== MISCLASSIFIED TEST IMAGES ===')
      for i, test_filename in enumerate(test_filenames):
        if predictions[i] != test_ground_truth[i].argmax():
          tf.logging.info('%70s %s' %
                          (test_filename,
                           list(image_lists.keys())[predictions[i]]))

    # Write out the trained graph and labels with the weights stored as
    # constants.
    save_graph_to_file(sess, graph, FLAGS.output_graph)
    with gfile.FastGFile(FLAGS.output_labels, 'w') as f:
      f.write('\n'.join(image_lists.keys()) + '\n')
if __name__ == '__main__':
  # Command-line interface: every tunable in the retraining script is
  # exposed as a flag; unrecognized flags are forwarded to tf.app.run below.
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--image_dir',
      type=str,
      default='',
      help='Path to folders of labeled images.'
  )
  parser.add_argument(
      '--output_graph',
      type=str,
      default='/tmp/output_graph.pb',
      help='Where to save the trained graph.'
  )
  parser.add_argument(
      '--intermediate_output_graphs_dir',
      type=str,
      default='/tmp/intermediate_graph/',
      help='Where to save the intermediate graphs.'
  )
  parser.add_argument(
      '--intermediate_store_frequency',
      type=int,
      default=0,
      help="""\
      How many steps to store intermediate graph. If "0" then will not
      store.\
      """
  )
  parser.add_argument(
      '--output_labels',
      type=str,
      default='/tmp/output_labels.txt',
      help='Where to save the trained graph\'s labels.'
  )
  parser.add_argument(
      '--summaries_dir',
      type=str,
      default='/tmp/retrain_logs',
      help='Where to save summary logs for TensorBoard.'
  )
  # Training schedule and hyper-parameters.
  parser.add_argument(
      '--how_many_training_steps',
      type=int,
      default=4000,
      help='How many training steps to run before ending.'
  )
  parser.add_argument(
      '--learning_rate',
      type=float,
      default=0.01,
      help='How large a learning rate to use when training.'
  )
  parser.add_argument(
      '--testing_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a test set.'
  )
  parser.add_argument(
      '--validation_percentage',
      type=int,
      default=10,
      help='What percentage of images to use as a validation set.'
  )
  parser.add_argument(
      '--eval_step_interval',
      type=int,
      default=10,
      help='How often to evaluate the training results.'
  )
  parser.add_argument(
      '--train_batch_size',
      type=int,
      default=100,
      help='How many images to train on at a time.'
  )
  parser.add_argument(
      '--test_batch_size',
      type=int,
      default=-1,
      help="""\
      How many images to test on. This test set is only used once, to evaluate
      the final accuracy of the model after training completes.
      A value of -1 causes the entire test set to be used, which leads to more
      stable results across runs.\
      """
  )
  parser.add_argument(
      '--validation_batch_size',
      type=int,
      default=100,
      help="""\
      How many images to use in an evaluation batch. This validation set is
      used much more often than the test set, and is an early indicator of how
      accurate the model is during training.
      A value of -1 causes the entire validation set to be used, which leads to
      more stable results across training iterations, but may be slower on large
      training sets.\
      """
  )
  parser.add_argument(
      '--print_misclassified_test_images',
      default=False,
      help="""\
      Whether to print out a list of all misclassified test images.\
      """,
      action='store_true'
  )
  parser.add_argument(
      '--model_dir',
      type=str,
      default='/tmp/imagenet',
      help="""\
      Path to classify_image_graph_def.pb,
      imagenet_synset_to_human_label_map.txt, and
      imagenet_2012_challenge_label_map_proto.pbtxt.\
      """
  )
  parser.add_argument(
      '--bottleneck_dir',
      type=str,
      default='/tmp/bottleneck',
      help='Path to cache bottleneck layer values as files.'
  )
  parser.add_argument(
      '--final_tensor_name',
      type=str,
      default='final_result',
      help="""\
      The name of the output classification layer in the retrained graph.\
      """
  )
  # Input-distortion flags: any non-default value here forces bottlenecks to
  # be recomputed every step instead of being cached.
  parser.add_argument(
      '--flip_left_right',
      default=False,
      help="""\
      Whether to randomly flip half of the training images horizontally.\
      """,
      action='store_true'
  )
  parser.add_argument(
      '--random_crop',
      type=int,
      default=0,
      help="""\
      A percentage determining how much of a margin to randomly crop off the
      training images.\
      """
  )
  parser.add_argument(
      '--random_scale',
      type=int,
      default=0,
      help="""\
      A percentage determining how much to randomly scale up the size of the
      training images by.\
      """
  )
  parser.add_argument(
      '--random_brightness',
      type=int,
      default=0,
      help="""\
      A percentage determining how much to randomly multiply the training image
      input pixels up or down by.\
      """
  )
  parser.add_argument(
      '--architecture',
      type=str,
      default='inception_v3',
      help="""\
      Which model architecture to use. 'inception_v3' is the most accurate, but
      also the slowest. For faster or smaller models, chose a MobileNet with the
      form 'mobilenet_<parameter size>_<input_size>[_quantized]'. For example,
      'mobilenet_1.0_224' will pick a model that is 17 MB in size and takes 224
      pixel input images, while 'mobilenet_0.25_128_quantized' will choose a much
      less accurate, but smaller and faster network that's 920 KB on disk and
      takes 128x128 images. See https://research.googleblog.com/2017/06/mobilenets-open-source-models-for.html
      for more information on Mobilenet.\
      """)
  FLAGS, unparsed = parser.parse_known_args()
  # Hand the remaining argv through to TensorFlow's app runner.
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| {
"content_hash": "a349ad5db3a3219f4883aeb95ff379a1",
"timestamp": "",
"source": "github",
"line_count": 1362,
"max_line_length": 110,
"avg_line_length": 40.91116005873715,
"alnum_prop": 0.6670914018054235,
"repo_name": "alistairlow/tensorflow",
"id": "ebddfb20f4b60986fba1cdbfe3fcb184149b0a99",
"size": "56410",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/examples/image_retraining/retrain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8572"
},
{
"name": "C",
"bytes": "314472"
},
{
"name": "C++",
"bytes": "34078509"
},
{
"name": "CMake",
"bytes": "212405"
},
{
"name": "Go",
"bytes": "1005950"
},
{
"name": "Java",
"bytes": "533607"
},
{
"name": "Jupyter Notebook",
"bytes": "1940739"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "44807"
},
{
"name": "Objective-C",
"bytes": "12460"
},
{
"name": "Objective-C++",
"bytes": "94483"
},
{
"name": "PHP",
"bytes": "1429"
},
{
"name": "Perl",
"bytes": "6186"
},
{
"name": "Perl 6",
"bytes": "1360"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "29856457"
},
{
"name": "Ruby",
"bytes": "547"
},
{
"name": "Shell",
"bytes": "401880"
}
],
"symlink_target": ""
} |
"""
libcloud provides a unified interface to the cloud computing resources.
:var __version__: Current version of libcloud
"""
__all__ = ['__version__', 'enable_debug']
__version__ = '0.17.1-dev'
import os
import codecs
# Paramiko is an optional dependency; record whether it is importable so
# _init_once() can conditionally enable its debug logging.
try:
    import paramiko
    have_paramiko = True
except ImportError:
    have_paramiko = False
def enable_debug(fo):
    """
    Enable library wide debugging to a file-like object.

    :param fo: Where to append debugging information
    :type fo: File like object, only write operations are used.
    """
    # Deferred import: pulling in common.base at module import time would be
    # eager — presumably this avoids a circular import; confirm before moving.
    from libcloud.common.base import (Connection,
                                      LoggingHTTPConnection,
                                      LoggingHTTPSConnection)

    # Point the logging connection classes at the supplied stream and make
    # all future Connections use them for both HTTP and HTTPS traffic.
    LoggingHTTPSConnection.log = fo
    LoggingHTTPConnection.log = fo
    Connection.conn_classes = (LoggingHTTPConnection,
                               LoggingHTTPSConnection)
def _init_once():
    """
    Utility function that is ran once on Library import.

    If the LIBCLOUD_DEBUG environment variable is set, debug output for all
    provider transports is appended to the file it names (and paramiko's
    debug logging is enabled when paramiko is installed).
    """
    debug_path = os.getenv('LIBCLOUD_DEBUG')
    if not debug_path:
        return
    debug_stream = codecs.open(debug_path, 'a', encoding='utf8')
    enable_debug(debug_stream)
    if have_paramiko:
        paramiko.common.logging.basicConfig(level=paramiko.common.DEBUG)

_init_once()
| {
"content_hash": "139c70d024aed8776c044e0f0ad68872",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 79,
"avg_line_length": 27.615384615384617,
"alnum_prop": 0.616991643454039,
"repo_name": "Hybrid-Cloud/badam",
"id": "d3514f0d6a3b6645024dbf54baa8ab823db955c5",
"size": "2231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "patches_tool/aws_patch/aws_deps/libcloud/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3817"
},
{
"name": "Python",
"bytes": "29372474"
},
{
"name": "Shell",
"bytes": "17334"
}
],
"symlink_target": ""
} |
from workalendar.core import WesternCalendar, ChristianMixin
class Poland(WesternCalendar, ChristianMixin):
    "Poland"
    # Fixed-date civil holidays, appended to the base calendar's list
    # (month, day, label).
    FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
        (1, 6, 'Trzech Kroli'),
        (5, 1, 'Labour Day'),
        (5, 3, 'Constitution Day'),
        (11, 11, 'Independence Day'),
    )
    # Christian holidays toggled on via ChristianMixin flags.
    include_easter_sunday = True
    include_easter_monday = True
    include_whit_sunday = True
    whit_sunday_label = "Pentecost Sunday"
    include_corpus_christi = True
    include_assumption = True
    include_all_saints = True
    include_boxing_day = True
| {
"content_hash": "e8f792819263551c103529497f378974",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 60,
"avg_line_length": 30.789473684210527,
"alnum_prop": 0.6547008547008547,
"repo_name": "gregn610/workalendar",
"id": "73278f6db34a5757f5ed1a362cbeae8b1b485e20",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "workalendar/europe/poland.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "310466"
}
],
"symlink_target": ""
} |
import hindkit as k
import functools

# Consonant stems for the Odia script (alternative stem set, unused by default).
ODIA_STEMS = "K KH G GH NG C CH J JH NY TT TTH DD DDH NN T TH D DH N P PH B V BH M Y YY R LL L SH SS S H W".split()

STEMS = k.constants.CONSONANT_STEMS
# STEMS = ODIA_STEMS
# Akhand (always-ligated) conjunct stems, sorted after the plain stems.
STEMS_AKHAND = ["K_SS", "J_NY"]

# Canonical sort order of glyph-name parts: for every stem, its "A", bare,
# and "Ac2" variants, with "Eyelash" slotted in right after bare "R".
ORDER = []
for stem in STEMS + STEMS_AKHAND:
    for i in ["A", "", "Ac2"]:
        part = stem + i
        ORDER.append(part)
        if part == "R":
            ORDER.append("Eyelash")
def get_name(rule):
    """Return the target glyph name from a FEA-style substitution rule.

    The name is the rule's final whitespace-separated token with its
    trailing character (the ';') removed.
    """
    last_token = rule.rsplit(None, 1)[-1]
    return last_token[:-1]
def get_key(name, akhands=None, reverse=False):
    """Build a two-part sort key for a glyph name.

    Strips the 2-character script prefix, splits the remainder into parts
    (re-fusing any akhand conjuncts), and returns [major, minor] where
    major is each part's index in the module-level ORDER list and minor
    flags the presence of an "x" marker in the part.
    """
    akhand_list = [] if akhands is None else akhands
    # Drop the script prefix and expand underscores into part separators.
    stem_text = name[2:].replace("_", " ")
    # Re-join akhand conjuncts so they sort as single units.
    for akhand in akhand_list:
        stem_text = stem_text.replace(akhand.replace("_", " "), akhand)
    parts = stem_text.split(" ")
    key_major = [ORDER.index(part.replace("x", "")) for part in parts]
    key_minor = ["x" in part for part in parts]
    key_minor.reverse()
    if reverse:
        key_major.reverse()
        key_minor.reverse()
    return [key_major, key_minor]
# Sample data kept for reference/experimentation; not used below.
RULES = """\
sub mlKA mlVirama mlKA by mlK_KA;
""".splitlines()

NAMES = """\
mlK_KA
""".splitlines()

# Read glyph names, sort them by the canonical ORDER key, and write back.
with open("lab/input.txt") as f:
    names = f.read().splitlines()

names.sort(
    key = functools.partial(
        get_key,
        # akhands = STEMS_AKHAND,
        # reverse = True,
    ),
)

with open("lab/output.txt", "w") as f:
    f.write("\n".join(names))
| {
"content_hash": "08bcedaf9b9f27b6e6864e65bf2cb8e6",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 115,
"avg_line_length": 24.912280701754387,
"alnum_prop": 0.5669014084507042,
"repo_name": "itfoundry/hindkit",
"id": "3becd88fbbdf263fa3945bf6d990e24b34ba1d45",
"size": "1420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab/sort_glyph_names.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "608"
},
{
"name": "Python",
"bytes": "193040"
}
],
"symlink_target": ""
} |
import sys
sys.path.append("../src")
import pcs
import pcs.packets.udp
class udpv4(pcs.packets.udp.udp):
    """A UDP datagram specialized for transport over IPv4.

    Extends the generic udp packet with IPv4 pseudo-header checksum
    support; when chained, the preceding header must be an ipv4 packet.
    """

    _layout = pcs.Layout()
    _map = None

    def __init__(self, bytes = None, timestamp = None, **kv):
        """Initialize a UDP packet for IPv4"""
        pcs.packets.udp.udp.__init__(self, bytes, timestamp, **kv)

    def cksum(self, ip, data = ""):
        """Calculate the checksum for this UDPv4 header,
        outside of a chain.

        ip -- the enclosing ipv4 header, supplying src/dst addresses.
        data -- optional payload bytes appended after the UDP header.
        Returns the checksum as an int.
        """
        from socket import IPPROTO_UDP
        from pcs.packets.ipv4 import ipv4
        from pcs.packets.ipv4 import pseudoipv4
        # Build the IPv4 pseudo-header the UDP checksum must cover.
        tmpip = pseudoipv4()
        tmpip.src = ip.src
        tmpip.dst = ip.dst
        tmpip.protocol = IPPROTO_UDP
        tmpip.length = len(self.getbytes()) + len(data)
        tmpbytes = tmpip.getbytes() + self.getbytes() + data
        return ipv4.ipv4_cksum(tmpbytes)

    def calc_checksum(self):
        """Calculate and store the checksum for this UDPv4 datagram.
        The packet must be part of a chain.
        udpv4 is a specialization of udp whose outer header must
        always be ipv4, therefore we enforce this."""
        from pcs.packets.ipv4 import ipv4
        ip = None
        if self._head is not None:
            ip = self._head.find_preceding(self, pcs.packets.ipv4.ipv4)
        if ip is None:
            # No enclosing IPv4 header: checksum the UDP bytes alone.
            # Zero the field first, since the checksum is computed over a
            # header whose checksum field is zero.
            self.checksum = 0
            # Bug fix: the local import above binds the ipv4 *class*, so
            # the helper is ipv4.ipv4_cksum (exactly as cksum() calls it);
            # the original ipv4.ipv4.ipv4_cksum would raise AttributeError.
            self.checksum = ipv4.ipv4_cksum(self.getbytes())
            return
        pcs.packets.udp.udp.calc_checksum_v4(self, ip)
| {
"content_hash": "e0e1f442232c7bf198dcd238d69a8223",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 34.74418604651163,
"alnum_prop": 0.6037483266398929,
"repo_name": "gvnn3/PCS",
"id": "b3b01f329b36d2fb7e78f0d0444f636564db3d87",
"size": "3194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pcs/packets/udpv4.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6791"
},
{
"name": "Makefile",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "735921"
}
],
"symlink_target": ""
} |
def cheese_and_crackers(cheese_count, boxes_of_crackers):
    """Print a short party-supply report for the given cheese and
    cracker counts. Output ends with a blank line."""
    report = [
        "You have %d cheeses!" % cheese_count,
        "You have %d boxes of crackers!" % boxes_of_crackers,
        "Man that's enough for a party!",
        "Get a blanket.\n",
    ]
    for line in report:
        print(line)
# Demonstrate the three ways to pass arguments: literals, variables,
# and expressions.
print ("We can just give the function numbers directly:")
cheese_and_crackers(20, 30)

print ("OR, we can use variables from our script:")
amount_of_cheese = 10
amount_of_crackers = 50

cheese_and_crackers(amount_of_cheese, amount_of_crackers)

print ("We can even do maths inside too:")
cheese_and_crackers(10 + 20, 5 + 6)

print ("And we can combine the two, variables and maths:")
cheese_and_crackers(amount_of_cheese + 100, amount_of_crackers + 1000)
| {
"content_hash": "0f1cbd38a70c699535f6d8be1b3deadd",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 70,
"avg_line_length": 29.541666666666668,
"alnum_prop": 0.7023977433004231,
"repo_name": "Paul-Haley/LPTHW_python3",
"id": "6f33127301e77058737bb77f54407ac12923a6f6",
"size": "709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ex19.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64611"
}
],
"symlink_target": ""
} |
"""
Client side of the cert manager RPC API.
"""
from oslo.config import cfg
from oslo import messaging
from nova import rpc
# Expose the cert message topic as configuration so deployments can
# rename the queue the cert service listens on.
rpcapi_opts = [
    cfg.StrOpt('cert_topic',
               default='cert',
               help='The topic cert nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)

# [upgrade_levels]/cert: optional cap on outgoing message versions during
# rolling upgrades; consumed by CertAPI.__init__.
rpcapi_cap_opt = cfg.StrOpt('cert',
        help='Set a version cap for messages sent to cert services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class CertAPI(object):
    '''Client side of the cert rpc API.

    API version history:

        1.0 - Initial version.
        1.1 - Added get_backdoor_port()

        ... Grizzly and Havana support message version 1.1. So, any changes to
        existing methods in 1.x after that point should be done such that they
        can handle the version_cap being set to 1.1.

        2.0 - Major API rev for Icehouse

        ... Icehouse and Juno support message version 2.0. So, any changes to
        existing methods in 2.x after that point should be done such that they
        can handle the version_cap being set to 2.0.
    '''

    # Release name -> newest message version that release understands;
    # lets [upgrade_levels] cert be given as a release alias.
    VERSION_ALIASES = {
        'grizzly': '1.1',
        'havana': '1.1',
        'icehouse': '2.0',
        'juno': '2.0',
    }

    def __init__(self):
        super(CertAPI, self).__init__()
        target = messaging.Target(topic=CONF.cert_topic, version='2.0')
        # The configured cap may be a release alias or an explicit version
        # string; fall back to the raw value when it is not an alias.
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.cert,
                                               CONF.upgrade_levels.cert)
        self.client = rpc.get_client(target, version_cap=version_cap)

    def revoke_certs_by_user(self, ctxt, user_id):
        """Synchronously ask the cert service to revoke a user's certs."""
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'revoke_certs_by_user', user_id=user_id)

    def revoke_certs_by_project(self, ctxt, project_id):
        """Synchronously revoke all certs belonging to a project."""
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'revoke_certs_by_project',
                          project_id=project_id)

    def revoke_certs_by_user_and_project(self, ctxt, user_id, project_id):
        """Synchronously revoke certs for a (user, project) pair."""
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'revoke_certs_by_user_and_project',
                          user_id=user_id, project_id=project_id)

    def generate_x509_cert(self, ctxt, user_id, project_id):
        """Synchronously generate an x509 cert for a user in a project."""
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'generate_x509_cert',
                          user_id=user_id,
                          project_id=project_id)

    def fetch_ca(self, ctxt, project_id):
        """Fetch the CA certificate for a project."""
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'fetch_ca', project_id=project_id)

    def fetch_crl(self, ctxt, project_id):
        """Fetch the certificate revocation list for a project."""
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'fetch_crl', project_id=project_id)

    def decrypt_text(self, ctxt, project_id, text):
        """Decrypt text with the project's private key on the cert node."""
        cctxt = self.client.prepare()
        return cctxt.call(ctxt, 'decrypt_text',
                          project_id=project_id,
                          text=text)
| {
"content_hash": "8f4cb00bbee8873b74c171d581fe0998",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 33.42696629213483,
"alnum_prop": 0.5956302521008403,
"repo_name": "shakamunyi/nova",
"id": "48cdd0ea211c34e5aec75d59663a785c59cb6182",
"size": "3582",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "nova/cert/rpcapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15322211"
},
{
"name": "Shell",
"bytes": "17730"
},
{
"name": "Smarty",
"bytes": "489682"
}
],
"symlink_target": ""
} |
import unittest
import os
import tempfile
from azure.cli.core.application import Application, Configuration, IterateAction
from azure.cli.core.commands import CliCommand
from azure.cli.core.util import CLIError
class TestApplication(unittest.TestCase):
    """Unit tests for azure.cli.core.application.Application: request-id
    handling, event registration, argument parsing and @file expansion."""

    def test_client_request_id_is_not_assigned_when_application_is_created(self):
        app = Application()
        self.assertNotIn('x-ms-client-request-id', app.session['headers'])

    def test_client_request_id_is_refreshed_correctly(self):
        app = Application()
        app.refresh_request_id()
        self.assertIn('x-ms-client-request-id', app.session['headers'])
        old_id = app.session['headers']['x-ms-client-request-id']
        app.refresh_request_id()
        self.assertIn('x-ms-client-request-id', app.session['headers'])
        # Fixed: assertNotEquals is a deprecated alias of assertNotEqual.
        self.assertNotEqual(old_id, app.session['headers']['x-ms-client-request-id'])

    def test_client_request_id_is_refreshed_after_execution(self):
        def _handler(args):
            return True

        config = Configuration()
        config.get_command_table = lambda *_: {'test': CliCommand('test', _handler)}
        app = Application(config)
        app.execute(['test'])
        self.assertIn('x-ms-client-request-id', app.session['headers'])
        old_id = app.session['headers']['x-ms-client-request-id']
        app.execute(['test'])
        self.assertIn('x-ms-client-request-id', app.session['headers'])
        # Fixed: deprecated assertNotEquals alias.
        self.assertNotEqual(old_id, app.session['headers']['x-ms-client-request-id'])

    def test_application_register_and_call_handlers(self):
        handler_called = [False]

        def handler(**kwargs):
            kwargs['args'][0] = True

        def other_handler(**kwargs):
            self.assertEqual(kwargs['args'], 'secret sauce')

        app = Application()
        app.initialize(Configuration())

        app.raise_event('was_handler_called', args=handler_called)
        self.assertFalse(handler_called[0],
                         "Raising event with no handlers registered somehow failed...")

        app.register('was_handler_called', handler)
        self.assertFalse(handler_called[0])

        # Registered handler won't get called if event with different name
        # is raised...
        app.raise_event('other_handler_called', args=handler_called)
        self.assertFalse(handler_called[0], 'Wrong handler called!')

        app.raise_event('was_handler_called', args=handler_called)
        self.assertTrue(handler_called[0], "Handler didn't get called")

        app.raise_event('other_handler_called', args='secret sauce')

    def test_list_value_parameter(self):
        # IterateAction should invoke the handler once per value of --hello,
        # with the scalar --something repeated for each invocation.
        hellos = []

        def handler(args):
            hellos.append(args)

        command = CliCommand('test command', handler)
        command.add_argument('hello', '--hello', nargs='+', action=IterateAction)
        command.add_argument('something', '--something')
        cmd_table = {'test command': command}

        argv = 'az test command --hello world sir --something else'.split()
        config = Configuration()
        config.get_command_table = lambda argv: cmd_table
        application = Application(config)
        application.execute(argv[1:])

        self.assertEqual(2, len(hellos))
        self.assertEqual(hellos[0]['hello'], 'world')
        self.assertEqual(hellos[0]['something'], 'else')
        self.assertEqual(hellos[1]['hello'], 'sir')
        self.assertEqual(hellos[1]['something'], 'else')

    def test_case_insensitive_command_path(self):
        import argparse

        def handler(args):
            return 'PASSED'

        command = CliCommand('test command', handler)
        command.add_argument('var', '--var', '-v')
        cmd_table = {'test command': command}

        def _test(cmd_line):
            argv = cmd_line.split()
            config = Configuration()
            config.get_command_table = lambda argv: cmd_table
            application = Application(config)
            return application.execute(argv[1:])

        # case insensitive command paths
        result = _test('az TEST command --var blah')
        self.assertEqual(result.result, 'PASSED')

        result = _test('az test COMMAND --var blah')
        self.assertEqual(result.result, 'PASSED')

        result = _test('az test command -v blah')
        self.assertEqual(result.result, 'PASSED')

        # verify that long and short options remain case sensitive
        with self.assertRaises(SystemExit):
            _test('az test command --vAR blah')

        with self.assertRaises(SystemExit):
            _test('az test command -V blah')

    def test_expand_file_prefixed_files(self):
        f = tempfile.NamedTemporaryFile(delete=False)
        f.close()

        f_with_bom = tempfile.NamedTemporaryFile(delete=False)
        f_with_bom.close()

        with open(f.name, 'w+') as stream:
            stream.write('foo')

        # Same content written with a UTF-8 BOM to verify the BOM is stripped.
        from codecs import open as codecs_open
        with codecs_open(f_with_bom.name, encoding='utf-8-sig', mode='w+') as stream:
            stream.write('foo')

        cases = [
            [['bar=baz'], ['bar=baz']],
            [['bar', 'baz'], ['bar', 'baz']],
            [['bar=@{}'.format(f.name)], ['bar=foo']],
            [['bar=@{}'.format(f_with_bom.name)], ['bar=foo']],
            [['bar', '@{}'.format(f.name)], ['bar', 'foo']],
            [['bar', f.name], ['bar', f.name]],
            [['bar=name@company.com'], ['bar=name@company.com']],
            [['bar', 'name@company.com'], ['bar', 'name@company.com']],
            [['bar=mymongo=@connectionstring'], ['bar=mymongo=@connectionstring']]
        ]

        for test_case in cases:
            # Fixed: 'args' was referenced in the except clause below but was
            # only bound when the call succeeded, so a failing call raised
            # NameError instead of reporting the real error.
            args = None
            try:
                args = Application._expand_file_prefixed_files(test_case[0])  # pylint: disable=protected-access
                self.assertEqual(args, test_case[1], 'Failed for: {}'.format(test_case[0]))
            except CLIError as ex:
                self.fail('Unexpected error for {} ({}): {}'.format(test_case[0], args, ex))

        os.remove(f.name)
        # Fixed: the BOM temp file was leaked; clean it up as well.
        os.remove(f_with_bom.name)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "a2b200c1f4b559f4490d0395d7c7bd96",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 112,
"avg_line_length": 36.71084337349398,
"alnum_prop": 0.599442074171316,
"repo_name": "QingChenmsft/azure-cli",
"id": "76d3122cccad7123e1fbca43eee8024bfec92729",
"size": "6440",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/azure-cli-core/azure/cli/core/tests/test_application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11279"
},
{
"name": "C++",
"bytes": "275"
},
{
"name": "JavaScript",
"bytes": "380"
},
{
"name": "Python",
"bytes": "5372365"
},
{
"name": "Shell",
"bytes": "25445"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, with_statement
from revolver import core as _core
| {
"content_hash": "b78176bd247152881226a9e4df878b69",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 64,
"avg_line_length": 33.666666666666664,
"alnum_prop": 0.7821782178217822,
"repo_name": "michaelcontento/revolver",
"id": "6659d93a18ebcc862366cbac2bdb59fe717f42bc",
"size": "126",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "revolver/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "10174"
},
{
"name": "Python",
"bytes": "68675"
}
],
"symlink_target": ""
} |
import numpy as np
from vizdoom import *
from doom_instance import DoomInstance
from doom_instance_cig import DoomInstanceCig
from doom_instance_map import DoomInstanceMap
from doom_instance_obj import DoomInstanceObj
from doom_instance_oblige import DoomInstanceOblige
from doom_instance_oblige_map import DoomInstanceObligeMap
from doom_instance_d3 import DoomInstanceD3
def init_doom_env(args):
    """Probe a Doom instance to populate ``args`` with environment metadata.

    Mutates ``args`` in place:
      * ``action_set``: 'noset' -> [], a file path -> loaded action list,
        None passed through unchanged;
      * ``instance_class``: picked from ``args.doom_instance``
        (defaults to 'basic' when the attribute is missing);
      * ``button_num``, ``screen_size``, ``variable_num``, ``variables_size``
        read off a probe instance's normalized state.
    """
    if args.action_set == 'noset':
        args.action_set = []
    elif args.action_set is not None:
        args.action_set = np.load(args.action_set).tolist()

    instance_class = {
        'basic': DoomInstance,
        'cig': DoomInstanceCig,
        'd3': DoomInstanceD3,
        'map': DoomInstanceMap,
        'obj': DoomInstanceObj,
        'oblige': DoomInstanceOblige,
        'oblige_map': DoomInstanceObligeMap
    }
    # Bug fix: the original wrapped the attribute access in
    # try/except NameError, but a missing attribute raises AttributeError,
    # so the 'basic' default was never applied.
    if not hasattr(args, 'doom_instance'):
        args.doom_instance = 'basic'
    args.instance_class = instance_class[args.doom_instance]

    # Probe instance started invisible, only to read out game metadata.
    doom = args.instance_class(
        args.vizdoom_config,
        wad=args.wad_path,
        skiprate=args.skiprate,
        visible=False,
        mode=Mode.PLAYER,
        actions=args.action_set)
    state = doom.get_state_normalized()

    args.button_num = doom.get_button_num()
    args.screen_size = state.screen.shape
    # Bug fix: len(state.variables) was computed before the None check,
    # crashing on configs that expose no game variables.
    if state.variables is not None:
        args.variable_num = len(state.variables)
        args.variables_size = state.variables.shape
    else:
        args.variable_num = 0
| {
"content_hash": "47571b408bc717e6a6784bcba660e400",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 60,
"avg_line_length": 31.52173913043478,
"alnum_prop": 0.6820689655172414,
"repo_name": "akolishchak/doom-net-pytorch",
"id": "537b4eb853af11fb9fec5de9146a64d1a339d995",
"size": "1524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/doom_env.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "303488"
},
{
"name": "Shell",
"bytes": "38833"
}
],
"symlink_target": ""
} |
import wx
#----------------------------------------------------------------------
# Unique widget IDs for the demo's three buttons; used both when creating
# the buttons and when binding their EVT_BUTTON handlers.
ID_CopyBtn = wx.NewId()
ID_PasteBtn = wx.NewId()
ID_BitmapBtn = wx.NewId()
#----------------------------------------------------------------------
class ClipTextPanel(wx.Panel):
    """Demo panel exercising wx.TheClipboard: a multi-line text control with
    Copy/Paste buttons for text data and a button that copies a bitmap."""

    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent, -1)
        self.log = log

        # Vertical layout: instructions, text control, button row.
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(
            wx.StaticText(
                self, -1, "Copy/Paste text to/from\n"
                "this window and other apps"
            ),
            0, wx.EXPAND|wx.ALL, 2
        )

        self.text = wx.TextCtrl(self, -1, "", style=wx.TE_MULTILINE|wx.HSCROLL)
        sizer.Add(self.text, 1, wx.EXPAND)

        hsz = wx.BoxSizer(wx.HORIZONTAL)
        hsz.Add(wx.Button(self, ID_CopyBtn, " Copy "), 1, wx.EXPAND|wx.ALL, 2)
        hsz.Add(wx.Button(self, ID_PasteBtn, " Paste "), 1, wx.EXPAND|wx.ALL, 2)
        sizer.Add(hsz, 0, wx.EXPAND)

        sizer.Add(
            wx.Button(self, ID_BitmapBtn, " Copy Bitmap "),
            0, wx.EXPAND|wx.ALL, 2
        )

        self.Bind(wx.EVT_BUTTON, self.OnCopy, id=ID_CopyBtn)
        self.Bind(wx.EVT_BUTTON, self.OnPaste, id=ID_PasteBtn)
        self.Bind(wx.EVT_BUTTON, self.OnCopyBitmap, id=ID_BitmapBtn)

        self.SetSizer(sizer)

    def OnCopy(self, evt):
        # Wrap the control's text in a TextDataObject and hand it to the
        # clipboard (kept on self, presumably to keep the object alive —
        # confirm whether that is still required).
        self.do = wx.TextDataObject()
        self.do.SetText(self.text.GetValue())
        if wx.TheClipboard.Open():
            wx.TheClipboard.SetData(self.do)
            wx.TheClipboard.Close()
        else:
            wx.MessageBox("Unable to open the clipboard", "Error")

    def OnPaste(self, evt):
        # Pull text data off the clipboard, if present, into the control.
        success = False
        do = wx.TextDataObject()
        if wx.TheClipboard.Open():
            success = wx.TheClipboard.GetData(do)
            wx.TheClipboard.Close()

        if success:
            self.text.SetValue(do.GetText())
        else:
            wx.MessageBox(
                "There is no data in the clipboard in the required format",
                "Error"
            )

    def OnCopyBitmap(self, evt):
        # Let the user pick a .bmp file and put it on the clipboard as a
        # bitmap data object.
        dlg = wx.FileDialog(self, "Choose a bitmap to copy", wildcard="*.bmp")
        if dlg.ShowModal() == wx.ID_OK:
            bmp = wx.Bitmap(dlg.GetPath(), wx.BITMAP_TYPE_BMP)
            bmpdo = wx.BitmapDataObject(bmp)
            if wx.TheClipboard.Open():
                wx.TheClipboard.SetData(bmpdo)
                wx.TheClipboard.Close()
                wx.MessageBox(
                    "The bitmap is now in the Clipboard. Switch to a graphics\n"
                    "editor and try pasting it in..."
                )
            else:
                # NOTE(review): this branch runs when the clipboard could not
                # be opened, yet the message talks about data format — looks
                # copied from OnPaste; confirm the intended wording.
                wx.MessageBox(
                    "There is no data in the clipboard in the required format",
                    "Error"
                )
        dlg.Destroy()
#----------------------------------------------------------------------
class OtherDropTarget(wx.DropTarget):
    """Raw wx.DropTarget accepting file lists; traces every drop callback."""

    def __init__(self, window, log):
        wx.DropTarget.__init__(self)
        self.log = log
        # The data object must outlive the drop, so keep it on the instance.
        self.do = wx.FileDataObject()
        self.SetDataObject(self.do)

    def OnEnter(self, x, y, d):
        entry = "OnEnter: %d, %d, %d\n" % (x, y, d)
        self.log.WriteText(entry)
        return wx.DragCopy

    def OnLeave(self):
        self.log.WriteText("OnLeave\n")

    def OnDrop(self, x, y):
        self.log.WriteText("OnDrop: %d %d\n" % (x, y))
        return True

    def OnData(self, x, y, d):
        self.log.WriteText("OnData: %d, %d, %d\n" % (x, y, d))
        self.GetData()
        # NOTE(review): every other callback uses log.WriteText; SetLabel
        # here may have been intended for a window -- confirm before changing.
        self.log.SetLabel("%s\n" % self.do.GetFilenames())
        return d
class MyFileDropTarget(wx.FileDropTarget):
    """File drop target that reports dropped paths via *window*.SetLabel."""

    def __init__(self, window, log):
        wx.FileDropTarget.__init__(self)
        self.log = log
        self.window = window

    def OnDropFiles(self, x, y, filenames):
        header = "\n%d file(s) dropped at %d,%d:\n" % (len(filenames), x, y)
        self.window.SetLabel(header + '\n'.join(filenames))
        return True
class MyTextDropTarget(wx.TextDropTarget):
    """Text drop target that shows the dropped text and its drop position."""

    def __init__(self, window, log):
        wx.TextDropTarget.__init__(self)
        self.log = log
        self.window = window

    def OnDropText(self, x, y, text):
        label = "(%d, %d)\n%s\n" % (x, y, text)
        self.window.SetLabel(label)
        return True

    def OnDragOver(self, x, y, d):
        # Always advertise a copy operation.
        return wx.DragCopy
class FileDropPanel(wx.Panel):
    """Right half of the demo: two drop areas, one for files and one for text."""

    def __init__(self, parent, log):
        """Build the two labelled drop areas and attach their drop targets."""
        wx.Panel.__init__(self, parent, -1)
        #self.SetFont(wx.Font(10, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(
            wx.StaticText(self, -1, " \nDrag some files here:"),
            0, wx.EXPAND|wx.ALL, 2
        )
        # File drop area: MyFileDropTarget writes into this panel's label
        # via SetLabel() below.
        self.text = wx.StaticText(self, -1, "", style=wx.ST_NO_AUTORESIZE|wx.BORDER_SIMPLE)
        self.text.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        dt = MyFileDropTarget(self, log)
        self.text.SetDropTarget(dt)
        sizer.Add(self.text, 1, wx.EXPAND|wx.ALL, 5)
        sizer.Add(
            wx.StaticText(self, -1, " \nDrag some text here:"),
            0, wx.EXPAND|wx.ALL, 2
        )
        # Text drop area: MyTextDropTarget writes straight into this label.
        self.text2 = wx.StaticText(self, -1, "", style=wx.ST_NO_AUTORESIZE|wx.BORDER_SIMPLE)
        self.text2.SetWindowVariant(wx.WINDOW_VARIANT_SMALL)
        dt = MyTextDropTarget(self.text2, log)
        self.text2.SetDropTarget(dt)
        sizer.Add(self.text2, 1, wx.EXPAND|wx.ALL, 5)
        self.SetAutoLayout(True)
        self.SetSizer(sizer)

    def SetLabel(self, text):
        """Display *text* in the file-drop area (called by MyFileDropTarget)."""
        self.text.SetLabel(text)
#----------------------------------------------------------------------
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Top-level demo panel: a heading above the clipboard and drop halves."""

    def __init__(self, parent, log):
        wx.Panel.__init__(self, parent, -1)
        self.SetAutoLayout(True)
        outsideSizer = wx.BoxSizer(wx.VERTICAL)
        msg = "Clipboard / Drag-And-Drop"
        # Big blue heading, explicitly sized to the rendered text extent.
        text = wx.StaticText(self, -1, "", style=wx.ALIGN_CENTRE)
        text.SetFont(wx.Font(24, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False))
        text.SetLabel(msg)
        w,h = text.GetTextExtent(msg)
        text.SetSize(wx.Size(w,h+1))
        text.SetForegroundColour(wx.BLUE)
        outsideSizer.Add(text, 0, wx.EXPAND|wx.ALL, 5)
        outsideSizer.Add(wx.StaticLine(self, -1), 0, wx.EXPAND)
        # Side by side: clipboard demo (left) and drop targets (right).
        inSizer = wx.BoxSizer(wx.HORIZONTAL)
        inSizer.Add(ClipTextPanel(self, log), 1, wx.EXPAND)
        inSizer.Add(FileDropPanel(self, log), 1, wx.EXPAND)
        outsideSizer.Add(inSizer, 1, wx.EXPAND)
        self.SetSizer(outsideSizer)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Demo-framework entry point: build and return the demo panel."""
    return TestPanel(nb, log)
#----------------------------------------------------------------------
# HTML shown in the demo framework's "Overview" tab for this sample.
overview = """\
<html>
<body>
This demo shows some examples of data transfer through clipboard or
drag and drop. In wxWindows, these two ways to transfer data (either
between different applications or inside one and the same) are very
similar which allows to implement both of them using almost the same
code - or, in other words, if you implement drag and drop support for
your application, you get clipboard support for free and vice versa.
<p>
At the heart of both clipboard and drag and drop operations lies the
wxDataObject class. The objects of this class (or, to be precise,
classes derived from it) represent the data which is being carried by
the mouse during drag and drop operation or copied to or pasted from
the clipboard. wxDataObject is a "smart" piece of data because it
knows which formats it supports (see GetFormatCount and GetAllFormats)
and knows how to render itself in any of them (see GetDataHere). It
can also receive its value from the outside in a format it supports if
it implements the SetData method. Please see the documentation of this
class for more details.
<p>
Both clipboard and drag and drop operations have two sides: the source
and target, the data provider and the data receiver. These which may
be in the same application and even the same window when, for example,
you drag some text from one position to another in a word
processor. Let us describe what each of them should do.
</body>
</html>
"""
if __name__ == '__main__':
    # Launched standalone: hand off to the wxPython demo framework runner.
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| {
"content_hash": "84772b474c74aefefe244d2e44a8b5d0",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 103,
"avg_line_length": 32.73384030418251,
"alnum_prop": 0.5646416540829364,
"repo_name": "dnxbjyj/python-basic",
"id": "b1325143199b6d7a452849c0bfbe8c8dd8a3ce27",
"size": "8632",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/wxpython/wxPython-demo-4.0.1/demo/DragAndDrop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70"
},
{
"name": "HTML",
"bytes": "274934"
},
{
"name": "Jupyter Notebook",
"bytes": "868723"
},
{
"name": "Python",
"bytes": "4032747"
},
{
"name": "Shell",
"bytes": "446"
}
],
"symlink_target": ""
} |
from django.test import TestCase
from django_dynamic_fixture import G
from user_guide.models import Guide
class GuideTest(TestCase):
    """Unit tests for the Guide model."""

    def test_guide_unicode(self):
        """str() of a Guide should be its guide_name."""
        fixture = G(Guide, guide_name='test_name')
        self.assertEqual(str(fixture), 'test_name')
| {
"content_hash": "16ee13bab98447df5f2e6d264ca01560",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 25.272727272727273,
"alnum_prop": 0.7158273381294964,
"repo_name": "ambitioninc/django-user-guide",
"id": "85414a148124153c4de6e441264d94d6acbb1355",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "user_guide/tests/model_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1188"
},
{
"name": "HTML",
"bytes": "1297"
},
{
"name": "JavaScript",
"bytes": "28100"
},
{
"name": "Python",
"bytes": "20406"
}
],
"symlink_target": ""
} |
from google.cloud import dialogflow_v2
def sample_delete_context():
    """Delete a Dialogflow context (auto-generated API sample).

    Sends a DeleteContextRequest for the hard-coded resource name
    ``"name_value"``; replace it with a real context name before use.
    The surrounding region tag is consumed by documentation tooling.
    """
    # Create a client
    client = dialogflow_v2.ContextsClient()
    # Initialize request argument(s)
    request = dialogflow_v2.DeleteContextRequest(
        name="name_value",
    )
    # Make the request
    client.delete_context(request=request)
# [END dialogflow_v2_generated_Contexts_DeleteContext_sync]
| {
"content_hash": "ff9808401337049fc0b296455fa2a430",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 59,
"avg_line_length": 22.705882352941178,
"alnum_prop": 0.7098445595854922,
"repo_name": "googleapis/python-dialogflow",
"id": "fc244f00a934050716f2b4b20cc123699f5083e7",
"size": "1772",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dialogflow_v2_generated_contexts_delete_context_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "11184005"
},
{
"name": "Shell",
"bytes": "30672"
}
],
"symlink_target": ""
} |
"""
@file model_xml.py
@brief model xml operations
@author Josh Madden <cyrex562@gmail.com>
"""
import os
from bs4 import BeautifulSoup
from data_gateway import add_table_row
from model_objects import User, App, Customer, Order, CartItem
import utils
# Name of the persistent data store, located next to the ``utils`` module.
XML_FILE = 'data.xml'
# Callback registries run by load_data()/store_data(); other modules append
# their loader/storer functions here.
load_data_handlers = []
store_data_handlers = []
def get_xml_tag_string(soup_ele):
    """
    Get the string contents of an XML tag from the DOM
    :param soup_ele: a BeautifulSoup tag whose first child is a text node
    :return: the stripped text as a unicode string
    """
    # ``unicode`` marks this module as Python 2 code.
    return unicode(soup_ele.contents[0].string.strip())
def user_data_loader(soup):
    """Populate the ``users`` table from the <data><users> DOM section."""
    text = get_xml_tag_string
    for node in soup.data.users.children:
        # BeautifulSoup yields whitespace text nodes between tags; skip them.
        if node.string == '\n':
            continue
        user = User()
        user.id = node['id']
        user.user_type = text(node.user_type)
        user.username = text(node.username)
        user.password = text(node.password)
        add_table_row('users', user)
def app_data_loader(soup):
    """
    Load app data from the XML DOM
    :param soup: BeautifulSoup DOM whose <data><apps> children are parsed
    :return: None; one row per <app> is added to the 'apps' table
    """
    apps_xml = soup.data.apps
    for app_xml in apps_xml.children:
        # BeautifulSoup yields whitespace text nodes between tags; skip them.
        if app_xml.string != '\n':
            app_to_add = App()
            app_to_add.id = int(app_xml['id'])
            app_to_add.app_name = get_xml_tag_string(app_xml.app_name)
            app_to_add.download_link = get_xml_tag_string(app_xml.download_link)
            app_to_add.platform = get_xml_tag_string(app_xml.platform)
            app_to_add.platform_requirements = get_xml_tag_string(
                app_xml.platform_requirements)
            app_to_add.app_publisher = get_xml_tag_string(app_xml.app_publisher)
            app_to_add.app_description = get_xml_tag_string(
                app_xml.app_description)
            # Numeric fields are stored as text in the XML; coerce here.
            app_to_add.license_count = \
                int(get_xml_tag_string(app_xml.license_count))
            app_to_add.app_image = get_xml_tag_string(app_xml.app_image)
            app_to_add.price = float(get_xml_tag_string(app_xml.price))
            add_table_row('apps', app_to_add)
def customer_data_loader(soup):
    """Populate the ``customers`` table from the <data><customers> section."""
    text = get_xml_tag_string
    for node in soup.data.customers.children:
        # Skip the whitespace text nodes between tags.
        if node.string == '\n':
            continue
        customer = Customer()
        customer.id = node['id']
        customer.user_id = text(node.user_id)
        customer.billing_address = text(node.billing_address)
        customer.shipping_address = text(node.shipping_address)
        customer.email_address = text(node.email_address)
        customer.person_name = text(node.person_name)
        customer.rating = text(node.rating)
        add_table_row('customers', customer)
def order_data_loader(soup):
    """
    Load order data from the XML document
    :param soup: BeautifulSoup DOM whose <data><orders> children are parsed
    :return: None; one row per <order> is added to the 'orders' table
    """
    orders = soup.data.orders
    for order_xml in orders.children:
        # BeautifulSoup yields whitespace text nodes between tags; skip them.
        if order_xml.string != '\n':
            order_to_add = Order()
            order_to_add.id = order_xml['id']
            # Monetary/id fields are stored as text in the XML; coerce here.
            order_to_add.handling_fee = float(get_xml_tag_string(
                order_xml.order_handling_fee))
            order_to_add.tax_amount = float(get_xml_tag_string(
                order_xml.order_tax_amount))
            order_to_add.total_cost = float(get_xml_tag_string(
                order_xml.order_total_cost))
            order_to_add.subtotal = float(get_xml_tag_string(
                order_xml.order_subtotal))
            order_to_add.customer_id = int(get_xml_tag_string(
                order_xml.order_customer_id))
            order_to_add.billing_address = get_xml_tag_string(
                order_xml.order_billing_address)
            order_to_add.shipping_address = get_xml_tag_string(
                order_xml.order_shipping_address)
            # Each <items> child becomes one CartItem line on the order.
            for item_xml in order_xml.items.children:
                if item_xml.string != '\n':
                    item_to_add = CartItem()
                    item_to_add.app_id = int(get_xml_tag_string(
                        item_xml.app_id))
                    item_to_add.quantity = int(get_xml_tag_string(
                        item_xml.quantity
                    ))
                    item_to_add.subtotal = float(get_xml_tag_string(
                        item_xml.subtotal))
                    order_to_add.items.append(item_to_add)
            add_table_row('orders', order_to_add)
def append_xml_tag(soup, tag_parent, tag_name, tag_val):
    """Create tag *tag_name* holding string *tag_val* and attach it to
    *tag_parent*.

    :param soup: BeautifulSoup document used as the tag factory
    :param tag_parent: node the new tag is appended to
    :param tag_name: name of the element to create
    :param tag_val: string content of the new element
    """
    child = soup.new_tag(tag_name)
    child.string = tag_val
    tag_parent.append(child)
def load_data():
    """Populate the in-memory tables from the data XML file.

    Parses the file next to the ``utils`` module and hands the DOM to every
    registered loader in ``load_data_handlers``.
    """
    base = os.path.dirname(utils.__file__)
    soup = BeautifulSoup(open(base + '/' + XML_FILE), "xml")
    for handler in load_data_handlers:
        handler(soup)
def store_data():
    """Serialize the in-memory tables back to the data XML file.

    Builds a fresh <data> document with empty section tags, lets every
    registered storer in ``store_data_handlers`` fill it in, then writes the
    pretty-printed XML next to the ``utils`` module.
    """
    soup = BeautifulSoup()
    soup.append(soup.new_tag('data'))
    for section in ('users', 'customers', 'apps', 'orders'):
        soup.data.append(soup.new_tag(section))
    for handler in store_data_handlers:
        handler(soup)
    base = os.path.dirname(utils.__file__)
    xml_file = open(base + '/' + XML_FILE, 'wb')
    xml_file.write(soup.prettify())
    xml_file.close()
| {
"content_hash": "a869571980d80d44e607757bb2cef3f9",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 80,
"avg_line_length": 33.97191011235955,
"alnum_prop": 0.580949231023648,
"repo_name": "cyrex562/COMP461",
"id": "7e7af361ce459cff60c7942304de061887aeccf2",
"size": "6047",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model_xml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "38"
},
{
"name": "JavaScript",
"bytes": "25624"
},
{
"name": "Python",
"bytes": "44361"
}
],
"symlink_target": ""
} |
from letsgetlouder.settings import *
# You will need to get these from either Paul or Julia
TWITTER_CONSUMER_KEY = ''
TWITTER_CONSUMER_SECRET = ''
FACEBOOK_APP_ID = ''
FACEBOOK_API_SECRET = ''
GITHUB_APP_ID = ''
GITHUB_API_SECRET = '' | {
"content_hash": "ce13eadfcf5e26e9d1ff165ad9ec3e10",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 55,
"avg_line_length": 36.333333333333336,
"alnum_prop": 0.5137614678899083,
"repo_name": "paulsmith/letsgetlouder",
"id": "49f6c0c1c6358598194e09e34a4667c553056439",
"size": "327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letsgetlouder/local_settings.example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "50108"
},
{
"name": "Python",
"bytes": "10652"
}
],
"symlink_target": ""
} |
"""Command for describing target instances."""
from googlecloudsdk.compute.lib import base_classes
class Describe(base_classes.ZonalDescriber):
"""Describe a target instance."""
@staticmethod
def Args(parser):
base_classes.ZonalDescriber.Args(parser)
base_classes.AddFieldsFlag(parser, 'targetInstances')
@property
def service(self):
return self.compute.targetInstances
@property
def resource_type(self):
return 'targetInstances'
Describe.detailed_help = {
'brief': 'Describe a target instance',
'DESCRIPTION': """\
*{command}* displays all data associated with a Google Compute
Engine target instance in a project.
""",
}
| {
"content_hash": "0bfd553632434a68c11d662eef68ce3f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.703757225433526,
"repo_name": "ychen820/microblog",
"id": "520cd58e0da279efc1daf8f198f3616c55ea1db2",
"size": "742",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/target_instances/describe.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
} |
import BoostBuild
import os
t = BoostBuild.Tester(pass_toolset=0)
t.write("link-target", "")
os.symlink("link-target", "link")
t.write("file.jam", """
ECHO [ READLINK link ] ;
EXIT [ READLINK link-target ] : 0 ;
""")
t.run_build_system(["-ffile.jam"], stdout="""link-target
""")
t.cleanup()
| {
"content_hash": "b8b7958bb17ed3db34f61bf0f73dfef0",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 56,
"avg_line_length": 16.5,
"alnum_prop": 0.6430976430976431,
"repo_name": "davehorton/drachtio-server",
"id": "ea120fd842c8f957211f1f8e11384f4b22aa275a",
"size": "491",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "deps/boost_1_77_0/tools/build/test/builtin_readlink.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "662596"
},
{
"name": "Dockerfile",
"bytes": "1330"
},
{
"name": "JavaScript",
"bytes": "60639"
},
{
"name": "M4",
"bytes": "35273"
},
{
"name": "Makefile",
"bytes": "5960"
},
{
"name": "Shell",
"bytes": "47298"
}
],
"symlink_target": ""
} |
from datetime import datetime, time, timedelta
import hashlib
def export_event(event, format='ical'):
    """Serialize *event* as an iCalendar (RFC 5545) VCALENDAR string.

    Multi-day events (``date_to`` set) produce one VEVENT per day, each
    spanning ``time_from``..``time_to`` (or the whole day when those are
    ``None``).

    :param event: page-like object with ``url``, ``title``,
        ``search_description``, ``location``, ``date_from``/``date_to``
        (dates) and ``time_from``/``time_to`` (times or ``None``)
    :param format: only ``'ical'`` is supported; anything else returns None
    :return: calendar text joined with CRLF line endings, or None
    """
    # Only ical format supported at the moment
    if format != 'ical':
        return

    def add_slashes(string):
        # Escape characters that are special in iCal TEXT values.
        # BUG FIX: the original called str.replace() and discarded every
        # result (strings are immutable), so nothing was ever escaped.
        # The backslash must be escaped first so the backslashes added by
        # the later replacements are not doubled again.
        string = string.replace('\\', '\\\\')
        string = string.replace('"', '\\"')
        string = string.replace(',', '\\,')
        string = string.replace(':', '\\:')
        string = string.replace(';', '\\;')
        string = string.replace('\n', '\\n')
        return string

    # Begin event
    # VEVENT format: http://www.kanzaki.com/docs/ical/vevent.html
    ical_components = [
        'BEGIN:VCALENDAR',
        'VERSION:2.0',
        'PRODID:-//Torchbox//wagtail//EN',
    ]

    # Work out number of days the event lasts
    if event.date_to is not None:
        days = (event.date_to - event.date_from).days + 1
    else:
        days = 1

    for day in range(days):
        # Date of this occurrence.
        date = event.date_from + timedelta(days=day)

        # Fall back to the whole day when no explicit times are given.
        if event.time_from is not None:
            start_time = event.time_from
        else:
            start_time = time.min
        if event.time_to is not None:
            end_time = event.time_to
        else:
            end_time = time.max

        # Combine dates and times
        start_datetime = datetime.combine(date, start_time)
        end_datetime = datetime.combine(date, end_time)

        # Stable per-occurrence UID derived from the page URL and start.
        # BUG FIX: hash bytes, not text, so this also works on Python 3.
        uid_source = (event.url + str(start_datetime)).encode('utf-8')
        uid = hashlib.sha1(uid_source).hexdigest() + '@ethagavalprj'

        # Make event
        ical_components.extend([
            'BEGIN:VEVENT',
            'UID:' + add_slashes(uid),
            'URL:' + add_slashes(event.url),
            # BUG FIX: the original formatted the bare ``time`` object,
            # which stamps the placeholder date 1900-01-01.
            'DTSTAMP:' + start_datetime.strftime('%Y%m%dT%H%M%S'),
            'SUMMARY:' + add_slashes(event.title),
            'DESCRIPTION:' + add_slashes(event.search_description),
            'LOCATION:' + add_slashes(event.location),
            'DTSTART;TZID=Europe/London:' + start_datetime.strftime('%Y%m%dT%H%M%S'),
            'DTEND;TZID=Europe/London:' + end_datetime.strftime('%Y%m%dT%H%M%S'),
            'END:VEVENT',
        ])

    # Finish event
    ical_components.extend([
        'END:VCALENDAR',
    ])

    # BUG FIX: RFC 5545 requires CRLF line endings; bare '\r' is invalid.
    return '\r\n'.join(ical_components)
| {
"content_hash": "98f66f0880081c87ca09b08bced92869",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 89,
"avg_line_length": 29.26923076923077,
"alnum_prop": 0.5295663600525624,
"repo_name": "arvindram03/e-thagaval",
"id": "dbfc64fa28b05477b9d6add119bc05379ec022cf",
"size": "2283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ethagaval/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1987"
},
{
"name": "HTML",
"bytes": "20087"
},
{
"name": "JavaScript",
"bytes": "218"
},
{
"name": "Python",
"bytes": "84149"
},
{
"name": "Shell",
"bytes": "6308"
}
],
"symlink_target": ""
} |
from batch_isp import BatchISP
if __name__ == '__main__':
    # Instantiate the CLI front-end and propagate its status code to the shell.
    batchISP = BatchISP()
    exit(batchISP.run())
| {
"content_hash": "32ae11b30e54359411e9199719fc9cbf",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 30,
"avg_line_length": 22,
"alnum_prop": 0.6090909090909091,
"repo_name": "pinkavaj/batch_isp",
"id": "9447b335a99075f4b91c96d9dedf593828b52c51",
"size": "130",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__main__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "25674"
}
],
"symlink_target": ""
} |
# Emit the classic first-program greeting.
greeting = 'Hello world!'
print(greeting)
| {
"content_hash": "167f0033072a0180992d63bbf0425e4c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 23,
"alnum_prop": 0.6521739130434783,
"repo_name": "alkryukov/sandbox",
"id": "b7013b134d2010aefa6e2f19ed4b1d51d8dd098f",
"size": "23",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "learnings/python/basics/00010_hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "70"
},
{
"name": "C",
"bytes": "97"
},
{
"name": "CSS",
"bytes": "90"
},
{
"name": "Dockerfile",
"bytes": "507"
},
{
"name": "Go",
"bytes": "2918"
},
{
"name": "HTML",
"bytes": "3807"
},
{
"name": "JavaScript",
"bytes": "10367"
},
{
"name": "Python",
"bytes": "101707"
},
{
"name": "Shell",
"bytes": "1162"
},
{
"name": "TypeScript",
"bytes": "1913"
}
],
"symlink_target": ""
} |
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import pyaudio
from scipy.io import wavfile
import preprocessing
def plotWaveform(audio, samplingFreq, ax=None):
    '''Plot a mono audio waveform against time in milliseconds.

    :param audio: 1-D sample array.
    :param samplingFreq: sampling rate in Hz, used to scale the x axis.
    :param ax: matplotlib axes to draw into; when omitted a fresh
        figure/axes is created per call.
    '''
    # BUG FIX: the original default ``ax=plt.figure().add_subplot(111)``
    # was evaluated once at import time, so every default call shared (and
    # overdrew) one hidden figure.  Create a fresh axes per call instead.
    if ax is None:
        ax = plt.figure().add_subplot(111)
    x_values = np.arange(0, audio.shape[0], 1) / samplingFreq
    x_values = x_values * 1000  # seconds -> milliseconds
    ax.plot(x_values, audio, 'k')
    ax.set_xlabel('Time (ms)')
    ax.set_ylabel('Amplitude')
    ax.set_title('Audio signal')
    plt.draw()
def plotWaveforms(audio, samplingFreq, fig=None):
    '''Plot the waveform(s) of a mono or stereo signal in the time domain.

    :param audio: 1-D (mono) or 2-D two-column (stereo) sample array.
    :param samplingFreq: sampling rate in Hz.
    :param fig: matplotlib figure to draw into; when omitted a fresh
        figure is created per call.
    '''
    # BUG FIX: the original default ``fig=plt.figure()`` was evaluated once
    # at import time, so every default call drew into the same figure.
    if fig is None:
        fig = plt.figure()
    if audio.ndim == 1:
        plotWaveform(audio, samplingFreq, ax=fig.add_subplot(111))
    elif audio.ndim == 2:
        # Stereo: channel 0 on top, channel 1 below.
        ax = fig.add_subplot(211)
        plotWaveform(audio[:, 0], samplingFreq, ax)
        ax = fig.add_subplot(212)
        plotWaveform(audio[:, 1], samplingFreq, ax)
    plt.show()
def plotPowerSpectrum(audio, samplingFreq):
    '''Plot the audio power spectrum.

    Thin wrapper: delegates to ``preprocessing.powerSpectrum`` with
    plotting switched on.
    '''
    preprocessing.powerSpectrum(audio, samplingFreq, plotEnabled=True)
def plotSpectrogram(audio, samplingFreq):
    '''Plot the audio spectrogram.

    Thin wrapper: delegates to ``preprocessing.audioSpectrogram`` with
    plotting switched on.
    '''
    preprocessing.audioSpectrogram(audio, samplingFreq, plotEnabled=True)
def playBack(audio, samplingFreq):
    '''Play back audio from numpy array, based on pyaudio.

    :param audio: sample array; converted to float32 before playback.
    :param samplingFreq: playback rate in Hz.
    '''
    audio = audio.astype(np.float32)
    # instantiate PyAudio (1)
    p = pyaudio.PyAudio()
    # open stream (2)
    # NOTE(review): channels=audio.ndim assumes a 2-D array is exactly
    # two-channel (stereo) -- confirm for multi-column input.
    stream = p.open(format=p.get_format_from_width(audio.dtype.itemsize),
                    channels=audio.ndim,
                    rate=samplingFreq,
                    output=True)
    # play stream (3)
    # BUG FIX: ndarray.tostring() was deprecated and removed in NumPy 2.0;
    # tobytes() is the identical, supported replacement.
    stream.write(audio.tobytes())
    # stop stream (4)
    stream.stop_stream()
    stream.close()
    # close PyAudio (5)
    p.terminate()
def playBackFile(fileName):
    '''Play back a WAV file and print summary information about it.

    :param fileName: path to the .wav file to normalize and play.
    '''
    # BUG FIX: scipy.io.wavfile.read's second positional parameter is
    # ``mmap``, not a mode string; passing 'rb' silently enabled
    # memory-mapped reading.
    samplingFreq, audio = wavfile.read(fileName)
    # Normalize to [-1, 1]; guard against divide-by-zero on a silent file.
    peak = np.amax(np.abs(audio))
    if peak > 0:
        audio = audio / peak
    audio = audio.astype(np.float32)
    print('=====Audio information=====')
    print('Shape:', audio.shape)
    print('Datatype:', audio.dtype)
    print('SamplingFrequency:', samplingFreq)
    print('Duration:', round(audio.shape[0] / samplingFreq, 3), 'seconds')
    playBack(audio, samplingFreq)
| {
"content_hash": "8ee5718d2dff610871007d719c99d31c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 77,
"avg_line_length": 29.743243243243242,
"alnum_prop": 0.6905951840072694,
"repo_name": "liborutgers12/audiospeech",
"id": "ffc9478bd544a24fc0cb5d0522f3469f302e7cb8",
"size": "2201",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "9595"
}
],
"symlink_target": ""
} |
from typing import List, Sequence
import itertools
from e2e_test_framework.definitions import common_definitions, iree_definitions
def generate_e2e_model_run_configs(
    module_generation_configs: Sequence[
        iree_definitions.ModuleGenerationConfig],
    module_execution_configs: Sequence[iree_definitions.ModuleExecutionConfig],
    device_specs: Sequence[common_definitions.DeviceSpec],
) -> List[iree_definitions.E2EModelRunConfig]:
  """Generates one run config per (generation, execution, device) triple.

  Input data is always the zeros placeholder.
  """
  run_configs = []
  for gen_config, exec_config, device_spec in itertools.product(
      module_generation_configs, module_execution_configs, device_specs):
    run_configs.append(
        iree_definitions.E2EModelRunConfig(
            module_generation_config=gen_config,
            module_execution_config=exec_config,
            target_device_spec=device_spec,
            input_data=common_definitions.ZEROS_MODEL_INPUT_DATA))
  return run_configs
| {
"content_hash": "00f2830ca47b553742c683dd015d6021",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 41.416666666666664,
"alnum_prop": 0.7474849094567404,
"repo_name": "google/iree",
"id": "6df10397d50ab54e63cf8f68bdb4da4cf3c429a0",
"size": "1213",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "build_tools/python/benchmark_suites/iree/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "23010"
},
{
"name": "Batchfile",
"bytes": "353"
},
{
"name": "C",
"bytes": "3830546"
},
{
"name": "C++",
"bytes": "8161374"
},
{
"name": "CMake",
"bytes": "899403"
},
{
"name": "Dockerfile",
"bytes": "28245"
},
{
"name": "GLSL",
"bytes": "2629"
},
{
"name": "HTML",
"bytes": "31018"
},
{
"name": "Java",
"bytes": "31697"
},
{
"name": "JavaScript",
"bytes": "18714"
},
{
"name": "MLIR",
"bytes": "5606822"
},
{
"name": "NASL",
"bytes": "3852"
},
{
"name": "PowerShell",
"bytes": "7893"
},
{
"name": "Python",
"bytes": "1143963"
},
{
"name": "Shell",
"bytes": "248374"
},
{
"name": "Starlark",
"bytes": "600260"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.