code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright (C) 2009 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
##############################################################################
"""A paint engine to produce EMF exports.
Requires: PyQt-x11-gpl-4.6-snapshot-20090906.tar.gz
sip-4.9-snapshot-20090906.tar.gz
pyemf
"""
import struct
import pyemf
from .. import qtall as qt
inch_mm = 25.4
scale = 100
def isStockObject(obj):
    """Return True if obj is a stock windows object handle.

    Stock object handles are flagged by having their top bit set;
    such handles must not be deleted with DeleteObject."""
    return bool(obj & 0x80000000)
class _EXTCREATEPEN(pyemf._EMR._EXTCREATEPEN):
    """Extended pen creation record with custom line style.

    Extends pyemf's EXTCREATEPEN record so that a user-defined dash
    pattern (PS_USERSTYLE) can be written to the metafile.
    """

    # binary layout of the EMF record fields
    typedef = [
        ('i','handle',0),
        ('i','offBmi',0),
        ('i','cbBmi',0),
        ('i','offBits',0),
        ('i','cbBits',0),
        ('i','style'),
        ('i','penwidth'),
        ('i','brushstyle'),
        ('i','color'),
        ('i','brushhatch',0),
        ('i','numstyleentries')
    ]

    def __init__(self, style=pyemf.PS_SOLID, width=1, color=0,
                 styleentries=None):
        """Create pen.

        style: combination of pyemf PS_* flags
        width: pen width in device units
        color: pen colour (anything pyemf._normalizeColor accepts)
        styleentries: list of alternating dash and space lengths
          (only used when the style is PS_USERSTYLE)
        """
        # avoid the mutable-default-argument pitfall of styleentries=[]
        if styleentries is None:
            styleentries = []
        pyemf._EMR._EXTCREATEPEN.__init__(self)
        self.style = style
        self.penwidth = width
        self.color = pyemf._normalizeColor(color)
        self.brushstyle = 0x0  # solid

        # dash entries are only meaningful for user-style pens
        # (parentheses added for clarity; & binds tighter than != anyway)
        if (style & pyemf.PS_STYLE_MASK) != pyemf.PS_USERSTYLE:
            styleentries = []

        self.numstyleentries = len(styleentries)
        if styleentries:
            # dash/space lengths are appended as raw little-endian ints
            self.unhandleddata = struct.pack(
                "i"*self.numstyleentries, *styleentries)

    def hasHandle(self):
        """This record allocates a GDI object handle."""
        return True
class EMFPaintEngine(qt.QPaintEngine):
    """Custom EMF paint engine.

    Translates Qt paint-engine calls into pyemf drawing records.  All
    coordinates are multiplied by the module-level `scale` factor
    because EMF records use integer coordinates.
    """

    def __init__(self, width_in, height_in, dpi=75):
        """width_in, height_in: page size in inches; dpi: resolution."""
        qt.QPaintEngine.__init__(
            self,
            qt.QPaintEngine.Antialiasing |
            qt.QPaintEngine.PainterPaths |
            qt.QPaintEngine.PrimitiveTransform |
            qt.QPaintEngine.PaintOutsidePaintEvent |
            qt.QPaintEngine.PatternBrush
        )
        self.width = width_in
        self.height = height_in
        self.dpi = dpi

    def begin(self, paintdevice):
        """Start painting: create the EMF and select stock pen/brush."""
        self.emf = pyemf.EMF(self.width, self.height, int(self.dpi*scale))
        # currently selected pen/brush handles, so they can be freed on change
        self.pen = self.emf.GetStockObject(pyemf.BLACK_PEN)
        self.pencolor = (0, 0, 0)
        self.brush = self.emf.GetStockObject(pyemf.NULL_BRUSH)
        self.paintdevice = paintdevice
        return True

    def drawLines(self, lines):
        """Draw lines to emf output."""
        for line in lines:
            self.emf.Polyline(
                [ (int(line.x1()*scale), int(line.y1()*scale)),
                  (int(line.x2()*scale), int(line.y2()*scale)) ] )

    def drawPolygon(self, points, mode):
        """Draw polygon on output."""
        pts = [(int(p.x()*scale), int(p.y()*scale)) for p in points]
        if mode == qt.QPaintEngine.PolylineMode:
            self.emf.Polyline(pts)
        else:
            # FIX: the mode->fill-mode dict was previously passed to
            # SetPolyFillMode unindexed; it must be looked up by mode
            self.emf.SetPolyFillMode({
                qt.QPaintEngine.WindingMode: pyemf.WINDING,
                qt.QPaintEngine.OddEvenMode: pyemf.ALTERNATE,
                qt.QPaintEngine.ConvexMode: pyemf.WINDING
            }[mode])
            self.emf.Polygon(pts)

    def drawEllipse(self, rect):
        """Draw an ellipse."""
        # Pie/Arc with coincident start and end points draw the complete
        # filled ellipse and its outline respectively
        args = (
            int(rect.left()*scale), int(rect.top()*scale),
            int(rect.right()*scale), int(rect.bottom()*scale),
            int(rect.left()*scale), int(rect.top()*scale),
            int(rect.left()*scale), int(rect.top()*scale),
        )
        self.emf.Pie(*args)
        self.emf.Arc(*args)

    def drawPoints(self, points):
        """Draw points as tiny (1 unit diameter) filled circles."""
        for pt in points:
            x, y = (pt.x()-0.5)*scale, (pt.y()-0.5)*scale
            self.emf.Pie(
                int(x), int(y),
                int((pt.x()+0.5)*scale), int((pt.y()+0.5)*scale),
                int(x), int(y), int(x), int(y) )

    def drawPixmap(self, r, pixmap, sr):
        """Draw pixmap to display.

        r: destination rectangle; sr: source rectangle in the pixmap.
        """
        # convert pixmap to BMP format
        bytearr = qt.QByteArray()
        buf = qt.QBuffer(bytearr)
        buf.open(qt.QIODevice.WriteOnly)
        pixmap.save(buf, "BMP")

        # chop off the 14-byte BITMAPFILEHEADER to get the DIB
        bmp = bytes(buf.data())
        dib = bmp[0xe:]
        # BITMAPINFOHEADER size, pixel-data offset and pixel-data size
        # read from the fixed BMP header layout
        hdrsize, = struct.unpack('<i', bmp[0xe:0x12])
        dataindex, = struct.unpack('<i', bmp[0xa:0xe])
        datasize, = struct.unpack('<i', bmp[0x22:0x26])

        # build a STRETCHDIBITS record by hand
        epix = pyemf._EMR._STRETCHDIBITS()
        epix.rclBounds_left = int(r.left()*scale)
        epix.rclBounds_top = int(r.top()*scale)
        epix.rclBounds_right = int(r.right()*scale)
        epix.rclBounds_bottom = int(r.bottom()*scale)
        epix.xDest = int(r.left()*scale)
        epix.yDest = int(r.top()*scale)
        epix.cxDest = int(r.width()*scale)
        epix.cyDest = int(r.height()*scale)
        epix.xSrc = int(sr.left())
        epix.ySrc = int(sr.top())
        epix.cxSrc = int(sr.width())
        epix.cySrc = int(sr.height())

        epix.dwRop = 0xcc0020  # SRCCOPY
        # offsets of the DIB header and bits relative to the record start
        offset = epix.format.minstructsize + 8
        epix.offBmiSrc = offset
        epix.cbBmiSrc = hdrsize
        epix.offBitsSrc = offset + dataindex - 0xe
        epix.cbBitsSrc = datasize
        epix.iUsageSrc = 0x0  # DIB_RGB_COLORS

        epix.unhandleddata = dib
        self.emf._append(epix)

    def _createPath(self, path):
        """Convert qt path to emf path (BeginPath...EndPath records)."""
        self.emf.BeginPath()
        count = path.elementCount()
        i = 0
        while i < count:
            e = path.elementAt(i)
            if e.type == qt.QPainterPath.MoveToElement:
                self.emf.MoveTo( int(e.x*scale), int(e.y*scale) )
            elif e.type == qt.QPainterPath.LineToElement:
                self.emf.LineTo( int(e.x*scale), int(e.y*scale) )
            elif e.type == qt.QPainterPath.CurveToElement:
                # cubic bezier: the two following elements hold the
                # remaining control point and end point
                e1 = path.elementAt(i+1)
                e2 = path.elementAt(i+2)
                params = (
                    ( int(e.x*scale), int(e.y*scale) ),
                    ( int(e1.x*scale), int(e1.y*scale) ),
                    ( int(e2.x*scale), int(e2.y*scale) ),
                )
                self.emf.PolyBezierTo(params)
                i += 2
            else:
                assert False
            i += 1

        # close the figure if the path ends where it started
        ef = path.elementAt(0)
        el = path.elementAt(count-1)
        if ef.x == el.x and ef.y == el.y:
            self.emf.CloseFigure()
        self.emf.EndPath()

    def drawPath(self, path):
        """Draw a path on the output."""
        self._createPath(path)
        self.emf.StrokeAndFillPath()

    def drawTextItem(self, pt, textitem):
        """Convert text to a path and draw it.

        The path is filled with the current pen colour so text matches
        the stroke colour.
        """
        path = qt.QPainterPath()
        path.addText(pt, textitem.font(), textitem.text())

        fill = self.emf.CreateSolidBrush(self.pencolor)
        self.emf.SelectObject(fill)
        self._createPath(path)
        self.emf.FillPath()
        # restore previous brush and free the temporary one
        self.emf.SelectObject(self.brush)
        self.emf.DeleteObject(fill)

    def end(self):
        return True

    def saveFile(self, filename):
        """Write the completed EMF to filename."""
        self.emf.save(filename)

    def _updatePen(self, pen):
        """Update the pen to the currently selected one."""
        # line style
        style = {
            qt.Qt.NoPen: pyemf.PS_NULL,
            qt.Qt.SolidLine: pyemf.PS_SOLID,
            qt.Qt.DashLine: pyemf.PS_DASH,
            qt.Qt.DotLine: pyemf.PS_DOT,
            qt.Qt.DashDotLine: pyemf.PS_DASHDOT,
            qt.Qt.DashDotDotLine: pyemf.PS_DASHDOTDOT,
            qt.Qt.CustomDashLine: pyemf.PS_USERSTYLE,
        }[pen.style()]

        if style != pyemf.PS_NULL:
            # set cap style
            style |= {
                qt.Qt.FlatCap: pyemf.PS_ENDCAP_FLAT,
                qt.Qt.SquareCap: pyemf.PS_ENDCAP_SQUARE,
                qt.Qt.RoundCap: pyemf.PS_ENDCAP_ROUND,
            }[pen.capStyle()]
            # set join style
            style |= {
                qt.Qt.MiterJoin: pyemf.PS_JOIN_MITER,
                qt.Qt.BevelJoin: pyemf.PS_JOIN_BEVEL,
                qt.Qt.RoundJoin: pyemf.PS_JOIN_ROUND,
                qt.Qt.SvgMiterJoin: pyemf.PS_JOIN_MITER,
            }[pen.joinStyle()]
            # use proper widths of lines
            style |= pyemf.PS_GEOMETRIC

        width = int(pen.widthF()*scale)
        qc = pen.color()
        color = (qc.red(), qc.green(), qc.blue())
        self.pencolor = color

        if pen.style() == qt.Qt.CustomDashLine:
            # make an extended pen if we need a custom dash pattern
            dash = [int(pen.widthF()*scale*f) for f in pen.dashPattern()]
            newpen = self.emf._appendHandle( _EXTCREATEPEN(
                style, width=width, color=color, styleentries=dash))
        else:
            # use a standard create pen
            newpen = self.emf.CreatePen(style, width, color)
        self.emf.SelectObject(newpen)

        # delete old pen if it is not a stock object
        if not isStockObject(self.pen):
            self.emf.DeleteObject(self.pen)
        self.pen = newpen

    def _updateBrush(self, brush):
        """Update to selected brush."""
        style = brush.style()
        qc = brush.color()
        color = (qc.red(), qc.green(), qc.blue())

        if style == qt.Qt.SolidPattern:
            newbrush = self.emf.CreateSolidBrush(color)
        elif style == qt.Qt.NoBrush:
            newbrush = self.emf.GetStockObject(pyemf.NULL_BRUSH)
        else:
            try:
                hatch = {
                    qt.Qt.HorPattern: pyemf.HS_HORIZONTAL,
                    qt.Qt.VerPattern: pyemf.HS_VERTICAL,
                    qt.Qt.CrossPattern: pyemf.HS_CROSS,
                    qt.Qt.BDiagPattern: pyemf.HS_BDIAGONAL,
                    qt.Qt.FDiagPattern: pyemf.HS_FDIAGONAL,
                    qt.Qt.DiagCrossPattern: pyemf.HS_DIAGCROSS
                }[brush.style()]
            except KeyError:
                # fall back to solid for patterns EMF cannot represent
                newbrush = self.emf.CreateSolidBrush(color)
            else:
                newbrush = self.emf.CreateHatchBrush(hatch, color)
        self.emf.SelectObject(newbrush)

        if not isStockObject(self.brush):
            self.emf.DeleteObject(self.brush)
        self.brush = newbrush

    def _updateClipPath(self, path, operation):
        """Update clipping path."""
        if operation != qt.Qt.NoClip:
            self._createPath(path)
            clipmode = {
                qt.Qt.ReplaceClip: pyemf.RGN_COPY,
                qt.Qt.IntersectClip: pyemf.RGN_AND,
            }[operation]
        else:
            # is this the only way to get rid of clipping?
            # replace the clip region with a path covering the whole page
            self.emf.BeginPath()
            self.emf.MoveTo(0,0)
            w = int(self.width*self.dpi*scale)
            h = int(self.height*self.dpi*scale)
            self.emf.LineTo(w, 0)
            self.emf.LineTo(w, h)
            self.emf.LineTo(0, h)
            self.emf.CloseFigure()
            self.emf.EndPath()
            clipmode = pyemf.RGN_COPY

        self.emf.SelectClipPath(mode=clipmode)

    def _updateTransform(self, m):
        """Update world transformation (translation part needs scaling)."""
        self.emf.SetWorldTransform(
            m.m11(), m.m12(),
            m.m21(), m.m22(),
            m.dx()*scale, m.dy()*scale)

    def updateState(self, state):
        """Examine what has changed in state and call appropriate function."""
        ss = state.state()
        if ss & qt.QPaintEngine.DirtyPen:
            self._updatePen(state.pen())
        if ss & qt.QPaintEngine.DirtyBrush:
            self._updateBrush(state.brush())
        if ss & qt.QPaintEngine.DirtyTransform:
            self._updateTransform(state.transform())
        if ss & qt.QPaintEngine.DirtyClipPath:
            self._updateClipPath(state.clipPath(), state.clipOperation())
        if ss & qt.QPaintEngine.DirtyClipRegion:
            # regions are converted to paths and clipped the same way
            path = qt.QPainterPath()
            path.addRegion(state.clipRegion())
            self._updateClipPath(path, state.clipOperation())

    def type(self):
        # NOTE(review): reports PostScript as there is no dedicated EMF
        # engine type in Qt — presumably intentional; confirm if changed
        return qt.QPaintEngine.PostScript
class EMFPaintDevice(qt.QPaintDevice):
    """Paint device for EMF paint engine.

    Wraps an EMFPaintEngine so QPainter can render onto it."""

    def __init__(self, width_in, height_in, dpi=75):
        """Construct device for a page of width_in x height_in inches."""
        qt.QPaintDevice.__init__(self)
        self.engine = EMFPaintEngine(width_in, height_in, dpi=dpi)

    def paintEngine(self):
        """Return the engine which performs the actual drawing."""
        return self.engine

    def metric(self, m):
        """Return the metrics of the painter."""
        eng = self.engine
        dpi = int(eng.dpi)
        # map each metric enum to its value; unknown metrics fall back
        # to the base class implementation
        values = {
            qt.QPaintDevice.PdmWidth: int(eng.width * eng.dpi),
            qt.QPaintDevice.PdmHeight: int(eng.height * eng.dpi),
            qt.QPaintDevice.PdmWidthMM: int(eng.width * inch_mm),
            qt.QPaintDevice.PdmHeightMM: int(eng.height * inch_mm),
            qt.QPaintDevice.PdmNumColors: 2147483647,
            qt.QPaintDevice.PdmDepth: 24,
            qt.QPaintDevice.PdmDpiX: dpi,
            qt.QPaintDevice.PdmDpiY: dpi,
            qt.QPaintDevice.PdmPhysicalDpiX: dpi,
            qt.QPaintDevice.PdmPhysicalDpiY: dpi,
            qt.QPaintDevice.PdmDevicePixelRatio: 1,
            # metric only present in Qt >= 5.6
            getattr(qt.QPaintDevice, 'PdmDevicePixelRatioScaled', -1): 1,
        }
        try:
            return values[m]
        except KeyError:
            return qt.QPaintDevice.metric(self, m)
| veusz/veusz | veusz/document/emf_export.py | Python | gpl-2.0 | 14,778 |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import uuid
import mock
import testscenarios
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
from oslo_utils import timeutils
from sqlalchemy.orm import query
from neutron.common import constants
from neutron.common import topics
from neutron import context as q_context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3agentscheduler as l3agent
from neutron import manager
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# the below code is required for the following reason
# (as documented in testscenarios)
"""Multiply tests depending on their 'scenarios' attribute.
This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
"""
load_tests = testscenarios.load_tests_apply_scenarios

# fake agent-state reports used to register L3 agents in the tests:
# one agent running in distributed (dvr) mode ...
HOST_DVR = 'my_l3_host_dvr'
DVR_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST_DVR,
    'topic': topics.L3_AGENT,
    'configurations': {'agent_mode': 'dvr'},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}

# ... and one in dvr_snat mode (dvr plus centralized SNAT)
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
DVR_SNAT_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST_DVR_SNAT,
    'topic': topics.L3_AGENT,
    'configurations': {'agent_mode': 'dvr_snat'},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}
class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
    """Concrete L3Scheduler with the abstract hooks stubbed out.

    Allows the base class's shared helper methods to be exercised
    without any real scheduling strategy."""

    def schedule(self):
        """No-op: the scheduling strategy is irrelevant for these tests."""
        pass

    def _choose_router_agent(self):
        """No-op stub for the abstract agent-choice hook."""
        pass

    def _choose_router_agents_for_ha(self):
        """No-op stub for the abstract HA agent-choice hook."""
        pass
class FakePortDB(object):
    """In-memory stand-in for the port database plugin API.

    Serves a fixed list of port dicts via get_port/get_ports with the
    same tenant-visibility rules as the real plugin."""

    def __init__(self, port_list):
        # fixed set of port dicts this fake serves
        self._port_list = port_list

    def _matches(self, port, filters):
        """Return True if port satisfies every entry in filters.

        Each filter maps a key to the acceptable values; list-valued
        port attributes match when any element passes the sub-filter."""
        for key, wanted in filters.items():
            value = port.get(key, None)
            if not value:
                # missing (or falsy) attribute never matches
                return False
            if isinstance(value, list):
                if not self._get_query_answer(value, wanted):
                    return False
            elif value not in wanted:
                return False
        return True

    def _get_query_answer(self, port_list, filters):
        """Return the ports from port_list satisfying all of filters."""
        return [port for port in port_list if self._matches(port, filters)]

    def get_port(self, context, port_id):
        """Return the port with port_id if visible to context, else None."""
        for port in self._port_list:
            if port['id'] == port_id:
                # non-admins may only see their own tenant's ports
                if port['tenant_id'] == context.tenant_id or context.is_admin:
                    return port
                break
        return None

    def get_ports(self, context, filters=None):
        """Return ports matching filters, scoped to context's tenant
        unless the context is admin."""
        query_filters = dict(filters) if filters else {}
        if not context.is_admin:
            query_filters['tenant_id'] = [context.tenant_id]
        return self._get_query_answer(self._port_list, query_filters)
class L3SchedulerBaseTestCase(base.BaseTestCase):
    """Unit tests for the shared helpers on the L3Scheduler base class.

    Uses FakeL3Scheduler plus a fully mocked plugin, so no database or
    agent infrastructure is required."""

    def setUp(self):
        super(L3SchedulerBaseTestCase, self).setUp()
        self.scheduler = FakeL3Scheduler()
        self.plugin = mock.Mock()

    def test_auto_schedule_routers(self):
        # happy path: agents and routers exist, so scheduling proceeds
        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
        with contextlib.nested(
            mock.patch.object(self.scheduler, '_get_routers_to_schedule'),
            mock.patch.object(self.scheduler, '_get_routers_can_schedule')
        ) as (gs, gr):
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertTrue(result)
        self.assertTrue(gs.called)
        self.assertTrue(gr.called)

    def test_auto_schedule_routers_no_agents(self):
        # no enabled agent on host -> scheduling aborts with False
        self.plugin.get_enabled_agent_on_host.return_value = None
        result = self.scheduler.auto_schedule_routers(
            self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)

    def test_auto_schedule_routers_no_unscheduled_routers(self):
        # nothing left to schedule -> returns False
        type(self.plugin).supported_extension_aliases = (
            mock.PropertyMock(return_value=[]))
        with mock.patch.object(self.scheduler,
                               '_get_routers_to_schedule') as mock_routers:
            mock_routers.return_value = []
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)

    def test_auto_schedule_routers_no_target_routers(self):
        # candidate filtering leaves no routers -> returns False
        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
        with contextlib.nested(
            mock.patch.object(self.scheduler, '_get_routers_to_schedule'),
            mock.patch.object(self.scheduler, '_get_routers_can_schedule')
        ) as (mock_unscheduled_routers, mock_target_routers):
            mock_unscheduled_routers.return_value = mock.ANY
            mock_target_routers.return_value = None
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)

    def test__get_routers_to_schedule_with_router_ids(self):
        router_ids = ['foo_router_1', 'foo_router_2']
        expected_routers = [
            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
        ]
        self.plugin.get_routers.return_value = expected_routers
        with mock.patch.object(self.scheduler,
                               '_filter_unscheduled_routers') as mock_filter:
            mock_filter.return_value = expected_routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin, router_ids)
        mock_filter.assert_called_once_with(
            mock.ANY, self.plugin, expected_routers)
        self.assertEqual(expected_routers, unscheduled_routers)

    def test__get_routers_to_schedule_without_router_ids(self):
        expected_routers = [
            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
        ]
        with mock.patch.object(self.scheduler,
                               '_get_unscheduled_routers') as mock_get:
            mock_get.return_value = expected_routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin)
        mock_get.assert_called_once_with(mock.ANY, self.plugin)
        self.assertEqual(expected_routers, unscheduled_routers)

    def test__get_routers_to_schedule_exclude_distributed(self):
        # distributed routers are dropped when exclude_distributed=True
        routers = [
            {'id': 'foo_router1', 'distributed': True}, {'id': 'foo_router_2'}
        ]
        expected_routers = [{'id': 'foo_router_2'}]
        with mock.patch.object(self.scheduler,
                               '_get_unscheduled_routers') as mock_get:
            mock_get.return_value = routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin,
                router_ids=None, exclude_distributed=True)
        mock_get.assert_called_once_with(mock.ANY, self.plugin)
        self.assertEqual(expected_routers, unscheduled_routers)

    def _test__get_routers_can_schedule(self, routers, agent, target_routers):
        # shared driver: candidate agents determine schedulable routers
        self.plugin.get_l3_agent_candidates.return_value = agent
        result = self.scheduler._get_routers_can_schedule(
            mock.ANY, self.plugin, routers, mock.ANY)
        self.assertEqual(target_routers, result)

    def _test__filter_unscheduled_routers(self, routers, agents, expected):
        # shared driver: routers already hosted by an agent are filtered out
        self.plugin.get_l3_agents_hosting_routers.return_value = agents
        unscheduled_routers = self.scheduler._filter_unscheduled_routers(
            mock.ANY, self.plugin, routers)
        self.assertEqual(expected, unscheduled_routers)

    def test__filter_unscheduled_routers_already_scheduled(self):
        self._test__filter_unscheduled_routers(
            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
            [{'id': 'foo_agent_id'}], [])

    def test__filter_unscheduled_routers_non_scheduled(self):
        self._test__filter_unscheduled_routers(
            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
            None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}])

    def test__get_routers_can_schedule_with_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, mock.ANY, routers)

    def test__get_routers_can_schedule_with_no_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, None, [])

    def test__bind_routers_centralized(self):
        routers = [{'id': 'foo_router'}]
        with mock.patch.object(self.scheduler, 'bind_router') as mock_bind:
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, mock.ANY)
        mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY)

    def _test__bind_routers_ha(self, has_binding):
        # HA routers only get a new binding when none exists yet
        routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}]
        agent = agents_db.Agent(id='foo_agent')
        with contextlib.nested(
            mock.patch.object(self.scheduler, '_router_has_binding',
                              return_value=has_binding),
            mock.patch.object(self.scheduler, '_create_ha_router_binding')
        ) as (
            mock_has_binding, mock_bind):
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent)
            mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router',
                                                     'foo_agent')
            self.assertEqual(not has_binding, mock_bind.called)

    def test__bind_routers_ha_has_binding(self):
        self._test__bind_routers_ha(has_binding=True)

    def test__bind_routers_ha_no_binding(self):
        self._test__bind_routers_ha(has_binding=False)
class L3SchedulerBaseMixin(object):
    """Mixin providing agent-registration and router helpers for the
    scheduler test cases (expects self.plugin / self.adminContext)."""

    def _register_l3_agent(self, host, agent_mode='legacy', plugin=None):
        """Report state for a fake L3 agent on host; return its DB row."""
        if not plugin:
            plugin = self.plugin
        agent = {
            'binary': 'neutron-l3-agent',
            'host': host,
            'topic': topics.L3_AGENT,
            'configurations': {'agent_mode': agent_mode},
            'agent_type': constants.AGENT_TYPE_L3,
            'start_flag': True
        }
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': agent},
                              time=timeutils.strtime())
        agent_db = plugin.get_agents_db(self.adminContext,
                                        filters={'host': [agent['host']]})
        return agent_db[0]

    def _register_l3_agents(self, plugin=None):
        """Register two legacy agents on host_1 and host_2."""
        self.agent1 = self._register_l3_agent('host_1', plugin=plugin)
        self.agent_id1 = self.agent1.id
        self.agent2 = self._register_l3_agent('host_2', plugin=plugin)
        self.agent_id2 = self.agent2.id

    def _register_l3_dvr_agents(self):
        """Register one dvr agent and one dvr_snat agent."""
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': DVR_L3_AGENT},
                              time=timeutils.strtime())
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST_DVR]})
        self.l3_dvr_agent = agent_db[0]
        self.l3_dvr_agent_id = agent_db[0].id
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': DVR_SNAT_L3_AGENT},
                              time=timeutils.strtime())
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST_DVR_SNAT]})
        self.l3_dvr_snat_id = agent_db[0].id
        self.l3_dvr_snat_agent = agent_db[0]

    def _set_l3_agent_admin_state(self, context, agent_id, state=True):
        """Set an agent's admin_state_up flag."""
        update = {'agent': {'admin_state_up': state}}
        self.plugin.update_agent(context, agent_id, update)

    def _set_l3_agent_dead(self, agent_id):
        """Backdate an agent's heartbeat so it is considered dead."""
        update = {
            'agent': {
                'heartbeat_timestamp':
                timeutils.utcnow() - datetime.timedelta(hours=1)}}
        self.plugin.update_agent(self.adminContext, agent_id, update)

    @contextlib.contextmanager
    def router_with_ext_gw(self, name='router1', admin_state_up=True,
                           fmt=None, tenant_id=None,
                           external_gateway_info=None,
                           subnet=None, set_context=False,
                           **kwargs):
        """Yield a router attached to subnet's external network; tear it
        down (gateway removal + delete) on exit.

        FIX: tenant_id previously defaulted to str(uuid.uuid4()), which
        is evaluated once at class-definition time, so every caller
        relying on the default silently shared a single tenant id.  A
        fresh id is now generated per call.
        """
        if tenant_id is None:
            tenant_id = str(uuid.uuid4())
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        self._add_external_gateway_to_router(
            router['router']['id'],
            subnet['subnet']['network_id'])
        yield router
        # cleanup: detach the gateway and remove the router
        self._remove_external_gateway_from_router(
            router['router']['id'], subnet['subnet']['network_id'])
        self._delete('routers', router['router']['id'])
class L3SchedulerTestBaseMixin(object):
    """Scheduler tests exercising add_router_to_l3_agent and binding
    behaviour against a real plugin (provided by the concrete case)."""

    def _test_add_router_to_l3_agent(self,
                                     distributed=False,
                                     already_scheduled=False,
                                     external_gw=None):
        # add a (possibly distributed / pre-scheduled) router to an agent
        # and verify a binding is only created when not already scheduled
        agent_id = self.agent_id1
        agent = self.agent1
        if distributed:
            self._register_l3_dvr_agents()
            agent_id = self.l3_dvr_snat_id
            agent = self.l3_dvr_snat_agent
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        if already_scheduled:
            self._test_schedule_bind_router(agent, router)
        with contextlib.nested(
            mock.patch.object(self, "validate_agent_router_combination"),
            mock.patch.object(self, "create_router_to_agent_binding"),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=router['router'])
        ) as (valid, auto_s, gr):
            self.add_router_to_l3_agent(self.adminContext, agent_id,
                                        router['router']['id'])
            # binding must be created iff the router was not yet scheduled
            self.assertNotEqual(already_scheduled, auto_s.called)
    def test__unbind_router_removes_binding(self):
        # scheduling then unbinding must leave no binding rows behind
        agent_id = self.agent_id1
        agent = self.agent1
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        self._test_schedule_bind_router(agent, router)
        self._unbind_router(self.adminContext,
                            router['router']['id'],
                            agent_id)
        bindings = self._get_l3_bindings_hosting_routers(
            self.adminContext, [router['router']['id']])
        self.assertEqual(0, len(bindings))

    def _create_router_for_l3_agent_dvr_test(self,
                                             distributed=False,
                                             external_gw=None):
        """Create a (possibly distributed) router for the dvr move tests."""
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        return router

    def _prepare_l3_agent_dvr_move_exceptions(self,
                                              distributed=False,
                                              external_gw=None,
                                              agent_id=None,
                                              expected_exception=None):
        """Assert that adding an incompatible router/agent pair raises."""
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=distributed, external_gw=external_gw)
        with contextlib.nested(
            mock.patch.object(self, "create_router_to_agent_binding"),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=router['router'])):
            self.assertRaises(expected_exception,
                              self.add_router_to_l3_agent,
                              self.adminContext, agent_id,
                              router['router']['id'])

    def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self):
        self._register_l3_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.agent_id1,
            expected_exception=l3agent.RouterL3AgentMismatch)

    def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.RouterL3AgentMismatch)

    def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)
    def test_add_router_to_l3_agent_dvr_to_snat(self):
        # a distributed router with a gateway may be bound to a dvr_snat agent
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._register_l3_dvr_agents()
        agent_id = self.l3_dvr_snat_id
        agent = self.l3_dvr_snat_agent
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=True,
            external_gw=external_gw_info)
        with contextlib.nested(
            mock.patch.object(self, "validate_agent_router_combination"),
            mock.patch.object(self, "create_router_to_agent_binding"),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=router['router'])
        ) as (valid_agent_rtr, rtr_agent_binding, get_rtr):
            self.add_router_to_l3_agent(self.adminContext, agent_id,
                                        router['router']['id'])
            rtr_agent_binding.assert_called_once_with(
                self.adminContext, agent, router['router'])

    def test_add_router_to_l3_agent(self):
        self._test_add_router_to_l3_agent()

    def test_add_distributed_router_to_l3_agent(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          external_gw=external_gw_info)

    def test_add_router_to_l3_agent_already_scheduled(self):
        self._test_add_router_to_l3_agent(already_scheduled=True)

    def test_add_distributed_router_to_l3_agent_already_scheduled(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          already_scheduled=True,
                                          external_gw=external_gw_info)

    def _prepare_schedule_dvr_tests(self):
        """Build a ChanceScheduler, one live agent and a mocked plugin."""
        scheduler = l3_agent_scheduler.ChanceScheduler()
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        plugin = mock.Mock()
        plugin.get_l3_agents_hosting_routers.return_value = []
        plugin.get_l3_agents.return_value = [agent]
        plugin.get_l3_agent_candidates.return_value = [agent]
        return scheduler, agent, plugin
    def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
        # dvr router, no SNAT binding, no gateway: plain candidate scheduling
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True
        }
        plugin.get_router.return_value = sync_router
        with contextlib.nested(
            mock.patch.object(scheduler, 'bind_router'),
            mock.patch.object(
                plugin, 'get_snat_bindings', return_value=False)
        ):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.get_l3_agents_hosting_routers(
                mock.ANY, ['foo_router_id'], admin_state_up=True),
            mock.call.get_l3_agents(mock.ANY, active=True),
            mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
        ]
        plugin.assert_has_calls(expected_calls)

    def test_schedule_dvr_router_with_snatbinding_no_gw(self):
        # existing SNAT binding but gateway removed: binding must be undone
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {'id': 'foo_router_id',
                       'distributed': True}
        plugin.get_router.return_value = sync_router
        with mock.patch.object(plugin, 'get_snat_bindings', return_value=True):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.unbind_snat_servicenode(mock.ANY, 'foo_router_id'),
        ]
        plugin.assert_has_calls(expected_calls)

    def test_schedule_router_distributed(self):
        # dvr router with a gateway: SNAT must additionally be scheduled
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True,
            'external_gateway_info': {
                'network_id': str(uuid.uuid4()),
                'enable_snat': True
            }
        }
        plugin.get_router.return_value = sync_router
        with mock.patch.object(
                plugin, 'get_snat_bindings', return_value=False):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.schedule_snat_router(
                mock.ANY, 'foo_router_id', sync_router),
        ]
        plugin.assert_has_calls(expected_calls)
    def _test_schedule_bind_router(self, agent, router):
        """Bind router to agent and verify a binding row was written."""
        ctx = self.adminContext
        session = ctx.session
        db = l3_agentschedulers_db.RouterL3AgentBinding
        scheduler = l3_agent_scheduler.ChanceScheduler()
        rid = router['router']['id']
        scheduler.bind_router(ctx, rid, agent)
        results = (session.query(db).filter_by(router_id=rid).all())
        self.assertTrue(len(results) > 0)
        self.assertIn(agent.id, [bind.l3_agent_id for bind in results])

    def test_bind_new_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('is scheduled', args[0])

    def test_bind_absent_router(self):
        scheduler = l3_agent_scheduler.ChanceScheduler()
        # checking that bind_router() is not throwing
        # when supplied with router_id of non-existing router
        scheduler.bind_router(self.adminContext, "dummyID", self.agent1)

    def test_bind_existing_router(self):
        # rebinding the same router logs instead of creating a duplicate
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        self._test_schedule_bind_router(self.agent1, router)
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('has already been scheduled', args[0])
    def _check_get_l3_agent_candidates(
            self, router, agent_list, exp_host, count=1):
        """Assert candidate selection yields count agents on exp_host."""
        candidates = self.get_l3_agent_candidates(self.adminContext,
                                                  router, agent_list)
        self.assertEqual(len(candidates), count)
        if count:
            self.assertEqual(candidates[0]['host'], exp_host)

    def test_get_l3_agent_candidates_legacy(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]

        # test legacy agent_mode case: only legacy agent should be candidate
        router['distributed'] = False
        exp_host = 'host_1'
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)

    def test_get_l3_agent_candidates_dvr(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]
        # test dvr agent_mode case only dvr agent should be candidate
        router['distributed'] = True
        exp_host = DVR_L3_AGENT.get('host')
        # pretend the dvr agent hosts ports for this router
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)

    def test_get_l3_agent_candidates_dvr_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]
        exp_host = DVR_L3_AGENT.get('host')
        router['distributed'] = True
        # Test no VMs present case
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
        self._check_get_l3_agent_candidates(
            router, agent_list, exp_host, count=0)

    def test_get_l3_agent_candidates_dvr_snat(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        router['distributed'] = True

        agent_list = [self.l3_dvr_snat_agent]
        exp_host = DVR_SNAT_L3_AGENT.get('host')
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
router['distributed'] = True
agent_list = [self.l3_dvr_snat_agent]
exp_host = DVR_SNAT_L3_AGENT.get('host')
self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
# Test no VMs present case
self.check_ports_exist_on_l3agent.return_value = False
self._check_get_l3_agent_candidates(
router, agent_list, exp_host, count=0)
def test_get_l3_agent_candidates_centralized(self):
self._register_l3_dvr_agents()
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
# check centralized test case
router['distributed'] = False
exp_host = DVR_SNAT_L3_AGENT.get('host')
agent_list = [self.l3_dvr_snat_agent]
self._check_get_l3_agent_candidates(router, agent_list, exp_host)
def _prepare_check_ports_exist_tests(self):
l3_agent = agents_db.Agent()
l3_agent.admin_state_up = True
l3_agent.host = 'host_1'
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r2')
router['external_gateway_info'] = None
router['id'] = str(uuid.uuid4())
self.plugin.get_ports = mock.Mock(return_value=[])
self.get_subnet_ids_on_router = mock.Mock(return_value=[])
return l3_agent, router
def test_check_ports_exist_on_l3agent_no_subnets(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
# no subnets
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(val)
def test_check_ports_exist_on_l3agent_if_no_subnets_then_return(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
with mock.patch.object(manager.NeutronManager,
'get_plugin') as getp:
getp.return_value = self.plugin
# no subnets and operation is remove_router_interface,
# so return immediately without calling get_ports
self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(self.plugin.get_ports.called)
def test_check_ports_exist_on_l3agent_no_subnet_match(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
# no matching subnet
self.plugin.get_subnet_ids_on_router = mock.Mock(
return_value=[str(uuid.uuid4())])
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertFalse(val)
def test_check_ports_exist_on_l3agent_subnet_match(self):
l3_agent, router = self._prepare_check_ports_exist_tests()
# matching subnet
port = {'subnet_id': str(uuid.uuid4()),
'binding:host_id': 'host_1',
'device_owner': 'compute:',
'id': 1234}
self.plugin.get_ports.return_value = [port]
self.get_subnet_ids_on_router = mock.Mock(
return_value=[port['subnet_id']])
val = self.check_ports_exist_on_l3agent(self.adminContext,
l3_agent, router['id'])
self.assertTrue(val)
def test_get_l3_agents_hosting_routers(self):
agent = self._register_l3_agent('host_6')
router = self._make_router(self.fmt,
tenant_id=str(uuid.uuid4()),
name='r1')
ctx = self.adminContext
router_id = router['router']['id']
self.plugin.router_scheduler.bind_router(ctx, router_id, agent)
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id])
self.assertEqual([agent.id], [agt.id for agt in agents])
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id],
admin_state_up=True)
self.assertEqual([agent.id], [agt.id for agt in agents])
self._set_l3_agent_admin_state(ctx, agent.id, False)
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id])
self.assertEqual([agent.id], [agt.id for agt in agents])
agents = self.get_l3_agents_hosting_routers(ctx,
[router_id],
admin_state_up=True)
self.assertEqual([], agents)
class L3SchedulerTestCase(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                          l3_db.L3_NAT_db_mixin,
                          common_db_mixin.CommonDbMixin,
                          test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
                          test_l3.L3NatTestCaseMixin,
                          L3SchedulerBaseMixin,
                          L3SchedulerTestBaseMixin):
    """Scheduler tests backed by the agent-scheduling L3 test plugin."""

    def setUp(self):
        self.mock_rescheduling = False
        ext_mgr = test_l3.L3TestExtensionManager()
        super(L3SchedulerTestCase, self).setUp(
            plugin='neutron.tests.unit.extensions.test_l3.'
                   'TestL3NatIntAgentSchedulingPlugin',
            ext_mgr=ext_mgr)
        self.adminContext = q_context.get_admin_context()
        self.plugin = manager.NeutronManager.get_plugin()
        # Default scheduler for this hierarchy; subclasses may replace it.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
        )
        self._register_l3_agents()
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCase):
    """Tests for the random (chance) L3 scheduler."""

    def test_random_scheduling(self):
        """Routers are placed via random.choice, one call per router.

        The patch is applied as a context manager so it is always undone,
        even when an assertion fails.  The original start()/stop() pair
        leaked the patch into subsequent tests on failure because stop()
        was only reached at the end of the test body.
        """
        def side_effect(seq):
            return seq[0]

        with mock.patch('random.choice') as random_mock:
            random_mock.side_effect = side_effect
            with self.subnet() as subnet:
                self._set_net_external(subnet['subnet']['network_id'])
                with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r1['router']['id']],
                        admin_state_up=True)
                    self.assertEqual(len(agents), 1)
                    self.assertEqual(random_mock.call_count, 1)
                    with self.router_with_ext_gw(name='r2',
                                                 subnet=subnet) as r2:
                        agents = self.get_l3_agents_hosting_routers(
                            self.adminContext, [r2['router']['id']],
                            admin_state_up=True)
                        self.assertEqual(len(agents), 1)
                        self.assertEqual(random_mock.call_count, 2)

    def test_scheduler_auto_schedule_when_agent_added(self):
        """An agent turned admin-up can pick up an unhosted router."""
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id1, False)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)
        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                # Both agents are admin-down: nothing hosts the router.
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(0, len(agents))
                self._set_l3_agent_admin_state(self.adminContext,
                                               self.agent_id1, True)
                self.plugin.auto_schedule_routers(self.adminContext,
                                                  'host_1',
                                                  [r1['router']['id']])
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual('host_1', agents[0]['host'])
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCase):
    """Tests for the least-routers L3 scheduler."""
    def setUp(self):
        super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
        # Replace the ChanceScheduler installed by the base setUp().
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
        )
    def test_scheduler(self):
        """Routers go to the agent currently hosting the fewest routers."""
        # disable one agent to force the scheduling to the only one.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)
        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(len(agents), 1)
                agent_id1 = agents[0]['id']
                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r2['router']['id']],
                        admin_state_up=True)
                    self.assertEqual(len(agents), 1)
                    agent_id2 = agents[0]['id']
                    # Only one agent was admin-up, so r1 and r2 share it.
                    self.assertEqual(agent_id1, agent_id2)
                    # re-enable the second agent to see whether the next router
                    # spawned will be on this one.
                    self._set_l3_agent_admin_state(self.adminContext,
                                                   self.agent_id2, True)
                    with self.router_with_ext_gw(name='r3',
                                                 subnet=subnet) as r3:
                        agents = self.get_l3_agents_hosting_routers(
                            self.adminContext, [r3['router']['id']],
                            admin_state_up=True)
                        self.assertEqual(len(agents), 1)
                        agent_id3 = agents[0]['id']
                        # The freshly re-enabled, less-loaded agent wins.
                        self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
                     l3_dvrscheduler_db.L3_DVRsch_db_mixin):
    """Minimal concrete composition of the NAT db and DVR scheduler mixins.

    Used as the device-under-test in L3DvrSchedulerTestCase.
    """
    pass
class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
    """Unit tests for the DVR scheduler db mixin via L3DvrScheduler."""
    def setUp(self):
        plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
        self.setup_coreplugin(plugin)
        super(L3DvrSchedulerTestCase, self).setUp()
        self.adminContext = q_context.get_admin_context()
        # Device under test: concrete NAT-db + DVR-scheduler composition.
        self.dut = L3DvrScheduler()
    def test__notify_port_delete(self):
        """Port deletion updates the ARP table and unbinds removed routers."""
        plugin = manager.NeutronManager.get_plugin()
        l3plugin = mock.Mock()
        l3plugin.supported_extension_aliases = [
            'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
            constants.L3_DISTRIBUTED_EXT_ALIAS
        ]
        with mock.patch.object(manager.NeutronManager,
                               'get_service_plugins',
                               return_value={'L3_ROUTER_NAT': l3plugin}):
            kwargs = {
                'context': self.adminContext,
                'port': mock.ANY,
                'removed_routers': [
                    {'agent_id': 'foo_agent', 'router_id': 'foo_id'},
                ],
            }
            l3_dvrscheduler_db._notify_port_delete(
                'port', 'after_delete', plugin, **kwargs)
            l3plugin.dvr_vmarp_table_update.assert_called_once_with(
                self.adminContext, mock.ANY, 'del')
            l3plugin.remove_router_from_l3_agent.assert_called_once_with(
                self.adminContext, 'foo_agent', 'foo_id')
    def test_dvr_update_router_addvm(self):
        """Adding a VM port on a DVR-served subnet runs without error."""
        port = {
            'device_id': 'abcd',
            'device_owner': 'compute:nova',
            'fixed_ips': [
                {
                    'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                    'ip_address': '10.10.10.3'
                }
            ]
        }
        dvr_port = {
            'id': 'dvr_port1',
            'device_id': 'r1',
            'device_owner': 'network:router_interface_distributed',
            'fixed_ips': [
                {
                    'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                    'ip_address': '10.10.10.1'
                }
            ]
        }
        r1 = {
            'id': 'r1',
            'distributed': True,
        }
        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port]),
            mock.patch('neutron.manager.NeutronManager.get_service_plugins',
                       return_value=mock.Mock()),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=r1),
            mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
                       '.L3AgentNotifyAPI')):
            self.dut.dvr_update_router_addvm(self.adminContext, port)
    def test_get_dvr_routers_by_portid(self):
        """The router owning a DVR interface port is found by port id."""
        dvr_port = {
            'id': 'dvr_port1',
            'device_id': 'r1',
            'device_owner': 'network:router_interface_distributed',
            'fixed_ips': [
                {
                    'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                    'ip_address': '10.10.10.1'
                }
            ]
        }
        r1 = {
            'id': 'r1',
            'distributed': True,
        }
        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_port', return_value=dvr_port),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port])):
            router_id = self.dut.get_dvr_routers_by_portid(self.adminContext,
                                                           dvr_port['id'])
            self.assertEqual(router_id.pop(), r1['id'])
    def test_get_subnet_ids_on_router(self):
        """Subnet ids are collected from the router's DVR interface ports."""
        dvr_port = {
            'id': 'dvr_port1',
            'device_id': 'r1',
            'device_owner': 'network:router_interface_distributed',
            'fixed_ips': [
                {
                    'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                    'ip_address': '10.10.10.1'
                }
            ]
        }
        r1 = {
            'id': 'r1',
            'distributed': True,
        }
        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port])):
            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                        r1['id'])
            self.assertEqual(sub_ids.pop(),
                             dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
    def test_check_ports_active_on_host_and_subnet(self):
        """No serviceable port other than the queried one: result is False."""
        dvr_port = {
            'id': 'dvr_port1',
            'device_id': 'r1',
            'status': 'ACTIVE',
            'binding:host_id': 'thisHost',
            'device_owner': 'compute:nova',
            'fixed_ips': [
                {
                    'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
                    'ip_address': '10.10.10.1'
                }
            ]
        }
        r1 = {
            'id': 'r1',
            'distributed': True,
        }
        with contextlib.nested(
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
                       '.get_ports', return_value=[dvr_port]),
            mock.patch('neutron.manager.NeutronManager.get_service_plugins',
                       return_value=mock.Mock()),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=r1),
            mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
                       '.L3AgentNotifyAPI')):
            sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
                                                        r1['id'])
            result = self.dut.check_ports_active_on_host_and_subnet(
                                    self.adminContext,
                                    'thisHost', 'dvr_port1',
                                    sub_ids)
            self.assertFalse(result)
    def _test_dvr_serviced_port_exists_on_subnet(self, port):
        """A serviced port on the subnet (other than the queried id) -> True."""
        with mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                        'get_ports', return_value=[port]):
            result = self.dut.check_ports_active_on_host_and_subnet(
                                    self.adminContext,
                                    'thisHost',
                                    'dvr1-intf-id',
                                    'my-subnet-id')
            self.assertTrue(result)
    def test_dvr_serviced_vip_port_exists_on_subnet(self):
        """A loadbalancer VIP port counts as a DVR-serviced port."""
        vip_port = {
            'id': 'lbaas-vip-port1',
            'device_id': 'vip-pool-id',
            'status': 'ACTIVE',
            'binding:host_id': 'thisHost',
            'device_owner': constants.DEVICE_OWNER_LOADBALANCER,
            'fixed_ips': [
                {
                    'subnet_id': 'my-subnet-id',
                    'ip_address': '10.10.10.1'
                }
            ]
        }
        self._test_dvr_serviced_port_exists_on_subnet(port=vip_port)
    def _create_port(self, port_name, tenant_id, host, subnet_id, ip_address,
                     status='ACTIVE',
                     device_owner='compute:nova'):
        """Build a fake port dict with a single fixed IP on subnet_id."""
        return {
            'id': port_name + '-port-id',
            'tenant_id': tenant_id,
            'device_id': port_name,
            'device_owner': device_owner,
            'status': status,
            'binding:host_id': host,
            'fixed_ips': [
                {
                    'subnet_id': subnet_id,
                    'ip_address': ip_address
                }
            ]
        }
    def test_dvr_deletens_if_no_port_no_routers(self):
        """No routers are removed when the port's subnet has no router."""
        # Delete a vm port, the port subnet has no router interface.
        vm_tenant_id = 'tenant-1'
        my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False)
        vm_port_host = 'compute-node-1'
        vm_port = self._create_port(
            'deleted-vm', vm_tenant_id, vm_port_host,
            'shared-subnet', '10.10.10.3',
            status='INACTIVE')
        vm_port_id = vm_port['id']
        fakePortDB = FakePortDB([vm_port])
        with contextlib.nested(
            mock.patch.object(my_context, 'elevated',
                              return_value=self.adminContext),
            mock.patch('neutron.plugins.ml2.db.'
                       'get_port_binding_host', return_value=vm_port_host),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                       'get_ports', side_effect=fakePortDB.get_ports),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                       'get_port', return_value=vm_port)) as (
                _, mock_get_port_binding_host, _, _):
            routers = self.dut.dvr_deletens_if_no_port(my_context, vm_port_id)
            self.assertEqual([], routers)
            mock_get_port_binding_host.assert_called_once_with(
                self.adminContext.session, vm_port_id)
    def test_dvr_deletens_if_no_ports_no_removeable_routers(self):
        """The router stays scheduled while another VM port remains."""
        # A VM port is deleted, but the router can't be unscheduled from the
        # compute node because there is another VM port present.
        vm_tenant_id = 'tenant-1'
        my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False)
        # NOTE: no trailing comma here -- the id must be a str, not a
        # 1-tuple (the previous trailing comma silently made it a tuple).
        shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0'
        vm_port_host = 'compute-node-1'
        dvr_port = self._create_port(
            'dvr-router', 'admin-tenant', vm_port_host,
            shared_subnet_id, '10.10.10.1',
            device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
        deleted_vm_port = self._create_port(
            'deleted-vm', vm_tenant_id, vm_port_host,
            shared_subnet_id, '10.10.10.3',
            status='INACTIVE')
        deleted_vm_port_id = deleted_vm_port['id']
        running_vm_port = self._create_port(
            'running-vn', 'tenant-2', vm_port_host,
            shared_subnet_id, '10.10.10.33')
        fakePortDB = FakePortDB([running_vm_port, deleted_vm_port, dvr_port])
        vm_port_binding = {
            'port_id': deleted_vm_port_id,
            'host': vm_port_host
        }
        with contextlib.nested(
            mock.patch.object(my_context, 'elevated',
                              return_value=self.adminContext),
            mock.patch('neutron.plugins.ml2.db.get_port_binding_host',
                       return_value=vm_port_host),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                       'get_port', side_effect=fakePortDB.get_port),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                       'get_ports', side_effect=fakePortDB.get_ports),
            mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host',
                       return_value=vm_port_binding)) as (_,
                mock_get_port_binding_host, _,
                mock_get_ports,
                mock_get_dvr_port_binding_by_host):
            routers = self.dut.dvr_deletens_if_no_port(
                my_context, deleted_vm_port_id)
            self.assertEqual([], routers)
            mock_get_port_binding_host.assert_called_once_with(
                self.adminContext.session, deleted_vm_port_id)
            self.assertTrue(mock_get_ports.called)
            self.assertFalse(mock_get_dvr_port_binding_by_host.called)
    def _test_dvr_deletens_if_no_ports_delete_routers(self,
                                                      vm_tenant,
                                                      router_tenant):
        """Deleting the host's last VM port unschedules the DVR router."""
        class FakeAgent(object):
            def __init__(self, id, host, agent_type):
                self.id = id
                self.host = host
                self.agent_type = agent_type
        my_context = q_context.Context('user-1', vm_tenant, is_admin=False)
        # NOTE: no trailing comma here -- the id must be a str, not a
        # 1-tuple (the previous trailing comma silently made it a tuple).
        shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0'
        vm_port_host = 'compute-node-1'
        router_id = 'dvr-router'
        dvr_port = self._create_port(
            router_id, router_tenant, vm_port_host,
            shared_subnet_id, '10.10.10.1',
            device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
        dvr_port_id = dvr_port['id']
        deleted_vm_port = self._create_port(
            'deleted-vm', vm_tenant, vm_port_host,
            shared_subnet_id, '10.10.10.3',
            status='INACTIVE')
        deleted_vm_port_id = deleted_vm_port['id']
        running_vm_port = self._create_port(
            'running-vn', vm_tenant, 'compute-node-2',
            shared_subnet_id, '10.10.10.33')
        fakePortDB = FakePortDB([running_vm_port, dvr_port, deleted_vm_port])
        dvr_port_binding = {
            'port_id': dvr_port_id, 'host': vm_port_host
        }
        agent_id = 'l3-agent-on-compute-node-1'
        l3_agent_on_vm_host = FakeAgent(agent_id,
                                        vm_port_host,
                                        constants.AGENT_TYPE_L3)
        with contextlib.nested(
            mock.patch.object(my_context, 'elevated',
                              return_value=self.adminContext),
            mock.patch('neutron.plugins.ml2.db.get_port_binding_host',
                       return_value=vm_port_host),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                       'get_port', side_effect=fakePortDB.get_port),
            mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
                       'get_ports', side_effect=fakePortDB.get_ports),
            mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host',
                       return_value=dvr_port_binding),
            mock.patch('neutron.db.agents_db.AgentDbMixin.'
                       '_get_agent_by_type_and_host',
                       return_value=l3_agent_on_vm_host)) as (_,
                mock_get_port_binding_host, _,
                mock_get_ports,
                mock_get_dvr_port_binding_by_host,
                mock__get_agent_by_type_and_host):
            routers = self.dut.dvr_deletens_if_no_port(
                my_context, deleted_vm_port_id)
            expected_router = {
                'router_id': router_id,
                'host': vm_port_host,
                'agent_id': agent_id
            }
            self.assertEqual([expected_router], routers)
            mock_get_port_binding_host.assert_called_once_with(
                self.adminContext.session, deleted_vm_port_id)
            self.assertTrue(mock_get_ports.called)
            mock_get_dvr_port_binding_by_host.assert_called_once_with(
                my_context.session, dvr_port_id, vm_port_host)
    def test_dvr_deletens_if_no_ports_delete_admin_routers(self):
        """An admin-owned router is unscheduled when the last VM goes."""
        # test to see whether the last VM using a router created
        # by the admin will be unscheduled on the compute node
        self._test_dvr_deletens_if_no_ports_delete_routers(
            'tenant-1', 'admin-tenant')
    def test_dvr_deletens_if_no_ports_delete_tenant_routers(self):
        """A tenant-owned router is unscheduled when the last VM goes."""
        # test to see whether the last VM using a tenant's private
        # router will be unscheduled on the compute node
        self._test_dvr_deletens_if_no_ports_delete_routers(
            'tenant-1', 'tenant-1')
    def test_dvr_serviced_dhcp_port_exists_on_subnet(self):
        """A DHCP port counts as a DVR-serviced port."""
        dhcp_port = {
            'id': 'dhcp-port1',
            'device_id': 'dhcp-net-id',
            'status': 'ACTIVE',
            'binding:host_id': 'thisHost',
            'device_owner': constants.DEVICE_OWNER_DHCP,
            'fixed_ips': [
                {
                    'subnet_id': 'my-subnet-id',
                    'ip_address': '10.10.10.2'
                }
            ]
        }
        self._test_dvr_serviced_port_exists_on_subnet(port=dhcp_port)
    def _prepare_schedule_snat_tests(self):
        """Return a live fake agent and a distributed router with SNAT."""
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        router = {
            'id': 'foo_router_id',
            'distributed': True,
            'external_gateway_info': {
                'network_id': str(uuid.uuid4()),
                'enable_snat': True
            }
        }
        return agent, router
    def test_schedule_snat_router_duplicate_entry(self):
        """A duplicate SNAT binding does not propagate the DB error."""
        self._prepare_schedule_snat_tests()
        with contextlib.nested(
            mock.patch.object(self.dut, 'get_l3_agents'),
            mock.patch.object(self.dut, 'get_snat_candidates'),
            mock.patch.object(self.dut, 'bind_snat_servicenode',
                              side_effect=db_exc.DBDuplicateEntry()),
            mock.patch.object(self.dut, 'bind_dvr_router_servicenode')
        ) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr):
            self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar')
            self.assertTrue(mock_bind_snat.called)
            self.assertFalse(mock_bind_dvr.called)
    def test_schedule_snat_router_return_value(self):
        """schedule_snat_router() returns the bound SNAT agents."""
        agent, router = self._prepare_schedule_snat_tests()
        with contextlib.nested(
            mock.patch.object(self.dut, 'get_l3_agents'),
            mock.patch.object(self.dut, 'get_snat_candidates'),
            mock.patch.object(self.dut, 'bind_snat_servicenode'),
            mock.patch.object(self.dut, 'bind_dvr_router_servicenode')
        ) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr):
            mock_snat_canidates.return_value = [agent]
            mock_bind_snat.return_value = [agent]
            mock_bind_dvr.return_value = [agent]
            chosen_agent = self.dut.schedule_snat_router(
                self.adminContext, 'foo_router_id', router)
        self.assertEqual(chosen_agent, [agent])
    def test_schedule_router_unbind_snat_servicenode_negativetest(self):
        """No unbind happens when the router has no SNAT bindings."""
        router = {
            'id': 'foo_router_id',
            'distributed': True
        }
        with contextlib.nested(
            mock.patch.object(self.dut, 'get_router'),
            mock.patch.object(self.dut, 'get_snat_bindings'),
            mock.patch.object(self.dut, 'unbind_snat_servicenode')
        ) as (mock_rd, mock_snat_bind, mock_unbind):
            mock_rd.return_value = router
            mock_snat_bind.return_value = False
            self.dut.schedule_snat_router(
                self.adminContext, 'foo_router_id', router)
            self.assertFalse(mock_unbind.called)
    def test_schedule_snat_router_with_snat_candidates(self):
        """With candidates available the router is bound to one of them."""
        agent, router = self._prepare_schedule_snat_tests()
        with contextlib.nested(
            mock.patch.object(query.Query, 'first'),
            mock.patch.object(self.dut, 'get_l3_agents'),
            mock.patch.object(self.dut, 'get_snat_candidates'),
            mock.patch.object(self.dut, 'get_router'),
            mock.patch.object(self.dut, 'bind_dvr_router_servicenode'),
            mock.patch.object(self.dut, 'bind_snat_servicenode')) as (
                mock_query, mock_agents,
                mock_candidates, mock_rd, mock_dvr, mock_bind):
            mock_rd.return_value = router
            mock_query.return_value = []
            mock_agents.return_value = [agent]
            mock_candidates.return_value = [agent]
            self.dut.schedule_snat_router(
                self.adminContext, 'foo_router_id', mock.ANY)
            mock_bind.assert_called_once_with(
                self.adminContext, 'foo_router_id', [agent])
    def test_unbind_snat_servicenode(self):
        """Unbinding deletes the binding and notifies core/l3 plugins."""
        router_id = 'foo_router_id'
        core_plugin = mock.PropertyMock()
        type(self.dut)._core_plugin = core_plugin
        (self.dut._core_plugin.get_ports_on_host_by_subnet.
         return_value) = []
        core_plugin.reset_mock()
        l3_notifier = mock.PropertyMock()
        type(self.dut).l3_rpc_notifier = l3_notifier
        binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding(
            router_id=router_id, l3_agent_id='foo_l3_agent_id',
            l3_agent=agents_db.Agent())
        with contextlib.nested(
            mock.patch.object(query.Query, 'one'),
            mock.patch.object(self.adminContext.session, 'delete'),
            mock.patch.object(query.Query, 'delete'),
            mock.patch.object(self.dut, 'get_subnet_ids_on_router')) as (
                mock_query, mock_session, mock_delete, mock_get_subnets):
            mock_query.return_value = binding
            mock_get_subnets.return_value = ['foo_subnet_id']
            self.dut.unbind_snat_servicenode(self.adminContext, router_id)
            mock_get_subnets.assert_called_with(self.adminContext, router_id)
            self.assertTrue(mock_session.call_count)
            self.assertTrue(mock_delete.call_count)
        core_plugin.assert_called_once_with()
        l3_notifier.assert_called_once_with()
class L3HAPlugin(db_v2.NeutronDbPluginV2,
                 l3_hamode_db.L3_HA_NAT_db_mixin,
                 l3_hascheduler_db.L3_HA_scheduler_db_mixin):
    """Minimal plugin exposing only the l3-ha extension for HA tests."""
    supported_extension_aliases = ["l3-ha"]
class L3HATestCaseMixin(testlib_api.SqlTestCase,
                        L3SchedulerBaseMixin):
    """Common fixture for HA scheduler tests: plugin, agents, helpers."""
    def setUp(self):
        super(L3HATestCaseMixin, self).setUp()
        self.adminContext = q_context.get_admin_context()
        self.plugin = L3HAPlugin()
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        # HA interface notifications would hit RPC; silence them globally.
        mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
                          '_notify_ha_interfaces_updated').start()
        # 0 removes the per-router agent cap (tests below expect an HA
        # router to be bound to all registered agents).
        cfg.CONF.set_override('max_l3_agents_per_router', 0)
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
        )
        self._register_l3_agents()
    def _create_ha_router(self, ha=True, tenant_id='tenant1'):
        # ha=None leaves the attribute unset so the server default applies.
        self.adminContext.tenant_id = tenant_id
        router = {'name': 'router1', 'admin_state_up': True}
        if ha is not None:
            router['ha'] = ha
        return self.plugin.create_router(self.adminContext,
                                         {'router': router})
class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
    """Tests for the HA-specific scheduler db mixin queries."""

    def _register_l3_agents(self, plugin=None):
        """Register four agents: the base pair plus host_3 and host_4."""
        super(L3_HA_scheduler_db_mixinTestCase,
              self)._register_l3_agents(plugin=plugin)
        self.agent3 = self._register_l3_agent('host_3', plugin=plugin)
        self.agent_id3 = self.agent3.id
        self.agent4 = self._register_l3_agent('host_4', plugin=plugin)
        self.agent_id4 = self.agent4.id

    def test_get_ha_routers_l3_agents_count(self):
        """Only HA routers are counted, each bound to all four agents."""
        router1 = self._create_ha_router()
        router2 = self._create_ha_router()
        router3 = self._create_ha_router(ha=False)
        for router in (router1, router2, router3):
            self.plugin.schedule_router(self.adminContext, router['id'])
        result = self.plugin.get_ha_routers_l3_agents_count(
            self.adminContext).all()
        self.assertEqual(2, len(result))
        self.assertIn((router1['id'], router1['tenant_id'], 4), result)
        self.assertIn((router2['id'], router2['tenant_id'], 4), result)
        # The non-HA router never appears, whatever its count would be.
        self.assertNotIn((router3['id'], router3['tenant_id'], mock.ANY),
                         result)

    def test_get_ordered_l3_agents_by_num_routers(self):
        """Agents come back ordered by ascending hosted-router count."""
        router1 = self._create_ha_router()
        router2 = self._create_ha_router()
        router3 = self._create_ha_router(ha=False)
        router4 = self._create_ha_router(ha=False)
        # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
        # host 2, and agent 4 will host 3.
        placements = [
            (router1, [self.agent2, self.agent4]),
            (router2, [self.agent3, self.agent4]),
            (router3, [self.agent3]),
            (router4, [self.agent4]),
        ]
        for router, candidates in placements:
            self.plugin.schedule_router(self.adminContext, router['id'],
                                        candidates=candidates)
        agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
                     self.agent_id4]
        result = self.plugin.get_l3_agents_ordered_by_num_routers(
            self.adminContext, agent_ids)
        self.assertEqual(agent_ids, [record['id'] for record in result])
class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
    """Agent-scheduler db behaviour in the presence of HA routers."""

    def test_reschedule_ha_routers_from_down_agents(self):
        """HA routers are never rescheduled away from dead agents."""
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        hosting_agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(hosting_agents))
        self._set_l3_agent_dead(self.agent_id1)
        with mock.patch.object(self.plugin,
                               'reschedule_router') as reschedule:
            self.plugin.reschedule_routers_from_down_agents()
            self.assertFalse(reschedule.called)

    def test_list_l3_agents_hosting_ha_router(self):
        """ha_state turns 'active' only on the agent that reported active."""
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        # Before any state report every binding is standby.
        for agent in agents:
            self.assertEqual('standby', agent['ha_state'])
        self.plugin.update_routers_states(
            self.adminContext, {router['id']: 'active'}, self.agent1.host)
        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        for agent in agents:
            if agent['host'] == self.agent1.host:
                self.assertEqual('active', agent['ha_state'])
            else:
                self.assertEqual('standby', agent['ha_state'])

    def test_list_l3_agents_hosting_legacy_router(self):
        """Legacy routers have no HA state at all."""
        router = self._create_ha_router(ha=False)
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        for agent in agents:
            self.assertIsNone(agent['ha_state'])

    def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self):
        """An unscheduled router yields an empty agents list, not an error."""
        self.assertEqual({'agents': []},
                         self.plugin._get_agents_dict_for_router([]))
class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
    """Chance-scheduler behaviour for HA routers (bind to all agents)."""
    def test_scheduler_with_ha_enabled(self):
        """An HA router binds to both agents; each host gets sync data."""
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        for agent in agents:
            sync_data = self.plugin.get_ha_sync_data_for_host(
                self.adminContext, router_ids=[router['id']],
                host=agent.host)
            self.assertEqual(1, len(sync_data))
            # Every hosting agent must see the router's HA interface.
            interface = sync_data[0][constants.HA_INTERFACE_KEY]
            self.assertIsNotNone(interface)
    def test_auto_schedule(self):
        """Auto-scheduling on each host binds the HA router to both agents."""
        router = self._create_ha_router()
        self.plugin.auto_schedule_routers(
            self.adminContext, self.agent1.host, None)
        self.plugin.auto_schedule_routers(
            self.adminContext, self.agent2.host, None)
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']])
        self.assertEqual(2, len(agents))
    def test_auto_schedule_specific_router_when_agent_added(self):
        """A new agent picks up an explicitly listed HA router."""
        self._auto_schedule_when_agent_added(True)
    def test_auto_schedule_all_routers_when_agent_added(self):
        """A new agent picks up HA routers with no explicit router list."""
        self._auto_schedule_when_agent_added(False)
    def _auto_schedule_when_agent_added(self, specific_router):
        """A newly registered agent is added as a third HA binding.

        :param specific_router: if True, pass the router id explicitly to
            auto_schedule_routers; otherwise pass an empty list (all).
        """
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id1, agent_ids)
        self.assertIn(self.agent_id2, agent_ids)
        agent = self._register_l3_agent('host_3')
        self.agent_id3 = agent.id
        routers_to_auto_schedule = [router['id']] if specific_router else []
        self.plugin.auto_schedule_routers(self.adminContext,
                                          'host_3',
                                          routers_to_auto_schedule)
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(3, len(agents))
        # Simulate agent restart to make sure we don't try to re-bind
        self.plugin.auto_schedule_routers(self.adminContext,
                                          'host_3',
                                          routers_to_auto_schedule)
    def test_scheduler_with_ha_enabled_not_enough_agent(self):
        """With a single admin-up agent an HA router cannot be scheduled."""
        r1 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)
        r2 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r2['id'])
        # HA needs at least two agents; with one admin-up, none is bound.
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r2['id']],
            admin_state_up=True)
        self.assertEqual(0, len(agents))
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, True)
class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
    """Least-routers scheduler with HA routers across four agents."""
    def _register_l3_agents(self, plugin=None):
        # Add host_3 and host_4 on top of the two base agents.
        super(L3HALeastRoutersSchedulerTestCase,
              self)._register_l3_agents(plugin=plugin)
        agent = self._register_l3_agent('host_3', plugin=plugin)
        self.agent_id3 = agent.id
        agent = self._register_l3_agent('host_4', plugin=plugin)
        self.agent_id4 = agent.id
    def setUp(self):
        super(L3HALeastRoutersSchedulerTestCase, self).setUp()
        # Swap in the least-routers scheduler after the common setup.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
        )
    def test_scheduler(self):
        """Each HA router lands on the two least-loaded admin-up agents."""
        cfg.CONF.set_override('max_l3_agents_per_router', 2)
        # disable agents 3 and 4 to be sure that the first router will
        # be scheduled on the first two agents
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id3, False)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id4, False)
        r1 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id1, agent_ids)
        self.assertIn(self.agent_id2, agent_ids)
        # Re-enable agents 3 and 4: with zero routers each, they are now
        # the least loaded and must receive the second router.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id3, True)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id4, True)
        r2 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r2['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r2['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id3, agent_ids)
        self.assertIn(self.agent_id4, agent_ids)
class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase,
                                         L3SchedulerBaseMixin):
    """Test cases to test get_l3_agents.
    This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
    for the 'agent_mode' filter with various values.
    5 l3 agents are registered in the order - legacy, dvr_snat, dvr, fake_mode
    and legacy
    """
    # Each scenario: requested agent_modes filter -> expected modes returned,
    # in registration order.  An empty filter means "no filtering".
    scenarios = [
        ('no filter',
         dict(agent_modes=[],
              expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
                                    'fake_mode', 'legacy'])),
        ('legacy',
         dict(agent_modes=['legacy'],
              expected_agent_modes=['legacy', 'legacy'])),
        ('dvr_snat',
         dict(agent_modes=['dvr_snat'],
              expected_agent_modes=['dvr_snat'])),
        ('dvr ',
         dict(agent_modes=['dvr'],
              expected_agent_modes=['dvr'])),
        ('legacy and dvr snat',
         dict(agent_modes=['legacy', 'dvr_snat', 'legacy'],
              expected_agent_modes=['legacy', 'dvr_snat', 'legacy'])),
        ('legacy and dvr',
         dict(agent_modes=['legacy', 'dvr'],
              expected_agent_modes=['legacy', 'dvr', 'legacy'])),
        ('dvr_snat and dvr',
         dict(agent_modes=['dvr_snat', 'dvr'],
              expected_agent_modes=['dvr_snat', 'dvr'])),
        ('legacy, dvr_snat and dvr',
         dict(agent_modes=['legacy', 'dvr_snat', 'dvr'],
              expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
                                    'legacy'])),
        ('invalid',
         dict(agent_modes=['invalid'],
              expected_agent_modes=[])),
    ]
    def setUp(self):
        super(TestGetL3AgentsWithAgentModeFilter, self).setUp()
        self.plugin = L3HAPlugin()
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        self.adminContext = q_context.get_admin_context()
        # Register the five agents described in the class docstring.
        hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5']
        agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy']
        for host, agent_mode in zip(hosts, agent_modes):
            self._register_l3_agent(host, agent_mode, self.plugin)
    def _get_agent_mode(self, agent):
        # Agent mode lives in the agent's JSON configuration blob.
        agent_conf = self.plugin.get_configuration_dict(agent)
        return agent_conf.get('agent_mode', 'None')
    def test_get_l3_agents(self):
        l3_agents = self.plugin.get_l3_agents(
            self.adminContext, filters={'agent_modes': self.agent_modes})
        self.assertEqual(len(self.expected_agent_modes), len(l3_agents))
        returned_agent_modes = [self._get_agent_mode(agent)
                                for agent in l3_agents]
        self.assertEqual(self.expected_agent_modes, returned_agent_modes)
| Stavitsky/neutron | neutron/tests/unit/scheduler/test_l3_agent_scheduler.py | Python | apache-2.0 | 75,255 |
# NOTE: Python 2 / Fabric 1.x script (urllib2 and print statements below).
# Fix: the original line imported ``sys`` twice
# (``import os, sys, tempfile, urllib2, sys``).
import os
import sys
import tempfile
import urllib2
from contextlib import contextmanager

from fabric.api import env, local, lcd, prefix
from fabric.colors import red, green

# Working area for the throw-away test virtualenvs (expand ~ and $VARS).
test_root = '~/tmp'
test_root = os.path.expanduser(test_root)
test_root = os.path.expandvars(test_root)
project_name = 'testproject'
# Directory holding the *.cfg files and keys rsynced into the project.
config_dir = '~/tmp/configs'
config_dir = os.path.expanduser(config_dir)
config_dir = os.path.expandvars(config_dir)
# rsync command prefix used to copy config files into the generated project.
rcp = 'rsync -a --partial --progress '
# fab invocation that builds the small single-node test stack.
smallstack = """fab create_ec2:expatest-small-1 \
        bootstrap:expatest-small-1,core \
        bootstrap:expatest-small-1,gis \
        bootstrap:expatest-small-1,blog \
        deployapp:expatest-small-1,core \
        deployapp:expatest-small-1,gis \
        deployapp:expatest-small-1,app \
        deploywp:expatest-small-1"""
# fab invocation that builds the full multi-node stack with RDS databases.
fullstack = """fab create_rds:expacore-db-1,core,postgres \
              create_rds:expagis-db-1,gis,postgres \
              create_rds:expablog-db-1,blog,mysql \
              create_rds:expatest-db-1,app,postgres \
              create_ec2:expacore-full-1 \
              create_ec2:expagis-full-1 \
              create_ec2:expatest-full-1 \
              bootstrap:expacore-full-1,core \
              bootstrap:expacore-full-1,blog \
              bootstrap:expagis-full-1,gis \
              bootstrap:expatest-full-1,app \
              deployapp:expacore-full-1,core \
              deployapp:expagis-full-1,gis \
              deploywp:expacore-full-1 \
              deployapp:expatest-full-1,app"""
#----------HELPER FUNCTIONS-----------
@contextmanager
def virtualenv(directory):
    """Run the wrapped fabric commands inside the virtualenv at *directory*.

    Changes into *directory* and prefixes every local command with
    ``source <directory>/bin/activate``.
    """
    env.activate = 'source %s/bin/activate' % directory
    # Bug fix: previously referenced the module-global ``envdir`` instead of
    # the ``directory`` parameter, silently ignoring the argument.
    with lcd(directory):
        with prefix(env.activate):
            yield
#--------------------------------------
# Setup virtual env
if len(sys.argv) == 2:
    # Re-use an existing virtualenv passed on the command line.
    envdir = sys.argv[1]
else:
    # Otherwise create a disposable one under test_root.
    envdir = tempfile.mkdtemp(prefix=project_name + '.', dir=test_root)
try:
    os.mkdir(test_root)
except OSError:
    # Directory already exists -- fine.
    pass
# Cleanup previous settings files
#local('rm ./*_settings.json')
print "creating venv %s..." % envdir
local('virtualenv %s' % envdir)
with lcd(envdir):
    with virtualenv(envdir):
        # Generate a project from the skeleton template, copy in configs,
        # install requirements and deploy the small stack.
        local('pip install -q django==1.6')
        local('django-admin.py startproject --template=https://github.com/expa/skeleton/archive/master.zip --extension=py,rst,html,conf,xml --name=Vagrantfile --name=crontab %s' % project_name)
        with lcd(project_name):
            local(rcp + config_dir + '/*.cfg ' + config_dir + '/keys ' + ' ./ ')
            local('pip install -q -r requirements/local.txt')
            local(smallstack)
# test urls
urls = ['https://core.test.expa.com', 'https://gis.test.expa.com', 'https://www.test.expa.com', 'https://test.expa.com', 'http://blog.test.expa.com']
url_response = dict.fromkeys(urls)
for url in urls:
    try:
        response = urllib2.urlopen(url)
        url_response[url] = response.code
        print url + ": " + green(str(response.code))
    except urllib2.HTTPError, error:
        print url + ": " + red(error.code)
    except urllib2.URLError, error:
        print url + ": " + red(error.args)
try:
    # If any fetch failed above, ``error`` is bound and we exit 1; if all
    # succeeded, ``type(error)`` raises NameError and we fall through.
    type(error)
    print('fail')
    sys.exit(1)
except NameError:
    # NOTE(review): ``range(401, 600) in url_response.values()`` asks whether
    # the *list object* itself is a stored value -- always False here.  The
    # intent was probably any(401 <= c < 600 for c in url_response.values()).
    if range(401, 600) in url_response.values():
        print('fail')
        sys.exit(1)
    print('pass')
| yadudoc/skeleton | tests/test-aws-env.py | Python | mit | 3,436 |
'''
Created on 13. jan. 2012
@author: pcn
'''
class TemplateList(object):
    """Placeholder container for configuration templates; currently empty."""
    def __init__(self):
        pass
class ConfigurationTemplatesX(object):
    """Watches a configuration tree and refreshes media state on change."""
    def __init__(self, configurationTree):
        self._configurationTree = configurationTree
    def _getConfiguration(self):
        # NOTE(review): ``loadMediaFromConfiguration`` and ``_mediaPool`` are
        # not defined in this class -- presumably supplied by a subclass or
        # mixin; confirm before instantiating this class directly.
        self.loadMediaFromConfiguration()
    def checkAndUpdateFromConfiguration(self):
        # Re-read configuration and propagate the update to each media file,
        # then clear the tree's "updated" flag.
        if(self._configurationTree.isConfigurationUpdated()):
            print "mediaPool config is updated..."
            self._getConfiguration()
            for mediaFile in self._mediaPool:
                if(mediaFile != None):
                    mediaFile.checkAndUpdateFromConfiguration()
            self._configurationTree.resetConfigurationUpdated()
| perchrn/TaktPlayer | src/configuration/ConfigurationTemplates.py | Python | gpl-2.0 | 764 |
from django.contrib.auth import get_user_model
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse
from misago.core import errorpages, mail
from misago.core.decorators import require_POST
from misago.core.shortcuts import paginate, validate_slug
from misago.core.testproject.models import Model
from misago.core.views import noscript
def test_mail_user(request):
    """Send the base test e-mail to the first user in the database."""
    User = get_user_model()
    test_user = User.objects.all().first()
    mail.mail_user(request,
                   test_user,
                   "Misago Test Mail",
                   "misago/emails/base")
    return HttpResponse("Mailed user!")
def test_mail_users(request):
    """Send the base test e-mail to every user (iterator avoids loading all)."""
    User = get_user_model()
    mail.mail_users(request,
                    User.objects.iterator(),
                    "Misago Test Spam",
                    "misago/emails/base")
    return HttpResponse("Mailed users!")
def test_pagination(request, page=None):
    """Paginate 15 dummy items 5-per-page; respond with the page's items."""
    items = range(15)
    page = paginate(items, page, 5)
    return HttpResponse(",".join([str(x) for x in page.object_list]))
def validate_slug_view(request, model_id, model_slug):
    """Exercise validate_slug against a fixed-slug model; 'eric-the-fish'
    is the canonical slug, so other slugs should trigger a redirect."""
    model = Model(int(model_id), 'eric-the-fish')
    validate_slug(model, model_slug)
    return HttpResponse("Allright!")
# Views that trigger the framework's error handling in different ways, used
# to test Misago's custom error pages vs. Django's defaults.
def raise_misago_403(request):
    raise PermissionDenied('Misago 403')
def raise_misago_404(request):
    raise Http404('Misago 404')
def raise_misago_405(request):
    # 405 via the shared errorpages helper rather than an exception.
    return errorpages.not_allowed(request)
def raise_403(request):
    # Bare PermissionDenied (no message), handled by the default machinery.
    raise PermissionDenied()
def raise_404(request):
    raise Http404()
def test_noscript(request):
    """Echo POST data through the noscript fallback view."""
    return noscript(request, **request.POST)
@require_POST
def test_require_post(request):
    """View restricted to POST by misago's require_POST decorator."""
    return HttpResponse("Request method: %s" % request.method)
# Handlers registered via the shared-exception-handler decorators; used to
# verify that third-party apps can override Misago's 403/404 pages.
@errorpages.shared_403_exception_handler
def mock_custom_403_error_page(request):
    return HttpResponse("Custom 403", status=403)
@errorpages.shared_404_exception_handler
def mock_custom_404_error_page(request):
    return HttpResponse("Custom 404", status=404)
| leture/Misago | misago/core/testproject/views.py | Python | gpl-2.0 | 2,044 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Why our own memcache client?
By Michael Barton
python-memcached doesn't use consistent hashing, so adding or
removing a memcache server from the pool invalidates a huge
percentage of cached items.
If you keep a pool of python-memcached client objects, each client
object has its own connection to every memcached server, only one of
which is ever in use. So you wind up with n * m open sockets and
almost all of them idle. This client effectively has a pool for each
server, so the number of backend connections is hopefully greatly
reduced.
python-memcache uses pickle to store things, and there was already a
huge stink about Swift using pickles in memcache
(http://osvdb.org/show/osvdb/86581). That seemed sort of unfair,
since nova and keystone and everyone else use pickles for memcache
too, but it's hidden behind a "standard" library. But changing would
be a security regression at this point.
Also, pylibmc wouldn't work for us because it needs to use python
sockets in order to play nice with eventlet.
Lucid comes with memcached: v1.4.2. Protocol documentation for that
version is at:
http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt
"""
import six.moves.cPickle as pickle
import json
import logging
import time
from bisect import bisect
from hashlib import md5
from eventlet.green import socket
from eventlet.pools import Pool
from eventlet import Timeout
from six.moves import range
from swift.common import utils
DEFAULT_MEMCACHED_PORT = 11211
# Socket timeouts, in seconds.
CONN_TIMEOUT = 0.3
POOL_TIMEOUT = 1.0  # WAG
IO_TIMEOUT = 2.0
# Flag bits stored with each value, recording how it was serialized.
PICKLE_FLAG = 1
JSON_FLAG = 2
# Virtual points per server on the consistent-hash ring.
NODE_WEIGHT = 50
PICKLE_PROTOCOL = 2
# Distinct servers to try before giving up on an operation.
TRY_COUNT = 3
# if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server
# will be considered failed for ERROR_LIMIT_DURATION seconds.
ERROR_LIMIT_COUNT = 10
ERROR_LIMIT_TIME = 60
ERROR_LIMIT_DURATION = 60
def md5hash(key):
return md5(key).hexdigest()
def sanitize_timeout(timeout):
    """
    Sanitize a timeout value to use an absolute expiration time if the delta
    is greater than 30 days (in seconds). Note that the memcached server
    translates negative values to mean a delta of 30 days in seconds (and 1
    additional second), client beware.
    """
    thirty_days = 30 * 24 * 60 * 60
    if timeout > thirty_days:
        # Beyond 30 days memcached treats the number as a unix timestamp,
        # so convert the relative delta into an absolute expiry.
        return timeout + time.time()
    return timeout
class MemcacheConnectionError(Exception):
    """Raised when a memcache read comes back incomplete or all servers fail."""
    pass
class MemcachePoolTimeout(Timeout):
    """Raised when waiting longer than pool_timeout for a pooled connection."""
    pass
class MemcacheConnPool(Pool):
    """
    Connection pool for Memcache Connections
    The *server* parameter can be a hostname, an IPv4 address, or an IPv6
    address with an optional port. See
    :func:`swift.common.utils.parse_socket_string` for details.
    """
    def __init__(self, server, size, connect_timeout):
        Pool.__init__(self, max_size=size)
        self.host, self.port = utils.parse_socket_string(
            server, DEFAULT_MEMCACHED_PORT)
        self._connect_timeout = connect_timeout
    def create(self):
        # Resolve the server and connect the first usable address with
        # Nagle disabled; connect is bounded by the configured timeout.
        addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC,
                                   socket.SOCK_STREAM)
        family, socktype, proto, canonname, sockaddr = addrs[0]
        sock = socket.socket(family, socket.SOCK_STREAM)
        sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        with Timeout(self._connect_timeout):
            sock.connect(sockaddr)
        # Pool entries are (file-like reader, raw socket) pairs.
        return (sock.makefile(), sock)
    def get(self):
        fp, sock = super(MemcacheConnPool, self).get()
        if fp is None:
            # An error happened previously, so we need a new connection
            fp, sock = self.create()
        return fp, sock
class MemcacheRing(object):
    """
    Simple, consistent-hashed memcache client.
    """
    def __init__(self, servers, connect_timeout=CONN_TIMEOUT,
                 io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT,
                 tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False,
                 max_conns=2):
        # Consistent-hash ring: NODE_WEIGHT md5 points per server.
        self._ring = {}
        self._errors = dict(((serv, []) for serv in servers))
        self._error_limited = dict(((serv, 0) for serv in servers))
        for server in sorted(servers):
            for i in range(NODE_WEIGHT):
                self._ring[md5hash('%s-%s' % (server, i))] = server
        # Never try more servers than actually exist.
        self._tries = tries if tries <= len(servers) else len(servers)
        self._sorted = sorted(self._ring)
        self._client_cache = dict(((server,
                                    MemcacheConnPool(server, max_conns,
                                                     connect_timeout))
                                  for server in servers))
        self._connect_timeout = connect_timeout
        self._io_timeout = io_timeout
        self._pool_timeout = pool_timeout
        self._allow_pickle = allow_pickle
        self._allow_unpickle = allow_unpickle or allow_pickle
    def _exception_occurred(self, server, e, action='talking',
                            sock=None, fp=None, got_connection=True):
        """Log *e*, tear down the broken connection and error-limit *server*.

        If more than ERROR_LIMIT_COUNT errors occurred within
        ERROR_LIMIT_TIME seconds, the server is skipped for
        ERROR_LIMIT_DURATION seconds.
        """
        if isinstance(e, Timeout):
            logging.error("Timeout %(action)s to memcached: %(server)s",
                          {'action': action, 'server': server})
        elif isinstance(e, (socket.error, MemcacheConnectionError)):
            logging.error("Error %(action)s to memcached: %(server)s: %(err)s",
                          {'action': action, 'server': server, 'err': e})
        else:
            logging.exception("Error %(action)s to memcached: %(server)s",
                              {'action': action, 'server': server})
        try:
            if fp:
                fp.close()
                del fp
        except Exception:
            pass
        try:
            if sock:
                sock.close()
                del sock
        except Exception:
            pass
        if got_connection:
            # We need to return something to the pool
            # A new connection will be created the next time it is retrieved
            self._return_conn(server, None, None)
        now = time.time()
        self._errors[server].append(time.time())
        if len(self._errors[server]) > ERROR_LIMIT_COUNT:
            # Drop errors older than the window; if still over the limit,
            # mark the server unusable for ERROR_LIMIT_DURATION seconds.
            self._errors[server] = [err for err in self._errors[server]
                                    if err > now - ERROR_LIMIT_TIME]
            if len(self._errors[server]) > ERROR_LIMIT_COUNT:
                self._error_limited[server] = now + ERROR_LIMIT_DURATION
                logging.error('Error limiting server %s', server)
    def _get_conns(self, key):
        """
        Retrieves a server conn from the pool, or connects a new one.
        Chooses the server based on a consistent hash of "key".
        """
        pos = bisect(self._sorted, key)
        served = []
        while len(served) < self._tries:
            pos = (pos + 1) % len(self._sorted)
            server = self._ring[self._sorted[pos]]
            if server in served:
                continue
            served.append(server)
            if self._error_limited[server] > time.time():
                continue
            sock = None
            try:
                with MemcachePoolTimeout(self._pool_timeout):
                    fp, sock = self._client_cache[server].get()
                yield server, fp, sock
            except MemcachePoolTimeout as e:
                self._exception_occurred(
                    server, e, action='getting a connection',
                    got_connection=False)
            except (Exception, Timeout) as e:
                # Typically a Timeout exception caught here is the one raised
                # by the create() method of this server's MemcacheConnPool
                # object.
                self._exception_occurred(
                    server, e, action='connecting', sock=sock)
    def _return_conn(self, server, fp, sock):
        """Returns a server connection to the pool."""
        self._client_cache[server].put((fp, sock))
    def set(self, key, value, serialize=True, time=0,
            min_compress_len=0):
        """
        Set a key/value pair in memcache
        :param key: key
        :param value: value
        :param serialize: if True, value is serialized with JSON before sending
                          to memcache, or with pickle if configured to use
                          pickle instead of JSON (to avoid cache poisoning)
        :param time: the time to live
        :param min_compress_len: minimum compress length, this parameter was
                                 added to keep the signature compatible with
                                 python-memcached interface. This
                                 implementation ignores it.
        """
        key = md5hash(key)
        timeout = sanitize_timeout(time)
        flags = 0
        if serialize and self._allow_pickle:
            value = pickle.dumps(value, PICKLE_PROTOCOL)
            flags |= PICKLE_FLAG
        elif serialize:
            value = json.dumps(value)
            flags |= JSON_FLAG
        for (server, fp, sock) in self._get_conns(key):
            try:
                with Timeout(self._io_timeout):
                    sock.sendall('set %s %d %d %s\r\n%s\r\n' %
                                 (key, flags, timeout, len(value), value))
                    # Wait for the set to complete
                    fp.readline()
                    self._return_conn(server, fp, sock)
                    return
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
    def get(self, key):
        """
        Gets the object specified by key.  It will also unserialize the object
        before returning if it is serialized in memcache with JSON, or if it
        is pickled and unpickling is allowed.
        :param key: key
        :returns: value of the key in memcache
        """
        key = md5hash(key)
        value = None
        for (server, fp, sock) in self._get_conns(key):
            try:
                with Timeout(self._io_timeout):
                    sock.sendall('get %s\r\n' % key)
                    # Parse "VALUE <key> <flags> <bytes>" lines until END.
                    line = fp.readline().strip().split()
                    while True:
                        if not line:
                            raise MemcacheConnectionError('incomplete read')
                        if line[0].upper() == 'END':
                            break
                        if line[0].upper() == 'VALUE' and line[1] == key:
                            size = int(line[3])
                            value = fp.read(size)
                            if int(line[2]) & PICKLE_FLAG:
                                if self._allow_unpickle:
                                    value = pickle.loads(value)
                                else:
                                    value = None
                            elif int(line[2]) & JSON_FLAG:
                                value = json.loads(value)
                            fp.readline()
                        line = fp.readline().strip().split()
                    self._return_conn(server, fp, sock)
                    return value
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
    def incr(self, key, delta=1, time=0):
        """
        Increments a key which has a numeric value by delta.
        If the key can't be found, it's added as delta or 0 if delta < 0.
        If passed a negative number, will use memcached's decr. Returns
        the int stored in memcached
        Note: The data memcached stores as the result of incr/decr is
        an unsigned int.  decr's that result in a number below 0 are
        stored as 0.
        :param key: key
        :param delta: amount to add to the value of key (or set as the value
                      if the key is not found) will be cast to an int
        :param time: the time to live
        :returns: result of incrementing
        :raises MemcacheConnectionError:
        """
        key = md5hash(key)
        command = 'incr'
        if delta < 0:
            command = 'decr'
        delta = str(abs(int(delta)))
        timeout = sanitize_timeout(time)
        for (server, fp, sock) in self._get_conns(key):
            try:
                with Timeout(self._io_timeout):
                    sock.sendall('%s %s %s\r\n' % (command, key, delta))
                    line = fp.readline().strip().split()
                    if not line:
                        raise MemcacheConnectionError('incomplete read')
                    if line[0].upper() == 'NOT_FOUND':
                        # Key missing: try to add it; if another client won
                        # the race (NOT_STORED), fall back to incr/decr.
                        add_val = delta
                        if command == 'decr':
                            add_val = '0'
                        sock.sendall('add %s %d %d %s\r\n%s\r\n' %
                                     (key, 0, timeout, len(add_val), add_val))
                        line = fp.readline().strip().split()
                        if line[0].upper() == 'NOT_STORED':
                            sock.sendall('%s %s %s\r\n' % (command, key,
                                                           delta))
                            line = fp.readline().strip().split()
                            ret = int(line[0].strip())
                        else:
                            ret = int(add_val)
                    else:
                        ret = int(line[0].strip())
                    self._return_conn(server, fp, sock)
                    return ret
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
        raise MemcacheConnectionError("No Memcached connections succeeded.")
    def decr(self, key, delta=1, time=0):
        """
        Decrements a key which has a numeric value by delta. Calls incr with
        -delta.
        :param key: key
        :param delta: amount to subtract to the value of key (or set the
                      value to 0 if the key is not found) will be cast to
                      an int
        :param time: the time to live
        :returns: result of decrementing
        :raises MemcacheConnectionError:
        """
        return self.incr(key, delta=-delta, time=time)
    def delete(self, key):
        """
        Deletes a key/value pair from memcache.
        :param key: key to be deleted
        """
        key = md5hash(key)
        for (server, fp, sock) in self._get_conns(key):
            try:
                with Timeout(self._io_timeout):
                    sock.sendall('delete %s\r\n' % key)
                    # Wait for the delete to complete
                    fp.readline()
                    self._return_conn(server, fp, sock)
                    return
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
    def set_multi(self, mapping, server_key, serialize=True, time=0,
                  min_compress_len=0):
        """
        Sets multiple key/value pairs in memcache.
        :param mapping: dictionary of keys and values to be set in memcache
        :param server_key: key to use in determining which server in the ring
                           is used
        :param serialize: if True, value is serialized with JSON before sending
                          to memcache, or with pickle if configured to use
                          pickle instead of JSON (to avoid cache poisoning)
        :param time: the time to live
        :min_compress_len: minimum compress length, this parameter was added
                           to keep the signature compatible with
                           python-memcached interface. This implementation
                           ignores it
        """
        server_key = md5hash(server_key)
        timeout = sanitize_timeout(time)
        # Build one pipelined message containing every 'set' command.
        msg = ''
        for key, value in mapping.items():
            key = md5hash(key)
            flags = 0
            if serialize and self._allow_pickle:
                value = pickle.dumps(value, PICKLE_PROTOCOL)
                flags |= PICKLE_FLAG
            elif serialize:
                value = json.dumps(value)
                flags |= JSON_FLAG
            msg += ('set %s %d %d %s\r\n%s\r\n' %
                    (key, flags, timeout, len(value), value))
        for (server, fp, sock) in self._get_conns(server_key):
            try:
                with Timeout(self._io_timeout):
                    sock.sendall(msg)
                    # Wait for the set to complete
                    for line in range(len(mapping)):
                        fp.readline()
                    self._return_conn(server, fp, sock)
                    return
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
    def get_multi(self, keys, server_key):
        """
        Gets multiple values from memcache for the given keys.
        :param keys: keys for values to be retrieved from memcache
        :param server_key: key to use in determining which server in the ring
                           is used
        :returns: list of values
        """
        server_key = md5hash(server_key)
        keys = [md5hash(key) for key in keys]
        for (server, fp, sock) in self._get_conns(server_key):
            try:
                with Timeout(self._io_timeout):
                    sock.sendall('get %s\r\n' % ' '.join(keys))
                    line = fp.readline().strip().split()
                    responses = {}
                    while True:
                        if not line:
                            raise MemcacheConnectionError('incomplete read')
                        if line[0].upper() == 'END':
                            break
                        if line[0].upper() == 'VALUE':
                            size = int(line[3])
                            value = fp.read(size)
                            if int(line[2]) & PICKLE_FLAG:
                                if self._allow_unpickle:
                                    value = pickle.loads(value)
                                else:
                                    value = None
                            elif int(line[2]) & JSON_FLAG:
                                value = json.loads(value)
                            responses[line[1]] = value
                            fp.readline()
                        line = fp.readline().strip().split()
                    # Preserve request order; None for keys the server lacked.
                    values = []
                    for key in keys:
                        if key in responses:
                            values.append(responses[key])
                        else:
                            values.append(None)
                    self._return_conn(server, fp, sock)
                    return values
            except (Exception, Timeout) as e:
                self._exception_occurred(server, e, sock=sock, fp=fp)
| nadeemsyed/swift | swift/common/memcached.py | Python | apache-2.0 | 19,482 |
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_commands
----------------------------------
Tests for `dox.commands` module.
"""
import fixtures
import testscenarios
from dox import commands
from dox.tests import base
def get_fake_command(value):
    """Return a stub ``get_commands``-style method that always yields *value*."""
    def _stub(self, args):
        # self and args are deliberately ignored; the canned value wins.
        return value
    return _stub
class TestCommands(base.TestCase):
    # Each scenario declares which config files exist, what commands each
    # reports, and which source must win (precedence: dox > tox > travis).
    scenarios = [
        ('dox_yaml', dict(
            dox_yaml=True, tox_ini=False, travis_yaml=False,
            dox_value=["testr run"], tox_value=None, travis_value=None,
            commands="testr run")),
        ('dox_yaml_ignore_others', dict(
            dox_yaml=True, tox_ini=True, travis_yaml=True,
            dox_value=["testr run"], tox_value=["setup.py test"],
            travis_value=["gem test"],
            commands="testr run")),
        ('tox_ini', dict(
            dox_yaml=False, tox_ini=True, travis_yaml=False,
            dox_value=None, tox_value=["setup.py test"], travis_value=None,
            commands="setup.py test")),
        ('travis_yaml', dict(
            dox_yaml=False, tox_ini=False, travis_yaml=True,
            dox_value=["testr run"], tox_value=None, travis_value=["ruby"],
            commands="ruby")),
    ]
    def setUp(self):
        super(TestCommands, self).setUp()
        # Patch each config source's existence check and command getter to
        # the values declared by the current scenario.
        self.useFixture(fixtures.MonkeyPatch(
            'dox.config.dox_yaml.DoxYaml.exists',
            base.bool_to_fake(self.dox_yaml)))
        self.useFixture(fixtures.MonkeyPatch(
            'dox.config.tox_ini.ToxIni.exists',
            base.bool_to_fake(self.tox_ini)))
        self.useFixture(fixtures.MonkeyPatch(
            'dox.config.travis_yaml.TravisYaml.exists',
            base.bool_to_fake(self.travis_yaml)))
        self.useFixture(fixtures.MonkeyPatch(
            'dox.config.dox_yaml.DoxYaml.get_commands',
            get_fake_command(self.dox_value)))
        self.useFixture(fixtures.MonkeyPatch(
            'dox.config.tox_ini.ToxIni.get_commands',
            get_fake_command(self.tox_value)))
        self.useFixture(fixtures.MonkeyPatch(
            'dox.config.travis_yaml.TravisYaml.get_commands',
            get_fake_command(self.travis_value)))
    def test_commands(self):
        p = commands.Commands()
        self.assertEqual(p.test_command(), self.commands)
def load_tests(loader, in_tests, pattern):
    """unittest hook: expand each test into one per declared scenario."""
    return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
| coolsvap/dox | dox/tests/config/test_commands.py | Python | apache-2.0 | 3,021 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from distutils.version import StrictVersion
from nose.plugins.skip import SkipTest
from elasticsearch import (
Elasticsearch
)
# Elasticsearch endpoint the test fixtures talk to; note the client is
# created at import time.
ELASTICSEARCH_URL = "localhost"
conn = Elasticsearch(ELASTICSEARCH_URL)
index_name = "foo"
default_doc_type = "my_doc_type"
def homogeneous(a, b):
    # Assert (via the 'sure' library) that a and b serialize to identical
    # canonical (sorted-key) JSON.
    json.dumps(a, sort_keys=True).should.equal(json.dumps(b, sort_keys=True))
def heterogeneous(a, b):
    # Assert that a and b differ under canonical JSON serialization.
    json.dumps(a, sort_keys=True).shouldnt.equal(json.dumps(b, sort_keys=True))
def add_document(index, document, **kwargs):
    # Index *document* (default doc type unless overridden) with an immediate
    # refresh so it is searchable right away.
    kwargs = _set_doc_type(kwargs)
    conn.create(index=index, body=document, refresh=True, **kwargs)
def clean_elasticsearch(context):
    """Drop the test index (missing index is ignored)."""
    _delete_es_index(index_name)
def prepare_elasticsearch(context):
    """Recreate a fresh test index and wait until the cluster serves it."""
    clean_elasticsearch(context)
    _create_foo_index()
    conn.cluster.health(wait_for_status='yellow', index=index_name)
def _create_foo_index():
    # ignore=400: index may already exist.
    mapping = _get_mapping(index=index_name)
    conn.indices.create(index=index_name, ignore=400, body=mapping)
    conn.indices.refresh(index=index_name)
def _delete_es_index(index):
    # ignore 400/404: deleting a non-existent index is fine in teardown.
    conn.indices.delete(index=index, ignore=[400, 404])
def _get_mapping(index, **kwargs):
    # Build the index-creation body: geo_point and nested field mappings for
    # the (single) doc type.  *index* is accepted for symmetry but unused.
    kwargs = _set_doc_type(kwargs)
    doc_type = kwargs['doc_type']
    mapping = {
        "mappings": {
            doc_type: {
                "properties": {
                    "location": {
                        "type": "geo_point"
                    },
                    "foo_loc": {
                        "type": "geo_point"
                    },
                    "child": {
                        "type": "nested"
                    }
                }
            }
        }
    }
    return mapping
def _set_doc_type(kwargs):
    """Fill in the module-default doc type unless the caller overrode it."""
    kwargs.setdefault("doc_type", default_doc_type)
    return kwargs
# Fixture hook lists consumed by the test harness around each scenario.
prepare_data = [
    prepare_elasticsearch
]
cleanup_data = [
    clean_elasticsearch
]
def version_tuple(v):
    """Parse a dotted version string into ints, e.g. '1.7.2' -> (1, 7, 2)."""
    return tuple(int(part) for part in v.split("."))
class requires_es_gte(object):
    """
    Decorator for requiring Elasticsearch version
    greater than or equal to 'version'
    """
    def __init__(self, version):
        self.version = version
    def __call__(self, test):
        # Version of the running cluster is advertised via $ES_VERSION.
        es_version_string = os.environ.get("ES_VERSION", None)
        if es_version_string is None:  # Skip check if we don't know our version
            return test
        es_version = StrictVersion(es_version_string)
        required = StrictVersion(self.version)
        if es_version >= required:
            return test
        # Cluster is too old for this test; skip it.
        raise SkipTest
| Yipit/pyeqs | tests/helpers.py | Python | mit | 2,682 |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse # noqa
from django import http
from mox import IsA # noqa
from wildcard import api
from wildcard.test import helpers as test
# URLs under test, resolved once at import time; UPDATE_URL targets pk=1.
INDEX_URL = reverse('horizon:project:queues:index')
CREATE_URL = reverse('horizon:project:queues:create')
UPDATE_URL = reverse('horizon:project:queues:update', args=[1])
class QueueTests(test.TestCase):
    """Mox-based tests for the queues panel's list/create/update/delete/toggle
    actions; api.payload calls are stubbed and replayed in order."""
    @test.create_stubs({api.payload: ('queue_list',)})
    def test_index(self):
        # Listing queues renders the index template with every queue.
        api.payload.queue_list(
            IsA(http.HttpRequest)
        ).AndReturn(self.queues.list())
        self.mox.ReplayAll()
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'project/queues/index.html')
        queues = res.context['queues_table'].data
        self.assertItemsEqual(queues, self.queues.list())
    def _test_create_successful(self, queue, create_args, post_data):
        # Shared driver: expect queue_create(**create_args), POST post_data
        # to the create form and require a clean success message.
        api.payload.queue_create(
            IsA(http.HttpRequest),
            **create_args
        ).AndReturn(queue)
        self.mox.ReplayAll()
        post_data['method'] = 'CreateQueueForm'
        res = self.client.post(CREATE_URL, post_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
    def _test_update_successful(self, queue, update_args, post_data):
        # Shared driver for the update form (get current values, then save).
        api.payload.queue_get(
            IsA(http.HttpRequest),
            queue.uuid,
        ).AndReturn(queue)
        api.payload.queue_update(
            IsA(http.HttpRequest),
            queue.uuid,
            **update_args
        ).AndReturn(None)
        self.mox.ReplayAll()
        post_data['method'] = 'UpdateQueueForm'
        res = self.client.post(UPDATE_URL, post_data)
        self.assertNoFormErrors(res)
        self.assertMessageCount(success=1)
    @test.create_stubs({api.payload: ('queue_create',)})
    def test_create(self):
        queue = self.queues.get(uuid='1')
        self._test_create_successful(
            queue,
            {
                'name': queue.name,
                'description': queue.description,
            },
            {
                'name': queue.name,
                'description': queue.description,
            },
        )
    @test.create_stubs({api.payload: ('queue_create',)})
    def test_create_description_is_not_required(self):
        # Omitting 'description' in the POST must create with empty string.
        queue = self.queues.get(uuid='1')
        self._test_create_successful(
            queue,
            {
                'name': queue.name,
                'description': '',
            },
            {
                'name': queue.name,
            },
        )
    @test.create_stubs({api.payload: ('queue_get', 'queue_update')})
    def test_update(self):
        queue = self.queues.get(uuid='1')
        self._test_update_successful(
            queue,
            {
                'name': queue.name,
                'description': queue.description,
            },
            {
                'uuid': queue.uuid,
                'name': queue.name,
                'description': queue.description,
            },
        )
    @test.create_stubs({api.payload: ('queue_get', 'queue_update')})
    def test_update_description_not_required(self):
        queue = self.queues.get(uuid='1')
        self._test_update_successful(
            queue,
            {
                'name': queue.name,
                'description': '',
            },
            {
                'uuid': queue.uuid,
                'name': queue.name,
            },
        )
    @test.create_stubs({api.payload: ('queue_delete', 'queue_list')})
    def test_delete(self):
        # Table row action posts 'queues__delete__<uuid>' to the index view.
        uuid = '1'
        api.payload.queue_delete(
            IsA(http.HttpRequest),
            uuid,
        )
        api.payload.queue_list(
            IsA(http.HttpRequest),
        ).AndReturn(self.queues.list())
        self.mox.ReplayAll()
        form_data = {'action': 'queues__delete__%s' % uuid}
        res = self.client.post(INDEX_URL, form_data)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.payload: ('queue_update', 'queue_list')})
    def test_enable_queue(self):
        # Toggling a disabled queue must call queue_update(disabled=False).
        queue = self.queues.get(uuid="1")
        queue.disabled = True
        api.payload.queue_list(
            IsA(http.HttpRequest),
        ).AndReturn(self.queues.list())
        api.payload.queue_update(
            IsA(http.HttpRequest),
            queue.uuid,
            disabled=False
        ).AndReturn(None)
        self.mox.ReplayAll()
        formData = {'action': 'queues__toggle__%s' % queue.uuid}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
    @test.create_stubs({api.payload: ('queue_update', 'queue_list')})
    def test_disable_queue(self):
        # Toggling an enabled queue must call queue_update(disabled=True).
        queue = self.queues.get(uuid="1")
        queue.disabled = False
        api.payload.queue_list(
            IsA(http.HttpRequest),
        ).AndReturn(self.queues.list())
        api.payload.queue_update(
            IsA(http.HttpRequest),
            queue.uuid,
            disabled=True
        ).AndReturn(None)
        self.mox.ReplayAll()
        formData = {'action': 'queues__toggle__%s' % queue.uuid}
        res = self.client.post(INDEX_URL, formData)
        self.assertRedirectsNoFollow(res, INDEX_URL)
| kickstandproject/wildcard | wildcard/dashboards/project/queues/tests.py | Python | apache-2.0 | 5,919 |
# -*- coding: utf-8 -*-
"""Given an int32 number, print it in English."""
def _num2text(num):
    """Recursively spell out the non-negative integer ``num`` in Turkish.

    Helper for :func:`num2text`. The "bir" elision is handled here
    directly: Turkish says "yüz" (not "bir yüz") and "bin" (not
    "bir bin") when the multiplier is exactly one, but keeps "bir" for
    the larger scales ("bir milyon"). Handling the elision structurally
    fixes the old global string-replace approach, which corrupted
    numbers such as 21000 ("yirmi bir bin" -> "yirmi bin").
    """
    d = { 0 : 'sıfır', 1 : 'bir', 2 : 'iki', 3 : 'üç', 4 : 'dört', 5 : 'beş',
          6 : 'altı', 7 : 'yedi', 8 : 'sekiz', 9 : 'dokuz', 10 : 'on',
          11 : 'on bir', 12 : 'on iki', 13 : 'on üç', 14 : 'on dört',
          15 : 'on beş', 16 : 'on altı', 17 : 'on yedi', 18 : 'on sekiz',
          19 : 'on dokuz', 20 : 'yirmi',
          30 : 'otuz', 40 : 'kırk', 50 : 'elli', 60 : 'altmış',
          70 : 'yetmiş', 80 : 'seksen', 90 : 'doksan' }
    k = 1000
    m = k * 1000
    b = m * 1000
    t = b * 1000
    assert 0 <= num  # negative numbers are not supported
    if num < 20:
        return d[num]
    if num < 100:
        if num % 10 == 0:
            return d[num]
        return d[num // 10 * 10] + ' ' + d[num % 10]
    if num < k:
        # "yüz", not "bir yüz", when the hundreds digit is one.
        prefix = 'yüz' if num < 200 else d[num // 100] + ' yüz'
        if num % 100 == 0:
            return prefix
        return prefix + ' ' + _num2text(num % 100)
    # Larger scales, biggest first. Numbers >= 1000 trilyon recurse on the
    # trilyon multiplier itself ("bin trilyon", ...), as before.
    for divisor, word in ((t, 'trilyon'), (b, 'milyar'), (m, 'milyon'), (k, 'bin')):
        if num >= divisor:
            head = num // divisor
            # Only the "bin" scale elides a leading "bir".
            if head == 1 and word == 'bin':
                prefix = word
            else:
                prefix = _num2text(head) + ' ' + word
            rest = num % divisor
            if rest == 0:
                return prefix
            return prefix + ' ' + _num2text(rest)
def num2text(num):
    """Return the Turkish spelling of the non-negative integer ``num``."""
    return _num2text(num)
| evrenesat/genesis | genesis/com/num2text.py | Python | gpl-3.0 | 1,718 |
# Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License. A copy of the License
# is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
import io
import os
import unittest
import unittest.mock
from tempfile import TemporaryDirectory
import sockeye.constants
import sockeye.inference
import sockeye.output_handler
import sockeye.translate
# Two-line fixture used both as file content and as mocked stdin below.
TEST_DATA = "Test file line 1\n" \
            "Test file line 2\n"
def mock_open(*args, **kargs):
    """Create a ``unittest.mock.mock_open`` whose file handle is iterable.

    Works around MagicMock objects not being iterable
    (http://bugs.python.org/issue21258); see also
    http://stackoverflow.com/questions/24779893/customizing-unittest-mock-mock-open-for-iteration
    """
    patched_open = unittest.mock.mock_open(*args, **kargs)
    # Iterate by repeatedly calling readline() until it returns ''.
    patched_open.return_value.__iter__ = lambda self: iter(self.readline, '')
    return patched_open
def test_translate_by_file():
    """read_and_translate issues one batched translate call for a real input file."""
    handler = unittest.mock.Mock(spec=sockeye.output_handler.OutputHandler)
    translator = unittest.mock.Mock(spec=sockeye.inference.Translator)
    translator.translate.return_value = ['', '']
    translator.num_source_factors = 1
    translator.batch_size = 1
    translator.nbest_size = 1
    # An empty input (/dev/null) should not trigger any translate call.
    sockeye.translate.read_and_translate(translator=translator, output_handler=handler,
                                         chunk_size=2, input_file='/dev/null', input_factors=None)
    with TemporaryDirectory() as tmpdir:
        input_path = os.path.join(tmpdir, 'input')
        with open(input_path, 'w') as fh:
            fh.write(TEST_DATA)
        sockeye.translate.read_and_translate(translator=translator, output_handler=handler,
                                             chunk_size=2, input_file=input_path, input_factors=None)
    # Ensure translate gets called once. Input here will be a dummy mocked result, so we'll ignore it.
    assert translator.translate.call_count == 1
@unittest.mock.patch("sys.stdin", io.StringIO(TEST_DATA))
def test_translate_by_stdin_chunk2():
mock_output_handler = unittest.mock.Mock(spec=sockeye.output_handler.OutputHandler)
mock_translator = unittest.mock.Mock(spec=sockeye.inference.Translator)
mock_translator.translate.return_value = ['', '']
mock_translator.num_source_factors = 1
mock_translator.batch_size = 1
mock_translator.nbest_size = 1
sockeye.translate.read_and_translate(translator=mock_translator,
output_handler=mock_output_handler,
chunk_size=2)
# Ensure translate gets called once. Input here will be a dummy mocked result, so we'll ignore it.
assert mock_translator.translate.call_count == 1
| artemsok/sockeye | test/unit/test_translate.py | Python | apache-2.0 | 3,120 |
from typing import List, Dict, Set
import pandas as pd
from ray.data import Dataset
from ray.ml.preprocessor import Preprocessor
class OrdinalEncoder(Preprocessor):
    """Encode values within columns as ordered integer values.

    Currently, order within a column is based on the values from the fitted
    dataset in sorted order.

    Transforming values not included in the fitted dataset will be encoded
    as ``None``.

    Args:
        columns: The columns that will individually be encoded.
    """

    def __init__(self, columns: List[str]):
        # TODO: allow user to specify order of values within each column.
        super().__init__()
        self.columns = columns

    def _fit(self, dataset: Dataset) -> Preprocessor:
        # One value -> ordinal-index mapping is recorded per column.
        self.stats_ = _get_unique_value_indices(dataset, *self.columns)
        return self

    def _transform_pandas(self, df: pd.DataFrame):
        _validate_df(df, *self.columns)

        def encode_column(series: pd.Series):
            # Values unseen during fit fall through Series.map as NaN/None.
            return series.map(self.stats_[f"unique_values({series.name})"])

        df.loc[:, self.columns] = df.loc[:, self.columns].transform(encode_column)
        return df

    def __repr__(self):
        return f"<Encoder columns={self.columns} stats={self.stats_}>"
class OneHotEncoder(Preprocessor):
    """Encode columns as new columns using one-hot encoding.

    The transformed dataset will have a new column in the form
    ``{column}_{value}`` for each of the values from the fitted dataset.
    The value of a column will be set to 1 if the value matches,
    otherwise 0.

    Transforming values not included in the fitted dataset will result in
    all of the encoded column values being 0.

    Args:
        columns: The columns that will individually be encoded.
    """

    def __init__(self, columns: List[str]):
        # TODO: add `drop` parameter.
        super().__init__()
        self.columns = columns

    def _fit(self, dataset: Dataset) -> Preprocessor:
        self.stats_ = _get_unique_value_indices(dataset, *self.columns)
        return self

    def _transform_pandas(self, df: pd.DataFrame):
        _validate_df(df, *self.columns)
        # Append one 0/1 indicator column per fitted value, then drop the
        # original, unencoded columns.
        for column in self.columns:
            for value in self.stats_[f"unique_values({column})"]:
                df[f"{column}_{value}"] = (df[column] == value).astype(int)
        return df.drop(columns=self.columns)

    def __repr__(self):
        return f"<Encoder columns={self.columns} stats={self.stats_}>"
class LabelEncoder(Preprocessor):
    """Encode values within a label column as ordered integer values.
    Currently, order within a column is based on the values from the fitted
    dataset in sorted order.
    Transforming values not included in the fitted dataset will be encoded as ``None``.
    Args:
        label_column: The label column that will be encoded.
    """
    def __init__(self, label_column: str):
        super().__init__()
        self.label_column = label_column
    def _fit(self, dataset: Dataset) -> Preprocessor:
        # Mapping of each sorted unique label value to its ordinal index.
        self.stats_ = _get_unique_value_indices(dataset, self.label_column)
        return self
    def _transform_pandas(self, df: pd.DataFrame):
        _validate_df(df, self.label_column)
        def column_label_encoder(s: pd.Series):
            # Values unseen during fit fall through Series.map as NaN/None.
            s_values = self.stats_[f"unique_values({s.name})"]
            return s.map(s_values)
        df[self.label_column] = df[self.label_column].transform(column_label_encoder)
        return df
    def __repr__(self):
        return f"<Encoder label column={self.label_column} stats={self.stats_}>"
def _get_unique_value_indices(
    dataset: Dataset, *columns: str
) -> Dict[str, Dict[str, int]]:
    """Compute a ``value -> ordinal index`` mapping for each column.

    Raises:
        ValueError: if a column contains null values, since nulls cannot
            be meaningfully ordered or encoded.
    """
    stats = {}
    for col in columns:
        unique = _get_unique_values(dataset, col)
        if any(pd.isnull(v) for v in unique):
            raise ValueError(
                f"Unable to fit column '{col}' because it contains null values. "
                f"Consider imputing missing values first."
            )
        stats[f"unique_values({col})"] = _sorted_value_indices(unique)
    return stats
def _get_unique_values(dataset: Dataset, column: str) -> Set[str]:
    """Return the set of distinct values in ``column`` of ``dataset``."""
    grouped = dataset.groupby(column).count()
    # TODO: Support an upper limit by using `grouped.take(N)` instead.
    return set(row[column] for row in grouped.iter_rows())
def _sorted_value_indices(values: Set) -> Dict[str, int]:
"""Converts values to a Dict mapping to unique indexes.
Values will be sorted.
Example:
>>> _sorted_value_indices({"b", "a", "c", "a"})
{"a": 0, "b": 1, "c": 2}
"""
return {value: i for i, value in enumerate(sorted(values))}
def _validate_df(df: pd.DataFrame, *columns: str) -> None:
null_columns = [column for column in columns if df[column].isnull().values.any()]
if null_columns:
raise ValueError(
f"Unable to transform columns {null_columns} because they contain "
f"null values. Consider imputing missing values first."
)
| ray-project/ray | python/ray/ml/preprocessors/encoder.py | Python | apache-2.0 | 5,304 |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2016 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Widgets needed for the statusbar."""
| Konubinix/qutebrowser | qutebrowser/mainwindow/statusbar/__init__.py | Python | gpl-3.0 | 839 |
# -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
class HiSpiderMiddleware(object):
    """Spider middleware from the scrapy project template.

    Every hook is a pass-through; scrapy treats an undefined method as
    "do not modify the passed objects".
    """
    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also subscribes to the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware
    def process_spider_input(self, response, spider):
        # Accept every response flowing into the spider (None == continue).
        return None
    def process_spider_output(self, response, result, spider):
        # Forward each item/request produced by the spider unchanged.
        for produced in result:
            yield produced
    def process_spider_exception(self, response, exception, spider):
        # No special handling; returning None lets other middleware run.
        pass
    def process_start_requests(self, start_requests, spider):
        # Forward the start requests untouched (must yield only requests).
        for request in start_requests:
            yield request
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
class HiDownloaderMiddleware(object):
    """Downloader middleware from the scrapy project template.

    Every hook is a pass-through; scrapy treats an undefined method as
    "do not modify the passed objects".
    """
    @classmethod
    def from_crawler(cls, crawler):
        # Factory used by Scrapy; also subscribes to the spider_opened signal.
        middleware = cls()
        crawler.signals.connect(middleware.spider_opened,
                                signal=signals.spider_opened)
        return middleware
    def process_request(self, request, spider):
        # Returning None lets the request continue through the chain;
        # a Response/Request return or IgnoreRequest would short-circuit it.
        return None
    def process_response(self, request, response, spider):
        # Hand the downloaded response back unchanged.
        return response
    def process_exception(self, request, exception, spider):
        # Returning None continues exception processing in other middleware.
        pass
    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
| tansuo1989/mydemo | python/scrapy/hi/hi/middlewares.py | Python | mit | 3,589 |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models
from odoo import tools, _
from odoo.exceptions import ValidationError
import requests
import json
import logging
_logger = logging.getLogger(__name__)
from datetime import datetime
from dateutil.relativedelta import relativedelta, MO
class pi_transactions(models.Model):
_name = "pi.transactions"
_description = "Pi Transactions"
name = fields.Char('Name')
app_id = fields.Many2one('admin.apps', required=True, ondelete='restrict')
app = fields.Char(related="app_id.app")
action = fields.Selection([('approve', 'Approve'), ('complete', 'Complete'), ('cancelled', 'Cancelled')], 'Action', required=True)
payment_id = fields.Char('PaymentId', required=True)
txid = fields.Text('TXID')
txid_url = fields.Text('TXID URL', compute="_compute_txid_url")
pi_user_id = fields.Char('Pi User ID')
pi_user = fields.Many2one('pi.users', ondelete='restrict')
amount = fields.Float('Amount', digits=(50,8))
memo = fields.Char('Memo')
to_address = fields.Char('To address')
developer_approved = fields.Boolean('developer_approved')
transaction_verified = fields.Boolean('transaction_verified')
developer_completed = fields.Boolean('developer_completed')
cancelled = fields.Boolean('cancelled')
user_cancelled = fields.Boolean('user_cancelled')
json_result = fields.Text('JSON Result', required=True)
def _compute_txid_url(self):
for pit in self:
if pit.txid:
pit.txid_url = "https://minepi.com/blockexplorer/tx/" + pit.txid
else:
pit.txid_url = ""
def check_transactions(self):
for pit in self:
url = 'https://api.minepi.com/v2/payments/' + pit.payment_id
re = requests.get(url,headers={'Authorization': "Key " + pit.app_id.admin_key})
try:
result = re.json()
result_dict = json.loads(str(json.dumps(result)))
if (result_dict['status']['cancelled'] or result_dict['status']['user_cancelled']) and pit.action!="cancelled":
pit.write({'action': 'cancelled'})
elif result_dict['status']['developer_approved'] and not (result_dict['status']['cancelled'] or result_dict['status']['user_cancelled']) and pit.action!="approve":
pit.write({'action': 'approve'})
if result_dict["status"]["transaction_verified"] and result_dict['status']['developer_completed'] and pit.action!="complete":
pit.write({'name': "complete. PaymentId: " + pit.payment_id,
'action': 'complete',
'payment_id': pit.payment_id,
'txid': result_dict["transaction"]["txid"],
'pi_user_id': result_dict["user_uid"],
'amount': result_dict["amount"],
'memo': result_dict["memo"],
'to_address': result_dict["to_address"]})
pit.write({'developer_approved': result_dict["status"]["developer_approved"],
'transaction_verified': result_dict["status"]["transaction_verified"],
'developer_completed': result_dict["status"]["developer_completed"],
'cancelled': result_dict["status"]["cancelled"],
'user_cancelled': result_dict["status"]["user_cancelled"],
'json_result': str(result_dict)})
if pit.action == "cancelled" and (result_dict['status']['cancelled'] or result_dict['status']['user_cancelled']) and \
(datetime.now() - pit.create_date).days >= 1:
pit.unlink()
elif pit.action == "approve" and result_dict["status"]["developer_approved"] and \
result_dict["status"]["transaction_verified"] and not result_dict["status"]["developer_completed"] and \
not (result_dict['status']['cancelled'] or result_dict['status']['user_cancelled']):
self.env["admin.apps"].pi_api({'action': "complete", 'txid': result_dict["transaction"]["txid"],
'app_client': pit.app, 'paymentId': pit.payment_id})
elif pit.action == "approve" and result_dict["status"]["developer_approved"] and \
not result_dict["status"]["transaction_verified"] and not result_dict["status"]["developer_completed"] and \
not (result_dict['status']['cancelled'] or result_dict['status']['user_cancelled']) and \
(datetime.now() - pit.create_date).days >= 1:
pit.unlink()
except Exception:
_logger.info(str(re))
class admin_apps(models.Model):
_name = "admin.apps"
_description = "Admin Pi App"
_sql_constraints = [
# Partial constraint, complemented by a python constraint (see below).
('admin_apps_unique_key', 'unique (app)', 'You can not have two app with the same App code!'),
]
name = fields.Char('Name')
app = fields.Char('App code', required=True)
admin_key = fields.Char('Admin Key', required=True)
sandbox = fields.Boolean('Sandbox', required=True)
pi_transactions_ids = fields.One2many('pi.transactions', 'app_id')
pi_users_winners_ids = fields.Many2many('pi.users', 'admin_apps_pi_users_winners_rel', string='Winners')
pi_users_winners_paid_ids = fields.Many2many('pi.users', 'admin_apps_pi_users_winners_paid_rel', string='Winners Paid', domain="[('id', 'in', pi_users_winners_ids)]")
pi_users_winners_html = fields.Html('Winners HTML')
block_points = fields.Boolean('Block points', default=False)
def pi_api(self, kw):
if kw['action'] == "approve":
url = 'https://api.minepi.com/v2/payments/' + kw['paymentId'] + '/approve'
obj = {}
elif kw['action'] == "complete":
url = 'https://api.minepi.com/v2/payments/' + kw['paymentId'] + '/complete'
obj = {'txid': kw['txid']}
admin_app_list = self.env["admin.apps"].sudo().search([('app', '=', kw['app_client'])])
if len(admin_app_list) == 0:
result = {"result": False, "error": "SERVER MESSAGE: There is not API Key Stored in DB"}
return json.dumps(result)
re = requests.post(url,data=obj,json=obj,headers={'Authorization': "Key " + admin_app_list[0].admin_key})
try:
result = re.json()
result_dict = json.loads(str(json.dumps(result)))
if kw['action'] == "approve":
pi_user = self.env['pi.users'].sudo().search([('pi_user_code', '=', kw['pi_user_code'])])
self.env["pi.transactions"].sudo().create({'name': kw['action'] + ". PaymentId: " + kw['paymentId'],
'app_id': admin_app_list[0].id,
'action': kw['action'],
'payment_id': kw['paymentId'],
'json_result': str(result_dict),
'pi_user_id': result_dict["user_uid"],
'pi_user': pi_user[0].id,
'amount': result_dict["amount"],
'memo': result_dict["memo"],
'to_address': result_dict["to_address"],
'developer_approved': result_dict["status"]["developer_approved"],
'transaction_verified': result_dict["status"]["transaction_verified"],
'developer_completed': result_dict["status"]["developer_completed"],
'cancelled': result_dict["status"]["cancelled"],
'user_cancelled': result_dict["status"]["user_cancelled"]})
self.env["pi.transactions"].sudo().search([('action', '=', 'approve'),
('pi_user_id', '=', result_dict["user_uid"])]).check_transactions()
result = {"result": True, "approved": True}
elif kw['action'] == "complete":
self.env["pi.transactions"].sudo().search([('payment_id', '=', kw['paymentId'])]).write(
{'name': kw['action'] + ". PaymentId: " + kw['paymentId'],
'app_id': admin_app_list[0].id,
'action': kw['action'],
'payment_id': kw['paymentId'],
'txid': kw['txid'],
'json_result': str(result_dict),
'pi_user_id': result_dict["user_uid"],
'amount': result_dict["amount"],
'memo': result_dict["memo"],
'to_address': result_dict["to_address"],
'developer_approved': result_dict["status"]["developer_approved"],
'transaction_verified': result_dict["status"]["transaction_verified"],
'developer_completed': result_dict["status"]["developer_completed"],
'cancelled': result_dict["status"]["cancelled"],
'user_cancelled': result_dict["status"]["user_cancelled"]})
transaction = self.env["pi.transactions"].sudo().search([('payment_id', '=', kw['paymentId'])])
result = {"result": True, "completed": False}
if len(transaction) > 0 and kw['app_client'] in ['auth_pidoku', 'auth_snake', 'auth_platform', 'auth_example']:
if result_dict["status"]["transaction_verified"] and result_dict["status"]["developer_approved"] and result_dict["status"]["developer_completed"]:
users = transaction[0].pi_user
if len(users) > 0:
if (users[0].paid + float(result_dict["amount"])) >= 1:
users[0].sudo().write({'unblocked': True})
users[0].sudo().write({'paid': users[0].paid + float(result_dict["amount"])})
result = {"result": True, "completed": True}
else:
result = {"result": True, "completed": False, "approved": False}
except Exception:
result = {"result": False, "error": "SERVER MESSAGE: " + str(re)}
return json.dumps(result)
class pi_users(models.Model):
_name = "pi.users"
_description = "Pi Users"
_sql_constraints = [
# Partial constraint, complemented by a python constraint (see below).
('pi_user_unique_key', 'unique (pi_user_code)', 'You can not have two users with the same User code!'),
]
name = fields.Char('Name')
pi_user_id = fields.Char('Pi User ID', required=True)
pi_user_code = fields.Char('Pi User Code', required=True)
passkey = fields.Char('Pass Key')
points = fields.Float('Pi User Points', compute="_total_points", store=True)
points_chess = fields.Float('Chess Points', required=True, default=0)
points_sudoku = fields.Float('Sudoku Points', required=True, default=0)
points_snake = fields.Float('Snake Points', required=True, default=0)
points_datetime = fields.Datetime('Points Datetime', compute="_compute_points_datetime", store=True)
paid = fields.Float('Paid by user')
paid_in_transactions = fields.Float('Paid by user in transactions', compute="_total_paid_transactions", store=True)
pi_transactions_ids = fields.One2many('pi.transactions', 'pi_user')
unblocked = fields.Boolean('Unblocked', compute="_total_paid_transactions", store=True)
user_agent = fields.Char('User agent')
last_connection = fields.Datetime(string='Last connection', default="")
days_available = fields.Integer('Days available', store=True, default=0)
admin_apps_winners_ids = fields.Many2many('admin.apps', 'admin_apps_pi_users_winners_rel', string='Winners Apps')
admin_apps_winners_paid_ids = fields.Many2many('admin.apps', 'admin_apps_pi_users_winners_paid_rel', string='Winners Paid Apps', domain="[('id', 'in', admin_apps_winners_ids)]")
donator = fields.Boolean('Donator', compute="_compute_donator", store=True)
@api.depends("pi_transactions_ids", "pi_transactions_ids.action", "pi_transactions_ids.app_id", "pi_transactions_ids.app_id.app")
def _compute_donator(self):
for i in self:
i.donator = False
transaction = self.env['pi.transactions'].search([('id', 'in', i.pi_transactions_ids.ids), ('app_id.app', '=', 'auth_example'), ('action', '=', 'complete')], limit=1)
if len(transaction) == 0:
i.donator = False
else:
i.donator = True
@api.depends("points_chess", "points_sudoku", "points_snake", "paid", "unblocked", "pi_user_id")
def _total_points(self):
for i in self:
i.points = i.points_chess + i.points_sudoku + i.points_snake
@api.depends("points_chess", "points_sudoku", "points_snake")
def _compute_points_datetime(self):
for i in self:
i.points_datetime = datetime.now()
@api.depends("pi_transactions_ids", "pi_transactions_ids.action")
def _total_paid_transactions(self):
for i in self:
total = 0
for j in i.pi_transactions_ids:
if j.action == "complete":
total += j.amount
i.paid_in_transactions = total
if i.paid_in_transactions > 0:
i.unblocked = True
transaction = self.env['pi.transactions'].search([('id', 'in', i.pi_transactions_ids.ids), ('action', '=', 'complete')], order="create_date desc", limit=1)
if len(transaction) == 0:
i.unblocked = False
i.days_available = 0
else:
i.days_available = 30 - (datetime.now() - transaction[0].create_date).days
if i.days_available < 0:
i.days_available = 0
if i.days_available == 0:
i.unblocked = False
def check_users(self):
for piu in self:
transaction = self.env['pi.transactions'].search([('id', 'in', piu.pi_transactions_ids.ids), ('action', '=', 'complete')], order="create_date desc", limit=1)
if len(transaction) == 0:
piu.write({'unblocked': False, 'days_available': 0})
else:
days_available = 30 - (datetime.now() - transaction[0].create_date).days
if days_available < 0:
days_available = 0
piu.write({'days_available': days_available})
if days_available == 0:
piu.write({'unblocked': False})
| rockcesar/odoo_addons | website_pinetwork_odoo/models/admin_apps.py | Python | agpl-3.0 | 16,696 |
#!/usr/bin/env python
import logging
import time
import pymongo
from pyfocas.Collector import Collector
from pyfocas.Machine import Machine
from FanucImplementation.DriverImplementations import Fanuc30iDriver
from pyfocas import Exceptions
THREE_SIXTEEN = "10.108.7.41"
THREE_TWENTY = "10.108.7.42"
THREE_TWENTY_TWO = "10.108.7.44"
THREE_TWENTY_SIX = "10.108.7.45"
FOUR_SIXTEEN = "10.108.7.46"
FOUR_EIGHTEEN = "10.108.7.47"
FOUR_TWENTY_ONE = "10.108.7.39"
JAIME = "10.108.15.52"
THREE_EIGHTY_SEVEN = "10.108.7.12"
THREE_NINETY_SEVEN = "10.108.7.13"
THREE_NINETY_THREE = "10.108.7.14"
THREE_NINETY_SIX = "10.108.7.15"
def logging_reporter(machine):
    """Reporter callback that logs a machine's datum to the default logger.

    Intended for debugging: every datum produced by ``machine`` is written
    to the root logger at INFO level.

    Parameters: Machine machine
                    The machine object to report on.
    Return value: dict data
                    The datum dictionary, or None when the FOCAS
                    connection dropped and a reconnect was attempted.
    """
    try:
        datum = machine.createDatum()
        logging.info(datum)
        return datum
    except Exceptions.FocasConnectionException:
        # The connection to the control was lost; try to re-establish it.
        machine.reconnect()
def mongo_reporter(collection, machine):
    """Reporter that inserts a machine's datum into a MongoDB collection.

    (The previous docstring was copy-pasted from logging_reporter and
    described the wrong function.)

    Parameters: pymongo.collection.Collection collection
                    The MongoDB collection that receives datum documents.
                Machine machine
                    The machine object to report on.
    Return value: dict data
                    The datum that was inserted, or None when the FOCAS
                    connection dropped and a reconnect was attempted.
    """
    try:
        data = machine.createDatum()
        collection.insert_one(data)
        return data
    except Exceptions.FocasConnectionException:
        # The connection to the control was lost; try to re-establish it.
        machine.reconnect()
def main():
    """
    The main method of the program. Runs a Collector forever.
    """
    """ Setup logging """
    logging.basicConfig(level=logging.DEBUG)
    logging.info("Starting Collector")
    """ Setup MongoDB logging client """
    client = pymongo.MongoClient("mongodb://srvhoursapp25.nov.com:27017/")
    db = client['test']
    collection = db['focas']
    def reporter(machine):
        # Bind the collection so the Collector sees a one-argument reporter.
        return mongo_reporter(collection, machine)
    """ Instantiate Fanuc30iDriver """
    # DLL paths are relative to the directory the script is launched from.
    driver30i = Fanuc30iDriver("./lib/Fwlib32.dll",
                               extradlls=["./lib/fwlibe1.dll"])
    """ List of Machine objects to initialize the Collector with """
    # Commented entries are machines temporarily excluded from collection.
    machines = [Machine(driver=driver30i, ip=THREE_SIXTEEN, name="316"),
                Machine(driver=driver30i, ip=THREE_TWENTY, name="320"),
                #Machine(driver=driver30i, ip=THREE_TWENTY_TWO, name="322"),
                #Machine(driver=driver30i, ip=THREE_TWENTY_SIX, name="326"),
                #Machine(driver=driver30i, ip=FOUR_SIXTEEN, name="416"),
                #Machine(driver=driver30i, ip=FOUR_EIGHTEEN, name="418"),
                Machine(driver=driver30i, ip=FOUR_TWENTY_ONE, name="421"),
                #Machine(driver=driver30i, ip=THREE_EIGHTY_SEVEN, name="387"),
                Machine(driver=driver30i, ip=THREE_NINETY_SEVEN, name="397"),
                Machine(driver=driver30i, ip=THREE_NINETY_SIX, name="396"),
                Machine(driver=driver30i, ip=THREE_NINETY_THREE, name="393"), ]
    """ Create the Collector """
    collector = Collector(reporter=reporter, machines=machines)
    while True:
        """ Run the collector until the process is interrupted """
        collector.collect()
        time.sleep(.5)
if __name__ == "__main__":
    main()
| monkpit/pyfocas | aggregator.py | Python | mit | 3,926 |
"""
Implements NotebookArchive used to automatically capture notebook data
and export it to disk via the display hooks.
"""
import time, sys, os, traceback
from IPython import version_info
from IPython.display import Javascript, display
from .preprocessors import Substitute
# Import appropriate nbconvert machinery
if version_info[0] >= 4:
# Jupyter/IPython >=4.0
from nbformat import reader
from nbconvert import HTMLExporter
from nbconvert.preprocessors.clearoutput import ClearOutputPreprocessor
from nbconvert import NotebookExporter
else:
# IPython <= 3.0
from IPython.nbformat import reader
from IPython.nbconvert import HTMLExporter
if version_info[0] == 3:
# IPython 3
from IPython.nbconvert.preprocessors.clearoutput import ClearOutputPreprocessor
from IPython.nbconvert import NotebookExporter
else:
# IPython 2
from IPython.nbformat import current
NotebookExporter, ClearOutputPreprocessor = None, None
def v3_strip_output(nb):
"""strip the outputs from a notebook object"""
nb["nbformat"] = 3
nb["nbformat_minor"] = 0
nb.metadata.pop('signature', None)
for cell in nb.worksheets[0].cells:
if 'outputs' in cell:
cell['outputs'] = []
if 'prompt_number' in cell:
cell['prompt_number'] = None
return nb
import param
from ..core.io import FileArchive, Pickler
from ..plotting.renderer import HTML_TAGS, MIME_TYPES
class NotebookArchive(FileArchive):
"""
FileArchive that can automatically capture notebook data via the
display hooks and automatically adds a notebook HTML snapshot to
the archive upon export.
"""
exporters = param.List(default=[Pickler])
skip_notebook_export = param.Boolean(default=False, doc="""
Whether to skip JavaScript capture of notebook data which may
be unreliable. Also disabled automatic capture of notebook
name.""")
snapshot_name = param.String('index', doc="""
The basename of the exported notebook snapshot (html). It may
optionally use the {timestamp} formatter.""")
filename_formatter = param.String(default='{dimensions},{obj}', doc="""
Similar to FileArchive.filename_formatter except with support
for the notebook name field as {notebook}.""")
export_name = param.String(default='{notebook}', doc="""
Similar to FileArchive.filename_formatter except with support
for the notebook name field as {notebook}.""")
auto = param.Boolean(False)
# Used for debugging to view Exceptions raised from Javascript
traceback = None
ffields = FileArchive.ffields.union({'notebook'})
efields = FileArchive.efields.union({'notebook'})
    def __init__(self, **params):
        super(NotebookArchive, self).__init__(**params)
        # Populated lazily: notebook metadata is only known after auto()
        # has injected its Javascript and the kernel echoed the name back.
        self.nbversion = None
        self.notebook_name = None
        self.export_success = None
        self._auto = False
        self._replacements = {}
        self._notebook_data = None
        self._timestamp = None
        # Map mime types to their HTML tag templates for snapshot export.
        self._tags = {MIME_TYPES[k]:v for k,v in HTML_TAGS.items() if k in MIME_TYPES}
        # Rewrite auto()'s docstring so it advertises the instance parameters.
        keywords = ['%s=%s' % (k, v.__class__.__name__) for k,v in self.params().items()]
        self.auto.__func__.__doc__ = 'auto(enabled=Boolean, %s)' % ', '.join(keywords)
def get_namespace(self):
"""
Find the name the user is using to access holoviews.
"""
if 'holoviews' not in sys.modules:
raise ImportError('HoloViews does not seem to be imported')
matches = [k for k,v in get_ipython().user_ns.items() # noqa (get_ipython)
if not k.startswith('_') and v is sys.modules['holoviews']]
if len(matches) == 0:
raise Exception("Could not find holoviews module in namespace")
return '%s.archive' % matches[0]
def last_export_status(self):
"Helper to show the status of the last call to the export method."
if self.export_success is True:
print("The last call to holoviews.archive.export was successful.")
return
elif self.export_success is None:
print("Status of the last call to holoviews.archive.export is unknown."
"\n(Re-execute this method once kernel status is idle.)")
return
print("The last call to holoviews.archive.export was unsuccessful.")
if self.traceback is None:
print("\n<No traceback captured>")
else:
print("\n"+self.traceback)
def auto(self, enabled=True, **kwargs):
"""
Method to enable or disable automatic capture, allowing you to
simultaneously set the instance parameters.
"""
self.namespace = self.get_namespace()
self.notebook_name = "{notebook}"
self._timestamp = tuple(time.localtime())
kernel = r'var kernel = IPython.notebook.kernel; '
nbname = r"var nbname = IPython.notebook.get_notebook_name(); "
nbcmd = (r"var name_cmd = '%s.notebook_name = \"' + nbname + '\"'; " % self.namespace)
cmd = (kernel + nbname + nbcmd + "kernel.execute(name_cmd); ")
display(Javascript(cmd))
time.sleep(0.5)
self._auto=enabled
self.param.set_param(**kwargs)
tstamp = time.strftime(" [%Y-%m-%d %H:%M:%S]", self._timestamp)
print("Automatic capture is now %s.%s"
% ('enabled' if enabled else 'disabled',
tstamp if enabled else ''))
def export(self, timestamp=None):
"""
Get the current notebook data and export.
"""
if self._timestamp is None:
raise Exception("No timestamp set. Has the archive been initialized?")
if self.skip_notebook_export:
super(NotebookArchive, self).export(timestamp=self._timestamp,
info={'notebook':self.notebook_name})
return
self.export_success = None
name = self.get_namespace()
# Unfortunate javascript hacks to get at notebook data
capture_cmd = ((r"var capture = '%s._notebook_data=r\"\"\"'" % name)
+ r"+json_string+'\"\"\"'; ")
cmd = (r'var kernel = IPython.notebook.kernel; '
+ r'var json_data = IPython.notebook.toJSON(); '
+ r'var json_string = JSON.stringify(json_data); '
+ capture_cmd
+ "var pycmd = capture + ';%s._export_with_html()'; " % name
+ r"kernel.execute(pycmd)")
tstamp = time.strftime(self.timestamp_format, self._timestamp)
export_name = self._format(self.export_name, {'timestamp':tstamp, 'notebook':self.notebook_name})
print(('Export name: %r\nDirectory %r' % (export_name,
os.path.join(os.path.abspath(self.root))))
+ '\n\nIf no output appears, please check holoviews.archive.last_export_status()')
display(Javascript(cmd))
def add(self, obj=None, filename=None, data=None, info={}, html=None):
"Similar to FileArchive.add but accepts html strings for substitution"
initial_last_key = list(self._files.keys())[-1] if len(self) else None
if self._auto:
exporters = self.exporters[:]
# Can only associate html for one exporter at a time
for exporter in exporters:
self.exporters = [exporter]
super(NotebookArchive, self).add(obj, filename, data,
info=dict(info,
notebook=self.notebook_name))
# Only add substitution if file successfully added to archive.
new_last_key = list(self._files.keys())[-1] if len(self) else None
if new_last_key != initial_last_key:
self._replacements[new_last_key] = html
# Restore the full list of exporters
self.exporters = exporters
# The following methods are executed via JavaScript and so fail
# to appear in the coverage report even though they are tested.
def _generate_html(self, node, substitutions): # pragma: no cover
exporter = HTMLExporter()
exporter.register_preprocessor(Substitute(self.nbversion,
substitutions))
html,_ = exporter.from_notebook_node(node)
return html
def _clear_notebook(self, node): # pragma: no cover
if NotebookExporter is not None:
exporter = NotebookExporter()
exporter.register_preprocessor(ClearOutputPreprocessor(enabled=True))
cleared,_ = exporter.from_notebook_node(node)
else:
stripped_node = v3_strip_output(node)
cleared = current.writes(stripped_node, 'ipynb')
return cleared
def _export_with_html(self): # pragma: no cover
"Computes substitutions before using nbconvert with preprocessors"
self.export_success = False
try:
tstamp = time.strftime(self.timestamp_format, self._timestamp)
substitutions = {}
for (basename, ext), entry in self._files.items():
(_, info) = entry
html_key = self._replacements.get((basename, ext), None)
if html_key is None: continue
filename = self._format(basename, {'timestamp':tstamp,
'notebook':self.notebook_name})
fpath = filename+(('.%s' % ext) if ext else '')
info = {'src':fpath, 'mime_type':info['mime_type']}
# No mime type
if 'mime_type' not in info: pass
# Not displayable in an HTML tag
elif info['mime_type'] not in self._tags: pass
else:
basename, ext = os.path.splitext(fpath)
truncated = self._truncate_name(basename, ext[1:])
link_html = self._format(self._tags[info['mime_type']],
{'src':truncated,
'mime_type':info['mime_type'],
'css':''})
substitutions[html_key] = (link_html, truncated)
node = self._get_notebook_node()
html = self._generate_html(node, substitutions)
export_filename = self.snapshot_name
# Add the html snapshot
super(NotebookArchive, self).add(filename=export_filename,
data=html, info={'file-ext':'html',
'mime_type':'text/html',
'notebook':self.notebook_name})
# Add cleared notebook
cleared = self._clear_notebook(node)
super(NotebookArchive, self).add(filename=export_filename,
data=cleared, info={'file-ext':'ipynb',
'mime_type':'text/json',
'notebook':self.notebook_name})
# If store cleared_notebook... save here
super(NotebookArchive, self).export(timestamp=self._timestamp,
info={'notebook':self.notebook_name})
except:
self.traceback = traceback.format_exc()
else:
self.export_success = True
def _get_notebook_node(self): # pragma: no cover
"Load captured notebook node"
size = len(self._notebook_data)
if size == 0:
raise Exception("Captured buffer size for notebook node is zero.")
node = reader.reads(self._notebook_data)
self.nbversion = reader.get_version(node)
return node
notebook_archive = NotebookArchive()
| basnijholt/holoviews | holoviews/ipython/archive.py | Python | bsd-3-clause | 12,255 |
import logging

logger = logging.getLogger(__name__)


class SyncSectors:
    """Synchronise a sector table against a master list of (id, name) pairs.

    Missing sectors are created, renamed sectors are updated in place and,
    when ``disable_on`` is given, sectors absent from the master list are
    stamped as disabled.  With ``simulate=True`` every change is logged
    but nothing is written.
    """

    def __init__(self, sector_model, sectors, disable_on=None, simulate=False):
        self.sector_model = sector_model  # model with id/name/disabled_on
        self.sectors = sectors            # iterable of (id, name) pairs
        self.disable_on = disable_on      # value written to disabled_on
        self.simulate = simulate          # dry-run flag

    def log(self, msg, level=logging.DEBUG):
        """Emit ``msg`` through the module logger (DEBUG by default)."""
        logger.log(level, msg)

    def __call__(self, *args, **kwargs):
        self.process()

    def process(self):
        """Run the full sync: create, rename, then optionally disable."""
        self.add_new_sectors()
        self.update_existing_sectors()
        if self.disable_on:
            self.disable_sectors()

    def _get_sector(self, sector_id):
        """Return the sector with ``sector_id`` or None when absent."""
        try:
            return self.sector_model.objects.get(id=sector_id)
        except self.sector_model.DoesNotExist:
            return None

    def _update_sector_name(self, sector, sector_name):
        """Rename ``sector`` when its stored name differs."""
        if sector.name == sector_name:
            return
        self.log(f'Updating Sector {sector.id}: [{sector.name} to {sector_name}]')
        if self.simulate:
            return
        sector.name = sector_name
        sector.save()

    def _create_sector(self, sector_id, sector_name):
        """Create a new sector row (unless simulating)."""
        self.log(f'Creating Sector {sector_id}: [{sector_name}]')
        if self.simulate:
            return
        self.sector_model.objects.create(id=sector_id, name=sector_name)

    def _disable_sector(self, sector):
        """Mark ``sector`` as disabled as of ``self.disable_on``."""
        self.log(f'Disabling Sector {sector.id}: [{sector.name}]')
        if self.simulate:
            return
        sector.disabled_on = self.disable_on
        sector.save()

    def add_new_sectors(self):
        """Create every sector from the master list not yet present."""
        for sector_id, sector_name in self.sectors:
            if self._get_sector(sector_id) is None:
                self._create_sector(sector_id, sector_name)

    def update_existing_sectors(self):
        """Bring the names of existing sectors in line with the master list."""
        for sector_id, sector_name in self.sectors:
            sector = self._get_sector(sector_id)
            if sector is None:
                self.log(f'Sector {sector_id}: DOES NOT EXIST [{sector_name}]')
            else:
                self._update_sector_name(sector, sector_name)

    def disable_sectors(self):
        """Disable active sectors that no longer appear in the master list."""
        current_ids = list(dict(self.sectors))
        stale = self.sector_model.objects.exclude(id__in=current_ids).filter(
            disabled_on__isnull=True
        )
        for sector in stale:
            self._disable_sector(sector)
| UKTradeInvestment/export-wins-data | mi/sync_sectors.py | Python | gpl-3.0 | 2,373 |
"""
`Cargo SQL Fields`
``Field-type classes for the Cargo SQL ORM``
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
Creates a model with three fields
..
from cargo import *
# Model object
class UserModel(Model):
# Field objects
uid = Int(primary=True)
username = Username(not_null=True)
password = Password(not_null=True)
..
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
Manipulate fields in the model
..
user = UserModel(username="jared", password="coolpasswordbrah")
print(user['password'])
..
|$pbkdf2-sha512$19083$VsoZY6y1NmYsZWxtDQEAoBQCoJRSaq01BiAEQMg5JwQg5Pxf...|
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
Set values via |__setitem__|
..
user['uid'] = 1234
print(user['uid'])
..
|1234|
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
Creates expressions for querying
..
# Saves the model to the DB
user.save()
# Queries the DB
user.where(
(user.username == 'jared') |
(user.username.like('jare%'))
)
user.select(user.uid, user.username)
..
|{'uid': 1234, 'username': 'jared'}|
--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--·--
The MIT License (MIT) © 2015 Jared Lunde
http://github.com/jaredlunde/cargo-orm
"""
from cargo.fields.field import Field
from cargo.fields.binary import *
from cargo.fields.bit import *
from cargo.fields.boolean import *
from cargo.fields.character import *
from cargo.fields.datetimes import *
from cargo.fields.encrypted import *
from cargo.fields.extras import *
from cargo.fields.geometry import *
from cargo.fields.identifier import *
from cargo.fields.integer import *
from cargo.fields.keyvalue import *
from cargo.fields.networking import *
from cargo.fields.numeric import *
from cargo.fields.ranges import *
from cargo.fields.sequence import *
| jaredlunde/cargo-orm | cargo/fields/__init__.py | Python | mit | 2,138 |
#@result Submitted a few seconds ago • Score: 10.00 Status: Accepted Test Case #0: 0s Test Case #1: 0.01s Test Case #2: 0s Test Case #3: 0s Test Case #4: 0.01s Test Case #5: 0s
# HackerRank "Check Subset" solution. NOTE: Python 2 only (raw_input and
# the print statement); this will not run under Python 3.
# For each test case: read |A| and A's elements, then |B| and B's elements,
# and print True when A is a subset of B (A <= B iff B == B | A).
for i in range(int(raw_input())): #More than 4 lines will result in 0 score. Blank lines won't be counted.
    a = int(raw_input()); A = set(raw_input().split())  # sizes a/b are read but unused
    b = int(raw_input()); B = set(raw_input().split())
    print B == B.union(A)
| FeiZhan/Algo-Collection | answers/hackerrank/Check Subset.py | Python | mit | 423 |
# Create your views here.
from django.shortcuts import render_to_response
from django.conf import settings
from django.template import RequestContext
from django import http
from django.utils import translation
from models import Page
def page(request, page_to_render):
    """Render the public Page whose slug is ``page_to_render``.

    Raises Http404 when no public page with that slug exists.
    """
    try:
        requested = Page.objects.get(slug=page_to_render, public=True)
    except Page.DoesNotExist:
        raise http.Http404('%s page requested but not found' % page_to_render)
    return render_to_response(
        "page.html",
        {"request": request, "page": requested},
        context_instance=RequestContext(request)
    )
def langpage(request, lang, page_to_trans):
    """Activate translation language ``lang``, then render the page."""
    translation.activate(lang)
    return page(request, page_to_trans)
def set_language(request):
    """Switch the active language and redirect back.

    The redirect target is taken from the ``next`` parameter, falling back
    to the HTTP referer and finally to ``/``.  On GET requests a valid
    ``language`` code is stored in the session when one exists, otherwise
    in the language cookie.
    """
    target = (request.REQUEST.get('next', None)
              or request.META.get('HTTP_REFERER', None)
              or '/')
    response = http.HttpResponseRedirect(target)
    if request.method != 'GET':
        return response
    lang_code = request.GET.get('language', None)
    if not (lang_code and translation.check_for_language(lang_code)):
        return response
    if hasattr(request, 'session'):
        request.session['django_language'] = lang_code
    else:
        response.set_cookie(settings.LANGUAGE_COOKIE_NAME, lang_code)
    return response
| seantis/seantis-questionnaire | questionnaire/page/views.py | Python | bsd-3-clause | 1,332 |
"""
Functions in the ``as*array`` family that promote array-likes into arrays.
`require` fits this category despite its name not matching this pattern.
"""
from __future__ import division, absolute_import, print_function
from .overrides import set_module
from .multiarray import array
__all__ = [
"asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require",
]
@set_module('numpy')
def asarray(a, dtype=None, order=None):
    """Convert the input to an array.

    Parameters
    ----------
    a : array_like
        Input data in any form convertible to an array: lists, tuples,
        nested sequences of these, scalars and ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) memory
        representation. Defaults to 'C'.

    Returns
    -------
    out : ndarray
        Array interpretation of `a`. No copy is performed if the input
        is already an ndarray with matching dtype and order. Unlike
        `asanyarray`, ndarray subclasses are converted to base-class
        ndarrays.

    See Also
    --------
    asanyarray : Similar function which passes through subclasses.
    ascontiguousarray, asfortranarray, asarray_chkfinite, require
    """
    # copy=False lets compatible ndarray inputs pass through untouched.
    return array(a, dtype=dtype, copy=False, order=order)
@set_module('numpy')
def asanyarray(a, dtype=None, order=None):
    """Convert the input to an ndarray, but pass ndarray subclasses through.

    Parameters
    ----------
    a : array_like
        Input data in any form convertible to an array, including
        scalars, (nested) sequences and ndarrays.
    dtype : data-type, optional
        By default, the data-type is inferred from the input data.
    order : {'C', 'F'}, optional
        Row-major (C-style) or column-major (Fortran-style) memory
        representation. Defaults to 'C'.

    Returns
    -------
    out : ndarray or an ndarray subclass
        Array interpretation of `a`. If `a` is an ndarray or an ndarray
        subclass, it is returned as-is and no copy is performed.

    See Also
    --------
    asarray : Similar function which always returns base-class ndarrays.
    ascontiguousarray, asfortranarray, require
    """
    # subok=True preserves subclasses such as np.recarray and np.matrix.
    return array(a, dtype=dtype, copy=False, order=order, subok=True)
@set_module('numpy')
def ascontiguousarray(a, dtype=None):
    """
    Return a contiguous array (ndim >= 1) in memory (C order).

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        Data-type of returned array.

    Returns
    -------
    out : ndarray
        Contiguous array of same shape and content as `a`, with type
        `dtype` if specified.

    See Also
    --------
    asfortranarray : Convert input to an ndarray with column-major
                     memory order.
    require, ndarray.flags

    Notes
    -----
    Because of ``ndmin=1`` the result always has at least one dimension,
    so 0-d arrays are not preserved.
    """
    return array(a, dtype=dtype, copy=False, order='C', ndmin=1)
@set_module('numpy')
def asfortranarray(a, dtype=None):
    """
    Return an array (ndim >= 1) laid out in Fortran order in memory.

    Parameters
    ----------
    a : array_like
        Input array.
    dtype : str or dtype object, optional
        By default, the data-type is inferred from the input data.

    Returns
    -------
    out : ndarray
        The input `a` in Fortran, or column-major, order.

    See Also
    --------
    ascontiguousarray : Convert input to a contiguous (C order) array.
    require, ndarray.flags

    Notes
    -----
    Because of ``ndmin=1`` the result always has at least one dimension,
    so 0-d arrays are not preserved.
    """
    return array(a, dtype=dtype, copy=False, order='F', ndmin=1)
@set_module('numpy')
def require(a, dtype=None, requirements=None):
    """
    Return an ndarray of the provided type that satisfies requirements.

    Useful to guarantee an array with the correct flags before handing it
    to compiled code (e.g. through ctypes).

    Parameters
    ----------
    a : array_like
        The object to be converted to a type-and-requirement-satisfying
        array.
    dtype : data-type
        The required data-type. If None, preserve the current dtype.
    requirements : str or list of str
        Any of 'F_CONTIGUOUS' ('F'), 'C_CONTIGUOUS' ('C'),
        'ALIGNED' ('A'), 'WRITEABLE' ('W'), 'OWNDATA' ('O') and
        'ENSUREARRAY' ('E').

    Returns
    -------
    out : ndarray
        Array with the specified requirements and type, copied only when
        necessary to satisfy them.

    Raises
    ------
    ValueError
        If both 'C' and 'F' order are requested.

    See Also
    --------
    asarray, asanyarray, ascontiguousarray, asfortranarray, ndarray.flags
    """
    # Canonical single-letter code for every accepted spelling.
    flag_aliases = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
                    'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
                    'A': 'A', 'ALIGNED': 'A',
                    'W': 'W', 'WRITEABLE': 'W',
                    'O': 'O', 'OWNDATA': 'O',
                    'E': 'E', 'ENSUREARRAY': 'E'}
    if not requirements:
        return asanyarray(a, dtype=dtype)

    wanted = {flag_aliases[req.upper()] for req in requirements}

    # 'E' selects a base-class ndarray rather than being a flag to check.
    subok = 'E' not in wanted
    wanted.discard('E')

    if {'C', 'F'} <= wanted:
        raise ValueError('Cannot specify both "C" and "F" order')
    if 'F' in wanted:
        order = 'F'
        wanted.discard('F')
    elif 'C' in wanted:
        order = 'C'
        wanted.discard('C')
    else:
        order = 'A'

    arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
    # One copy in the requested order is enough to satisfy any remaining
    # unmet flag (ALIGNED / WRITEABLE / OWNDATA).
    for flag in wanted:
        if not arr.flags[flag]:
            arr = arr.copy(order)
            break
    return arr
| jorisvandenbossche/numpy | numpy/core/_asarray.py | Python | bsd-3-clause | 9,940 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_link_status
version_added: "2.4"
short_description: Get interface link status on HUAWEI CloudEngine switches.
description:
- Get interface link status on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@QijunPan)
notes:
- Current physical state shows an interface's physical status.
- Current link state shows an interface's link layer protocol status.
- Current IPv4 state shows an interface's IPv4 protocol status.
- Current IPv6 state shows an interface's IPv6 protocol status.
- Inbound octets(bytes) shows the number of bytes that an interface received.
- Inbound unicast(pkts) shows the number of unicast packets that an interface received.
- Inbound multicast(pkts) shows the number of multicast packets that an interface received.
- Inbound broadcast(pkts) shows the number of broadcast packets that an interface received.
- Inbound error(pkts) shows the number of error packets that an interface received.
- Inbound drop(pkts) shows the total number of packets that were sent to the interface but dropped by an interface.
- Inbound rate(byte/sec) shows the rate at which an interface receives bytes within an interval.
- Inbound rate(pkts/sec) shows the rate at which an interface receives packets within an interval.
- Outbound octets(bytes) shows the number of the bytes that an interface sent.
- Outbound unicast(pkts) shows the number of unicast packets that an interface sent.
- Outbound multicast(pkts) shows the number of multicast packets that an interface sent.
- Outbound broadcast(pkts) shows the number of broadcast packets that an interface sent.
- Outbound error(pkts) shows the total number of packets that an interface sent but dropped by the remote interface.
- Outbound drop(pkts) shows the number of dropped packets that an interface sent.
- Outbound rate(byte/sec) shows the rate at which an interface sends bytes within an interval.
- Outbound rate(pkts/sec) shows the rate at which an interface sends packets within an interval.
- Speed shows the rate for an Ethernet interface.
options:
interface:
description:
- For the interface parameter, you can enter C(all) to display information about all interfaces,
an interface type such as C(40GE) to display information about interfaces of the specified type,
or full name of an interface such as C(40GE1/0/22) or C(vlanif10)
to display information about the specific interface.
required: true
'''
EXAMPLES = '''
- name: Link status test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Get specified interface link status information
ce_link_status:
interface: 40GE1/0/1
provider: "{{ cli }}"
- name: Get specified interface type link status information
ce_link_status:
interface: 40GE
provider: "{{ cli }}"
- name: Get all interfaces link status information
ce_link_status:
interface: all
provider: "{{ cli }}"
'''
RETURN = '''
result:
description: Interface link status information
returned: always
type: dict
sample: {
"40ge2/0/8": {
"Current IPv4 state": "down",
"Current IPv6 state": "down",
"Current link state": "up",
"Current physical state": "up",
"Inbound broadcast(pkts)": "0",
"Inbound drop(pkts)": "0",
"Inbound error(pkts)": "0",
"Inbound multicast(pkts)": "20151",
"Inbound octets(bytes)": "7314813",
"Inbound rate(byte/sec)": "11",
"Inbound rate(pkts/sec)": "0",
"Inbound unicast(pkts)": "0",
"Outbound broadcast(pkts)": "1",
"Outbound drop(pkts)": "0",
"Outbound error(pkts)": "0",
"Outbound multicast(pkts)": "20152",
"Outbound octets(bytes)": "7235021",
"Outbound rate(byte/sec)": "11",
"Outbound rate(pkts/sec)": "0",
"Outbound unicast(pkts)": "0",
"Speed": "40GE"
}
}
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import ce_argument_spec, get_nc_config, get_nc_next
CE_NC_GET_PORT_SPEED = """
<filter type="subtree">
<devm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ports>
<port>
<position>%s</position>
<ethernetPort>
<speed></speed>
</ethernetPort>
</port>
</ports>
</devm>
</filter>
"""
CE_NC_GET_INT_STATISTICS = """
<filter type="subtree">
<ifm xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<interfaces>
<interface>
<ifName>%s</ifName>
<ifDynamicInfo>
<ifPhyStatus></ifPhyStatus>
<ifLinkStatus></ifLinkStatus>
<ifV4State></ifV4State>
<ifV6State></ifV6State>
</ifDynamicInfo>
<ifStatistics>
<receiveByte></receiveByte>
<sendByte></sendByte>
<rcvUniPacket></rcvUniPacket>
<rcvMutiPacket></rcvMutiPacket>
<rcvBroadPacket></rcvBroadPacket>
<sendUniPacket></sendUniPacket>
<sendMutiPacket></sendMutiPacket>
<sendBroadPacket></sendBroadPacket>
<rcvErrorPacket></rcvErrorPacket>
<rcvDropPacket></rcvDropPacket>
<sendErrorPacket></sendErrorPacket>
<sendDropPacket></sendDropPacket>
</ifStatistics>
<ifClearedStat>
<inByteRate></inByteRate>
<inPacketRate></inPacketRate>
<outByteRate></outByteRate>
<outPacketRate></outPacketRate>
</ifClearedStat>
</interface>
</interfaces>
</ifm>
</filter>
"""
INTERFACE_ALL = 1
INTERFACE_TYPE = 2
INTERFACE_FULL_NAME = 3
def get_interface_type(interface):
    """Gets the type of interface, such as 10GE, ETH-TRUNK, VLANIF...

    Returns the lower-case type string, or None when `interface` is None
    or its prefix is not a recognised CloudEngine interface type.
    """
    if interface is None:
        return None

    # Ordered (prefix, type) pairs; the first matching prefix wins,
    # preserving the precedence of the original if/elif chain
    # (e.g. ETH-TRUNK is tested before ETHERNET).
    prefix_types = (
        ('GE', 'ge'),
        ('10GE', '10ge'),
        ('25GE', '25ge'),
        ('4X10GE', '4x10ge'),
        ('40GE', '40ge'),
        ('100GE', '100ge'),
        ('VLANIF', 'vlanif'),
        ('LOOPBACK', 'loopback'),
        ('METH', 'meth'),
        ('ETH-TRUNK', 'eth-trunk'),
        ('VBDIF', 'vbdif'),
        ('NVE', 'nve'),
        ('TUNNEL', 'tunnel'),
        ('ETHERNET', 'ethernet'),
        ('FCOE-PORT', 'fcoe-port'),
        ('FABRIC-PORT', 'fabric-port'),
        ('STACK-PORT', 'stack-port'),
        ('NULL', 'null'),
    )
    name = interface.upper()
    for prefix, if_type in prefix_types:
        if name.startswith(prefix):
            return if_type
    return None
def is_ethernet_port(interface):
    """Judge whether it is ethernet port"""
    # Physical Ethernet types; logical interfaces (vlanif, tunnel, ...)
    # and unrecognised names (type None) fall through to False.
    return get_interface_type(interface) in (
        'ge', '10ge', '25ge', '4x10ge', '40ge', '100ge', 'meth')
class LinkStatus(object):
"""Get interface link status information"""
def __init__(self, argument_spec):
    """Build the AnsibleModule and normalise the 'interface' parameter."""
    self.spec = argument_spec
    self.module = None
    self.init_module()
    # interface name
    self.interface = self.module.params['interface']
    # Strip spaces and lower-case so names compare uniformly.
    self.interface = self.interface.replace(' ', '').lower()
    self.param_type = None
    # NOTE(review): if_type is initialised to None and check_params()
    # requires it to be truthy for any interface other than 'all' --
    # confirm it is assigned by code outside this chunk before that runs.
    self.if_type = None
    # state
    self.results = dict()  # final payload passed to exit_json
    self.result = dict()   # per-interface link-status details
def check_params(self):
    """Check all input params"""
    # The interface name must be non-empty ...
    if not self.interface:
        self.module.fail_json(msg='Error: Interface name cannot be empty.')
    # ... and, unless it is the literal 'all', must have resolved to a
    # known interface type (see the if_type note on __init__).
    if self.interface and self.interface != 'all':
        if not self.if_type:
            self.module.fail_json(
                msg='Error: Interface name of %s is error.' % self.interface)
def init_module(self):
    """Init module object"""
    # Check mode is supported: this module only reads device state.
    self.module = AnsibleModule(
        argument_spec=self.spec, supports_check_mode=True)
def show_result(self):
    """Show result"""
    # exit_json terminates module execution and emits the results dict.
    self.results['result'] = self.result
    self.module.exit_json(**self.results)
def get_intf_dynamic_info(self, dyn_info, intf_name):
    """Copy interface dynamic status fields into self.result[intf_name].

    `dyn_info` is an iterable of parsed XML elements; only the four known
    status tags are recorded, all other tags are ignored.
    """
    if not intf_name:
        return
    # XML tag -> human-readable label used in the module output.
    labels = {
        'ifPhyStatus': 'Current physical state',
        'ifLinkStatus': 'Current link state',
        'ifV4State': 'Current IPv4 state',
        'ifV6State': 'Current IPv6 state',
    }
    if dyn_info:
        for node in dyn_info:
            label = labels.get(node.tag)
            if label is not None:
                self.result[intf_name][label] = node.text
def get_intf_statistics_info(self, stat_info, intf_name):
    """Copy interface traffic counters into self.result[intf_name].

    Logical interface types (fcoe-port, nve, tunnel, vbdif, vlanif) carry
    no hardware counters and are skipped entirely.
    """
    if not intf_name:
        return
    if_type = get_interface_type(intf_name)
    if if_type in ('fcoe-port', 'nve', 'tunnel', 'vbdif', 'vlanif'):
        return
    # XML counter tag -> human-readable label used in the module output.
    labels = {
        'receiveByte': 'Inbound octets(bytes)',
        'rcvUniPacket': 'Inbound unicast(pkts)',
        'rcvMutiPacket': 'Inbound multicast(pkts)',
        'rcvBroadPacket': 'Inbound broadcast(pkts)',
        'rcvErrorPacket': 'Inbound error(pkts)',
        'rcvDropPacket': 'Inbound drop(pkts)',
        'sendByte': 'Outbound octets(bytes)',
        'sendUniPacket': 'Outbound unicast(pkts)',
        'sendMutiPacket': 'Outbound multicast(pkts)',
        'sendBroadPacket': 'Outbound broadcast(pkts)',
        'sendErrorPacket': 'Outbound error(pkts)',
        'sendDropPacket': 'Outbound drop(pkts)',
    }
    if stat_info:
        for node in stat_info:
            label = labels.get(node.tag)
            if label is not None:
                self.result[intf_name][label] = node.text
def get_intf_cleared_stat(self, clr_stat, intf_name):
    """Copy interface rate statistics into self.result[intf_name].

    Logical interface types (fcoe-port, nve, tunnel, vbdif, vlanif) carry
    no rate counters and are skipped entirely.
    """
    if not intf_name:
        return
    if_type = get_interface_type(intf_name)
    if if_type in ('fcoe-port', 'nve', 'tunnel', 'vbdif', 'vlanif'):
        return
    # XML rate tag -> human-readable label used in the module output.
    labels = {
        'inByteRate': 'Inbound rate(byte/sec)',
        'inPacketRate': 'Inbound rate(pkts/sec)',
        'outByteRate': 'Outbound rate(byte/sec)',
        'outPacketRate': 'Outbound rate(pkts/sec)',
    }
    if clr_stat:
        for node in clr_stat:
            label = labels.get(node.tag)
            if label is not None:
                self.result[intf_name][label] = node.text
def get_all_interface_info(self, intf_type=None):
    """Get interface information for all interfaces, or for one type.

    Issues a NETCONF get-next for every interface, then dispatches each
    child element (name, dynamic info, statistics, cleared statistics)
    to the matching per-interface collector.  When ``intf_type`` is
    given, interfaces of other types are skipped; if none match, the
    module fails.
    """
    # An empty name in the filter template requests every interface.
    xml_str = CE_NC_GET_INT_STATISTICS % ''
    con_obj = get_nc_next(self.module, xml_str)
    if "<data/>" in con_obj:
        # Device returned no interface data at all.
        return

    # Strip newlines and XML namespaces so plain tag names can be used below.
    xml_str = con_obj.replace('\r', '').replace('\n', '').\
        replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
        replace('xmlns="http://www.huawei.com/netconf/vrp"', "")

    # get link status information
    root = ElementTree.fromstring(xml_str)
    intfs_info = root.findall("ifm/interfaces/interface")
    if not intfs_info:
        return

    intf_name = ''
    # flag records whether at least one interface matched intf_type.
    flag = False
    for eles in intfs_info:
        if eles.tag == "interface":
            for ele in eles:
                if ele.tag in ["ifName", "ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
                    if ele.tag == "ifName":
                        intf_name = ele.text.lower()
                        if intf_type:
                            if get_interface_type(intf_name) != intf_type.lower():
                                # Wrong type: abandon this <interface>
                                # element entirely (breaks the inner loop).
                                break
                            else:
                                flag = True
                        self.init_interface_data(intf_name)
                        if is_ethernet_port(intf_name):
                            self.get_port_info(intf_name)
                    # NOTE: relies on <ifName> appearing before the info
                    # elements so intf_name is set when they are processed.
                    if ele.tag == "ifDynamicInfo":
                        self.get_intf_dynamic_info(ele, intf_name)
                    elif ele.tag == "ifStatistics":
                        self.get_intf_statistics_info(ele, intf_name)
                    elif ele.tag == "ifClearedStat":
                        self.get_intf_cleared_stat(ele, intf_name)

    if intf_type and not flag:
        self.module.fail_json(
            msg='Error: %s interface type does not exist.' % intf_type.upper())
def get_interface_info(self):
    """Query a single, fully named interface over NETCONF.

    Fails the module when the interface does not exist; otherwise fills
    ``self.result`` with the interface's dynamic state, statistics and
    cleared-statistics information.
    """
    xml_str = CE_NC_GET_INT_STATISTICS % self.interface.upper()
    con_obj = get_nc_config(self.module, xml_str)
    if "<data/>" in con_obj:
        self.module.fail_json(
            msg='Error: %s interface does not exist.' % self.interface.upper())
        return

    # Strip newlines and XML namespaces so plain tag names can be used below.
    xml_str = con_obj.replace('\r', '').replace('\n', '').\
        replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
        replace('xmlns="http://www.huawei.com/netconf/vrp"', "")

    # get link status information
    root = ElementTree.fromstring(xml_str)
    intf_info = root.find("ifm/interfaces/interface")
    # Compare against None explicitly: ElementTree elements are falsy when
    # they have no children, so a bare `if intf_info:` would wrongly skip
    # an existing-but-empty element (element truthiness is deprecated).
    if intf_info is not None:
        for eles in intf_info:
            if eles.tag in ["ifDynamicInfo", "ifStatistics", "ifClearedStat"]:
                if eles.tag == "ifDynamicInfo":
                    self.get_intf_dynamic_info(eles, self.interface)
                elif eles.tag == "ifStatistics":
                    self.get_intf_statistics_info(eles, self.interface)
                elif eles.tag == "ifClearedStat":
                    self.get_intf_cleared_stat(eles, self.interface)
def init_interface_data(self, intf_name):
    """Seed ``self.result[intf_name]`` with default placeholder values.

    Link states start as ``'down'`` and every counter/rate/speed field
    starts as ``'--'`` (not available) until the device reports real
    values.
    """
    data = dict()
    # Link states default to down until the device reports otherwise.
    for state_key in ('Current physical state', 'Current link state',
                      'Current IPv4 state', 'Current IPv6 state'):
        data[state_key] = 'down'
    # Counters, rates and speed default to '--' (not available).  The
    # insertion order below mirrors the display order of the results.
    for counter_key in ('Inbound octets(bytes)', 'Inbound unicast(pkts)',
                        'Inbound multicast(pkts)', 'Inbound broadcast(pkts)',
                        'Inbound error(pkts)', 'Inbound drop(pkts)',
                        'Inbound rate(byte/sec)', 'Inbound rate(pkts/sec)',
                        'Outbound octets(bytes)', 'Outbound unicast(pkts)',
                        'Outbound multicast(pkts)', 'Outbound broadcast(pkts)',
                        'Outbound error(pkts)', 'Outbound drop(pkts)',
                        'Outbound rate(byte/sec)', 'Outbound rate(pkts/sec)',
                        'Speed'):
        data[counter_key] = '--'
    self.result[intf_name] = data
def get_port_info(self, interface):
    """Fetch the configured speed of a physical port and store it.

    Stores the value in ``self.result[interface]['Speed']``; leaves the
    '--' default in place when the device returns no port data.
    """
    if_type = get_interface_type(interface)
    # The management port is named with a mixed-case 'MEth' prefix on the
    # device, while everything else is queried in upper case.
    if if_type == 'meth':
        xml_str = CE_NC_GET_PORT_SPEED % interface.lower().replace('meth', 'MEth')
    else:
        xml_str = CE_NC_GET_PORT_SPEED % interface.upper()

    con_obj = get_nc_config(self.module, xml_str)
    if "<data/>" in con_obj:
        return

    # Strip newlines and XML namespaces so plain tag names can be used below.
    xml_str = con_obj.replace('\r', '').replace('\n', '').\
        replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
        replace('xmlns="http://www.huawei.com/netconf/vrp"', "")

    # get link status information
    root = ElementTree.fromstring(xml_str)
    port_info = root.find("devm/ports/port")
    # Compare against None explicitly: ElementTree elements are falsy when
    # they have no children, so a bare `if port_info:` is unreliable (and
    # element truthiness is deprecated).
    if port_info is not None:
        for eles in port_info:
            if eles.tag == "ethernetPort":
                for ele in eles:
                    if ele.tag == 'speed':
                        self.result[interface]['Speed'] = ele.text
def get_link_status(self):
    """Collect link status according to the detected parameter type.

    A full interface name is queried directly; a bare interface type
    queries all interfaces of that type; anything else queries every
    interface.
    """
    if self.param_type == INTERFACE_FULL_NAME:
        # Single named interface: query it directly.
        self.init_interface_data(self.interface)
        self.get_interface_info()
        if is_ethernet_port(self.interface):
            self.get_port_info(self.interface)
        return

    if self.param_type == INTERFACE_TYPE:
        self.get_all_interface_info(self.interface)
    else:
        self.get_all_interface_info()
def get_intf_param_type(self):
    """Classify the ``interface`` parameter and set ``self.param_type``.

    ``'all'`` selects every interface; a value equal to its own detected
    type (e.g. ``'40ge'``) selects by type; anything else is treated as
    a full interface name.
    """
    if self.interface == 'all':
        self.param_type = INTERFACE_ALL
    elif self.if_type == self.interface:
        # The parameter is a bare interface type rather than a full name.
        self.param_type = INTERFACE_TYPE
    else:
        self.param_type = INTERFACE_FULL_NAME
def work(self):
    """Worker entry point: validate input, collect results, show them."""
    # Detect the interface type first; later steps depend on it.
    self.if_type = get_interface_type(self.interface)
    self.check_params()
    self.get_intf_param_type()
    self.get_link_status()
    self.show_result()
def main():
    """Main function entry"""
    # Only one module argument: the interface name, type, or 'all'.
    argument_spec = dict(
        interface=dict(required=True, type='str'),
    )
    # Merge in the common CloudEngine connection arguments.
    argument_spec.update(ce_argument_spec)
    linkstatus_obj = LinkStatus(argument_spec)
    linkstatus_obj.work()


if __name__ == '__main__':
    main()
| pdellaert/ansible | lib/ansible/modules/network/cloudengine/ce_link_status.py | Python | gpl-3.0 | 22,137 |
#!/usr/bin/env python
"""Displays the status of the build using these hip new shields everyone uses
"""
from shields import ShieldStatusResource
def bind(webstatus, path="badge", left_text=None, left_color=None,
         template=None, font_face=None, font_size=None, color_scheme=None):
    """Installs the ShieldStatusResource in the given WebStatus instance.

    Any argument left as None falls back to the corresponding entry in
    ``ShieldStatusResource.defaults``.  One resource is registered per
    supported image type (``<path>.png`` and ``<path>.svg``).  Returns
    the ``webstatus`` instance for chaining.
    """
    defaults = ShieldStatusResource.defaults
    # Resolve every argument against the resource defaults up front.
    resolved = {
        'left_text': left_text or defaults['left_text'],
        'left_color': left_color or defaults['left_color'],
        'template_name': template or defaults['template_name'],
        'font_face': font_face or defaults['font_face'],
        'font_size': font_size or defaults['font_size'],
        'color_scheme': color_scheme or defaults['color_scheme'],
    }

    for filetype in ("png", "svg"):
        webstatus.putChild("%s.%s" % (path, filetype),
                           ShieldStatusResource(webstatus, **resolved))

    return webstatus
| thefinn93/BuildbotStatusShields | BuildbotStatusShields/__init__.py | Python | gpl-2.0 | 1,425 |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from random import randint
import time
import unittest
from qa.web_tests import config
class TestRenameInstance(unittest.TestCase):
    """End-to-end UI test: create an instance, rename it, then delete it."""

    def setUp(self):
        """Start a fresh Firefox session before every test."""
        self.base_url = config.base_url
        self.verificationErrors = []
        self.accept_next_alert = True
        self.driver = webdriver.Firefox()
        self.driver.implicitly_wait(config.implicitly_wait)

    def test_rename_instance(self):
        """Launch an instance, rename it, verify the new name, clean up."""
        driver = self.driver
        driver.maximize_window()
        driver.get(self.base_url + "/")
        driver.find_element_by_name("username").send_keys(config.username)
        driver.find_element_by_name("password").send_keys(config.password)
        driver.find_element_by_css_selector("input.loginSubmit").click()
        driver.find_element_by_id("launchInstance").click()
        driver.find_element_by_id("input_instCreate_name").clear()
        # Random suffix keeps parallel/repeated runs from colliding.
        instance_name = "Test_instance_%s" % str(randint(10, 10000))
        driver.find_element_by_id("input_instCreate_name") \
            .send_keys(instance_name)
        Move = ActionChains(driver).move_to_element(
            driver.find_element_by_xpath(
                '//*[@id="table_instanceSource"]/tbody/tr[5]/td[2]/button'))
        Move.perform()
        driver.find_element_by_xpath(
            '//*[@id="table_instanceSource"]/tbody/tr[5]/td[2]/button').click()
        Move = ActionChains(driver).move_to_element(
            driver.find_element_by_xpath(config.flavor_xpath))
        Move.perform()
        driver.find_element_by_xpath(config.flavor_xpath).click()
        driver.find_element_by_id("submit").click()
        self.assertTrue(self.is_element_present(By.LINK_TEXT, instance_name))
        driver.find_element_by_xpath('//*[text()="%s"]' % instance_name)
        # Poll until the instance leaves the "Build" state.
        while self.is_element_present(
                By.XPATH, '//*[text()="%s"]/..//..//*[text()="Build"]'
                % instance_name):
            time.sleep(5)
            driver.refresh()
        self.is_element_present(
            By.XPATH, '//*[text()="%s"]/..//..//*[text()="Active"]'
            % instance_name)
        driver.find_element_by_link_text(instance_name).click()
        driver.find_element_by_id("rename").click()
        driver.find_element_by_id("name").clear()
        driver.find_element_by_id("name").send_keys("%s_edited" % instance_name)
        driver.find_element_by_id("submit").click()
        driver.find_element_by_id("upButton").click()
        self.assertTrue(self.is_element_present(By.LINK_TEXT, "%s_edited" % instance_name))
        driver.find_element_by_link_text("%s_edited" % instance_name).click()
        driver.find_element_by_id('terminate').click()
        driver.find_element_by_id('btn-confirm').click()
        # Poll until the terminated instance is no longer "Active".
        while self.is_element_present(By.XPATH,
                '//*[text()="%s"]/..//..//*[text()="Active"]' % instance_name):
            time.sleep(5)
            driver.refresh()
        self.assertFalse(self.is_element_present(By.LINK_TEXT, instance_name))

    def is_element_present(self, how, what):
        """Return True if the element located by (how, what) exists."""
        try:
            self.driver.find_element(by=how, value=what)
        # Python-3-compatible except clause; the exception object itself
        # was never used, so it is no longer bound.
        except NoSuchElementException:
            return False
        return True

    def tearDown(self):
        """Capture a screenshot, close the browser, and report errors."""
        self.driver.save_screenshot(config.screen_path)
        self.driver.quit()
        self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
    # Allow the module to be executed directly as a test script.
    unittest.main()
| paypal/aurora | qa/web_tests/tests/instances/test_rename_instance.py | Python | apache-2.0 | 3,630 |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import platform
import distro
def get_os():
    """Return a human-readable operating system name/version string."""
    # Prefer the distro information; it is empty on non-linux/bsd systems.
    distro_info = f'{distro.name()} {distro.version()}'.strip()
    if distro_info:
        return distro_info
    # fallback for non-linux/bsd systems (ie mac os)
    return f'{platform.system()} {platform.release()}'.strip()
| pferreir/indico | indico/util/system.py | Python | mit | 473 |
#!/usr/bin/env python
import glob
import copy
import cv2
import cv_bridge
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import Int32, Float32, String
import rospkg
class Hear_orders:
    """Bridge ROS speech recognition to the robot's emotion topic.

    Subscribes to ``/speech_recognition`` and publishes the fixed string
    ``"heard_an_order"`` on ``/emotion`` whenever any utterance arrives.
    """

    def __init__(self):
        # Any recognized speech triggers publish_emotion below.
        self.speech_subscriber = rospy.Subscriber("/speech_recognition", String, self.publish_emotion)
        self.emotion_publisher = rospy.Publisher("/emotion", String, queue_size=10)
        # self.timer = rospy.Timer(rospy.Duration(self.velocity), self.timer_cb)

    def publish_emotion(self, data):
        # The recognized text (`data`) is ignored: any utterance counts
        # as an order having been heard.
        self.emotion_publisher.publish("heard_an_order")
def main():
    """Initialise the hearing node and spin until ROS shutdown."""
    rospy.init_node('hearing_node', anonymous=True)
    rate = rospy.Rate(30)
    # NOTE(review): rospack is only needed for the commented-out data
    # path below; it is currently unused.
    rospack = rospkg.RosPack()
    # path = rospack.get_path('baxter_face_animation') + "/data/"
    # The instance keeps itself alive via its subscriber callback.
    Hear_orders()
    while not rospy.is_shutdown():
        rate.sleep()


if __name__ == "__main__":
    main()
| UCRoboticsLab/BaxterTictactoe | src/baxter_face_animation/src/baxter_face_animation/hear_words.py | Python | apache-2.0 | 926 |
from __future__ import unicode_literals
import logging
import six
from rbtools.api.errors import APIError
from rbtools.clients.errors import MergeError, PushError
from rbtools.commands import Command, CommandError, Option, RB_MAIN
from rbtools.utils.commands import (build_rbtools_cmd_argv,
extract_commit_message)
from rbtools.utils.console import confirm
from rbtools.utils.graphs import toposort
from rbtools.utils.process import execute
from rbtools.utils.review_request import (get_draft_or_current_value,
get_revisions,
guess_existing_review_request,
parse_review_request_url)
class Land(Command):
    """Land changes from a review request onto the remote repository.

    This command takes a review request, applies it to a feature branch,
    merges it with the specified destination branch, and pushes the
    changes to an upstream repository.

    Notes:
        The review request needs to be approved first.

        ``--local`` option can be used to skip the patching step.
    """

    name = 'land'
    author = 'The Review Board Project'

    needs_api = True
    needs_scm_client = True
    needs_repository = True

    args = '[<branch-name>]'
    option_list = [
        Option('--dest',
               dest='destination_branch',
               default=None,
               config_key='LAND_DEST_BRANCH',
               help='Specifies the destination branch to land changes on.'),
        Option('-r', '--review-request-id',
               dest='rid',
               metavar='ID',
               default=None,
               help='Specifies the review request ID.'),
        Option('--local',
               dest='is_local',
               action='store_true',
               default=None,
               help='Forces the change to be merged without patching, if '
                    'merging a local branch. Defaults to true unless '
                    '--review-request-id is used.'),
        Option('-p', '--push',
               dest='push',
               action='store_true',
               default=False,
               config_key='LAND_PUSH',
               help='Pushes the branch after landing the change.'),
        Option('-n', '--no-push',
               dest='push',
               action='store_false',
               default=False,
               config_key='LAND_PUSH',
               help='Prevents pushing the branch after landing the change, '
                    'if pushing is enabled by default.'),
        Option('--squash',
               dest='squash',
               action='store_true',
               default=False,
               config_key='LAND_SQUASH',
               help='Squashes history into a single commit.'),
        Option('--no-squash',
               dest='squash',
               action='store_false',
               default=False,
               config_key='LAND_SQUASH',
               help='Disables squashing history into a single commit, '
                    'choosing instead to merge the branch, if squashing is '
                    'enabled by default.'),
        Option('-e', '--edit',
               dest='edit',
               action='store_true',
               default=False,
               help='Invokes the editor to edit the commit message before '
                    'landing the change.'),
        Option('--delete-branch',
               dest='delete_branch',
               action='store_true',
               config_key='LAND_DELETE_BRANCH',
               default=True,
               help="Deletes the local branch after it's landed. Only used if "
                    "landing a local branch. This is the default."),
        Option('--no-delete-branch',
               dest='delete_branch',
               action='store_false',
               config_key='LAND_DELETE_BRANCH',
               default=True,
               help="Prevents the local branch from being deleted after it's "
                    "landed."),
        Option('--dry-run',
               dest='dry_run',
               action='store_true',
               default=False,
               help='Simulates the landing of a change, without actually '
                    'making any changes to the tree.'),
        Option('--recursive',
               dest='recursive',
               action='store_true',
               default=False,
               help='Recursively fetch patches for review requests that the '
                    'specified review request depends on. This is equivalent '
                    'to calling "rbt patch" for each of those review '
                    'requests.',
               added_in='1.0'),
        Command.server_options,
        Command.repository_options,
        Command.branch_options,
    ]

    def patch(self, review_request_id, squash=False):
        """Patch a single review request's diff using rbt patch.

        Args:
            review_request_id (int):
                The ID of the review request to patch.

            squash (bool, optional):
                Whether to squash multiple commits into a single commit.

        Raises:
            rbtools.commands.CommandError:
                There was an error applying the patch.
        """
        patch_command = [RB_MAIN, 'patch']
        patch_command.extend(build_rbtools_cmd_argv(self.options))

        # -c commits with an editor pass; -C commits without prompting.
        if self.options.edit:
            patch_command.append('-c')
        else:
            patch_command.append('-C')

        if squash:
            patch_command.append('--squash')

        patch_command.append(six.text_type(review_request_id))

        rc, output = execute(patch_command, ignore_errors=True,
                             return_error_code=True)

        if rc:
            raise CommandError('Failed to execute "rbt patch":\n%s'
                               % output)

    def can_land(self, review_request):
        """Determine if the review request is land-able.

        A review request can be landed if it is approved or, if the Review
        Board server does not keep track of approval, if the review request
        has a ship-it count.

        This function returns the error with landing the review request or None
        if it can be landed.
        """
        try:
            is_rr_approved = review_request.approved
            approval_failure = review_request.approval_failure
        except AttributeError:
            # The Review Board server is an old version (pre-2.0) that
            # doesn't support the `approved` field. Determine it manually.
            if review_request.ship_it_count == 0:
                is_rr_approved = False
                approval_failure = \
                    'The review request has not been marked "Ship It!"'
            else:
                is_rr_approved = True
        except Exception as e:
            logging.exception(
                'Unexpected error while looking up review request '
                'approval state: %s',
                e)

            return ('An error was encountered while executing the land '
                    'command.')

        # NOTE: this check used to live in a ``finally`` block.  That made
        # the unexpected-error branch above raise a NameError (neither
        # is_rr_approved nor approval_failure is bound there), clobbering
        # its return value, and a return inside ``finally`` would also
        # silently swallow any in-flight exception.
        if not is_rr_approved:
            return approval_failure

        return None

    def land(self, destination_branch, review_request, source_branch=None,
             squash=False, edit=False, delete_branch=True, dry_run=False):
        """Land an individual review request.

        Args:
            destination_branch (unicode):
                The destination branch that the change will be committed or
                merged to.

            review_request (rbtools.api.resource.ReviewRequestResource):
                The review request containing the change to land.

            source_branch (unicode, optional):
                The source branch to land, if landing from a local branch.

            squash (bool, optional):
                Whether to squash the changes on the branch, for repositories
                that support it.

            edit (bool, optional):
                Whether to edit the commit message before landing.

            delete_branch (bool, optional):
                Whether to delete/close the branch, if landing from a local
                branch.

            dry_run (bool, optional):
                Whether to simulate landing without actually changing the
                repository.
        """
        json_data = {
            'review_request': review_request.id,
            'destination_branch': destination_branch,
        }

        if source_branch:
            # Local-branch landing: merge (or squash-merge) into the
            # destination instead of applying a patch.
            review_commit_message = extract_commit_message(review_request)
            author = review_request.get_submitter()
            json_data['source_branch'] = source_branch

            if squash:
                self.stdout.write('Squashing branch "%s" into "%s".'
                                  % (source_branch, destination_branch))
                json_data['type'] = 'squash'
            else:
                self.stdout.write('Merging branch "%s" into "%s".'
                                  % (source_branch, destination_branch))
                json_data['type'] = 'merge'

            if not dry_run:
                try:
                    self.tool.merge(target=source_branch,
                                    destination=destination_branch,
                                    message=review_commit_message,
                                    author=author,
                                    squash=squash,
                                    run_editor=edit,
                                    close_branch=delete_branch)
                except MergeError as e:
                    raise CommandError(six.text_type(e))
        else:
            self.stdout.write('Applying patch from review request %s.'
                              % review_request.id)

            if not dry_run:
                self.patch(review_request.id,
                           squash=squash)

        self.stdout.write('Review request %s has landed on "%s".'
                          % (review_request.id,
                             self.options.destination_branch))

        self.json.append('landed_review_requests', json_data)

    def initialize(self):
        """Initialize the command.

        This overrides Command.initialize in order to handle full review
        request URLs on the command line. In this case, we want to parse that
        URL in order to pull the server name and review request ID out of it.

        Raises:
            rbtools.commands.CommandError:
                A review request URL passed in as the review request ID could
                not be parsed correctly or included a bad diff revision.
        """
        review_request_id = self.options.rid

        if review_request_id and review_request_id.startswith('http'):
            server_url, review_request_id, diff_revision = \
                parse_review_request_url(review_request_id)

            if diff_revision and '-' in diff_revision:
                raise CommandError('Interdiff patches are not supported: %s.'
                                   % diff_revision)

            if review_request_id is None:
                # self.options.rid still holds the original URL here; the
                # local variable was overwritten by the parse above.  The
                # %s placeholder was previously never substituted.
                raise CommandError('The URL %s does not appear to be a '
                                   'review request.' % self.options.rid)

            self.options.server = server_url
            self.options.rid = review_request_id

        super(Land, self).initialize()

    def main(self, branch_name=None, *args):
        """Run the command."""
        self.cmd_args = list(args)

        if branch_name:
            self.cmd_args.insert(0, branch_name)

        if not self.tool.can_merge:
            raise CommandError('This command does not support %s repositories.'
                               % self.tool.name)

        if self.options.push and not self.tool.can_push_upstream:
            raise CommandError('--push is not supported for %s repositories.'
                               % self.tool.name)

        if self.tool.has_pending_changes():
            raise CommandError('Working directory is not clean.')

        if not self.options.destination_branch:
            raise CommandError('Please specify a destination branch.')

        if not self.tool.can_squash_merges:
            # If the client doesn't support squashing, then never squash.
            self.options.squash = False

        if self.options.rid:
            is_local = branch_name is not None
            review_request_id = self.options.rid
        else:
            # No ID given: guess the review request from the local revisions.
            try:
                review_request = guess_existing_review_request(
                    api_root=self.api_root,
                    api_client=self.api_client,
                    tool=self.tool,
                    revisions=get_revisions(self.tool, self.cmd_args),
                    guess_summary=False,
                    guess_description=False,
                    is_fuzzy_match_func=self._ask_review_request_match,
                    repository_id=self.repository.id)
            except ValueError as e:
                raise CommandError(six.text_type(e))

            if not review_request or not review_request.id:
                raise CommandError('Could not determine the existing review '
                                   'request URL to land.')

            review_request_id = review_request.id
            is_local = True

        try:
            review_request = self.api_root.get_review_request(
                review_request_id=review_request_id)
        except APIError as e:
            raise CommandError('Error getting review request %s: %s'
                               % (review_request_id, e))

        if self.options.is_local is not None:
            is_local = self.options.is_local

        if is_local:
            if branch_name is None:
                branch_name = self.tool.get_current_branch()

            if branch_name == self.options.destination_branch:
                raise CommandError('The local branch cannot be merged onto '
                                   'itself. Try a different local branch or '
                                   'destination branch.')
        else:
            branch_name = None

        land_error = self.can_land(review_request)

        if land_error is not None:
            raise CommandError('Cannot land review request %s: %s'
                               % (review_request_id, land_error))

        land_kwargs = {
            'delete_branch': self.options.delete_branch,
            'destination_branch': self.options.destination_branch,
            'dry_run': self.options.dry_run,
            'edit': self.options.edit,
            'squash': self.options.squash,
        }

        self.json.add('landed_review_requests', [])

        if self.options.recursive:
            # The dependency graph shows us which review requests depend on
            # which other ones. What we are actually after is the order to land
            # them in, which is the topological sorting order of the converse
            # graph. It just so happens that if we reverse the topological sort
            # of a graph, it is a valid topological sorting of the converse
            # graph, so we don't have to compute the converse graph.
            dependency_graph = review_request.build_dependency_graph()
            dependencies = toposort(dependency_graph)[1:]

            if dependencies:
                self.stdout.write('Recursively landing dependencies of '
                                  'review request %s.'
                                  % review_request_id)

                # Verify every dependency is landable before touching the
                # tree, so a failure cannot leave it half-landed.
                for dependency in dependencies:
                    land_error = self.can_land(dependency)

                    if land_error is not None:
                        raise CommandError(
                            'Aborting recursive land of review request %s.\n'
                            'Review request %s cannot be landed: %s'
                            % (review_request_id, dependency.id, land_error))

                for dependency in reversed(dependencies):
                    self.land(review_request=dependency, **land_kwargs)

        self.land(review_request=review_request,
                  source_branch=branch_name,
                  **land_kwargs)

        if self.options.push:
            self.stdout.write('Pushing branch "%s" upstream'
                              % self.options.destination_branch)

            if not self.options.dry_run:
                try:
                    self.tool.push_upstream(self.options.destination_branch)
                except PushError as e:
                    raise CommandError(six.text_type(e))

    def _ask_review_request_match(self, review_request):
        """Ask the user to confirm a fuzzy review request match."""
        return confirm(
            'Land Review Request #%s: "%s"? '
            % (review_request.id,
               get_draft_or_current_value('summary', review_request)))
| reviewboard/rbtools | rbtools/commands/land.py | Python | mit | 16,983 |
"""
The latest version of this package is available at:
<http://github.com/jantman/biweeklybudget>
################################################################################
Copyright 2016 Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
This file is part of biweeklybudget, also known as biweeklybudget.
biweeklybudget is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
biweeklybudget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with biweeklybudget. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/biweeklybudget> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
from .accounts import *
from .budgets import *
from .credit_payoffs import *
from .index import *
from .ofx import *
from .payperiods import *
from .reconcile import *
from .scheduled import *
from .transactions import *
from .help import *
from .fuel import *
from .projects import *
from .utils import *
| jantman/biweeklybudget | biweeklybudget/flaskapp/views/__init__.py | Python | agpl-3.0 | 2,136 |
####Python Script to find the IMDB rating of movies and TV series#####
import requests
import bs4 as bs
####Function to confirm the name of movie or Tv series to get the correct rating####
def cnfm_page(link):
    """Show the title and plot for an IMDB search-result link and ask the
    user to confirm it is the title they were searching for.

    Returns True for "yes"/"Yes", False for "no"/"No"; any other answer
    falls through and returns None (the caller treats that as "no").
    Prints an error and exits the program if the page cannot be parsed.
    """
    try:
        name_link = 'https://www.imdb.com'+link
        req2 = requests.get(name_link)
        soup2 = bs.BeautifulSoup(req2.content,'lxml')
        title = soup2.find('div',{'class':'title_wrapper'}).find('h1')
        # Fixed: the attrs argument was the set {'class','summary_text'}
        # (comma typo) instead of a dict, which BeautifulSoup rejects,
        # so the except branch always fired.  Now matches the
        # title_wrapper lookup above.
        plot = soup2.find('div',{'class':'summary_text'})
        print (title.text)
        print ('With plot: {}'.format(plot.text.strip()))
        ret = input('Is this what you are searching for(yes or no): ')
        if ret=='yes' or ret=='Yes':
            return True
        if ret=='no' or ret=='No':
            return False
    # NOTE(review): deliberately broad best-effort handler -- any parse or
    # network failure prints a message and terminates the script.
    except:
        print ('XXXXXXX NO SUCH NAME EXIST.....ENTER THE VALID NAME XXXXXXXX')
        exit()
####funtion to get the rating after the confirmation of movie or Tv series name####
def getrating(link):
    """Fetch the IMDB page for *link* and return its rating string.

    Prints an error and exits the program if the rating cannot be found.
    """
    try:
        page = requests.get('https://www.imdb.com' + link)
        markup = bs.BeautifulSoup(page.content, 'lxml')
        return markup.find('div', {'class': 'ratingValue'}).find('strong')['title']
    # Broad best-effort handler, matching cnfm_page: any failure aborts.
    except:
        print ('XXXXXXX NO SUCH NAME EXIST.....ENTER THE VALID NAME XXXXXXXX')
        exit()
# Interactive entry point: search IMDB for the name, let the user confirm
# which result they meant, then print that title's rating.
name = input("Enter the movie or TV series name: ")
url = 'http://www.imdb.com/find?ref_=nv_sr_fn&q={}&s=all'.format(name)
req = requests.get(url)
soup = bs.BeautifulSoup(req.content,'lxml')
search_links = soup.find('div',{'class':'findSection'}).find('table').find_all('tr')# this will give the list of all possible links for the given name
Slinks_list = []
for link in search_links:
    Slinks_list.append(link.find('a').get('href'))
for link in Slinks_list:
    val =cnfm_page(link)
    if val is True:
        break;
# NOTE(review): if the user confirms none of the results, `link` is simply
# the last candidate, so its rating is printed anyway -- TODO confirm this
# is intended.
req_link = link #link of the movie ,which we are looking for
rating = getrating(req_link)
print ('Rating-- {}'.format(rating))
from setuptools import setup

# The README doubles as the long description shown on PyPI.
with open('README.md', 'r') as fh:
    long_description = fh.read()

setup(
    name='FlowIO',
    version='0.9.14',
    packages=['flowio'],
    package_data={'': []},
    description='FlowIO is a Python library for reading / writing Flow Cytometry Standard (FCS) files',
    long_description=long_description,
    long_description_content_type='text/markdown',
    author='Scott White',
    author_email='whitews@gmail.com',
    license='BSD',
    url='https://github.com/whitews/flowio',
    # NOTE(review): `requires` is a long-deprecated distutils field and is
    # ignored by pip; runtime dependencies belong in `install_requires`.
    requires=[],
    # NOTE(review): installs the LICENSE file to the environment root;
    # `license_files` is the conventional mechanism -- verify intent.
    data_files=[('', ['LICENSE'])],
    classifiers=[
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.6'
    ]
)
| whitews/FlowIO | setup.py | Python | bsd-3-clause | 795 |
#
# Histogram.py -- Histogram plugin for Ginga fits viewer
#
# Eric Jeschke (eric@naoj.org)
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
import numpy
from ginga.misc import Widgets, Plot
from ginga import GingaPlugin
class Histogram(GingaPlugin.LocalPlugin):
def __init__(self, fv, fitsimage):
    """Set up plugin state and the drawing canvas for the histogram region.

    ``fv`` is the viewer shell; ``fitsimage`` is the image viewer this
    local plugin is attached to.
    """
    # superclass defines some variables for us, like logger
    super(Histogram, self).__init__(fv, fitsimage)

    # Tag under which our canvas is inserted into the viewer.
    self.layertag = 'histogram-canvas'
    # Tag of the currently drawn histogram region (None until drawn).
    self.histtag = None
    self.histcolor = 'aquamarine'
    # If True, limits X axis to lo/hi cut levels
    self.xlimbycuts = True
    # Number of histogram bins
    self.numbins = 2048

    # Private canvas on which the user draws the selection rectangle.
    self.dc = self.fv.getDrawClasses()
    canvas = self.dc.DrawingCanvas()
    canvas.enable_draw(True)
    canvas.set_drawtype('rectangle', color='cyan', linestyle='dash',
                        drawdims=True)
    canvas.set_callback('draw-event', self.draw_cb)
    canvas.set_callback('cursor-down', self.drag)
    canvas.set_callback('cursor-move', self.drag)
    canvas.set_callback('cursor-up', self.update)
    canvas.setSurface(self.fitsimage)
    self.canvas = canvas

    # Redraw the plot whenever the viewer's cut levels change.
    fitssettings = fitsimage.get_settings()
    for name in ['cuts']:
        fitssettings.getSetting(name).add_callback('set',
                                                   self.cutset_ext_cb,
                                                   fitsimage)
    # Set True once build_gui() has constructed the widgets.
    self.gui_up = False
def build_gui(self, container):
    """Construct the plugin's widget tree inside *container*.

    Builds the instruction text area, the matplotlib histogram plot,
    the cut-level entry fields/buttons, and the Close button.
    """
    top = Widgets.VBox()
    top.set_border_width(4)

    # Make the cuts plot
    vbox, sw, orientation = Widgets.get_oriented_box(container)
    vbox.set_border_width(4)
    vbox.set_spacing(2)

    # Read-only instructions panel at the top.
    msgFont = self.fv.getFont("sansFont", 12)
    tw = Widgets.TextArea(wrap=True, editable=False)
    tw.set_font(msgFont)
    self.tw = tw

    fr = Widgets.Frame("Instructions")
    vbox2 = Widgets.VBox()
    vbox2.add_widget(tw)
    vbox2.add_widget(Widgets.Label(''), stretch=1)
    fr.set_widget(vbox2)
    vbox.add_widget(fr, stretch=0)

    # The histogram plot itself.
    self.plot = Plot.Plot(self.logger, width=2, height=3, dpi=100)
    ax = self.plot.add_axis()
    ax.grid(True)

    # for now we need to wrap this native widget
    w = Widgets.wrap(self.plot.get_widget())
    vbox.add_widget(w, stretch=1)

    # Control strip: cut levels, auto levels, plot options, bin count.
    captions = (('Cut Low:', 'label', 'Cut Low', 'entry'),
                ('Cut High:', 'label', 'Cut High', 'entry', 'Cut Levels', 'button'),
                ('Auto Levels', 'button'),
                ('Log Histogram', 'checkbutton', 'Plot By Cuts', 'checkbutton'),
                ('NumBins:', 'label', 'NumBins', 'entry'),
                ('Full Image', 'button'),
                )

    w, b = Widgets.build_info(captions, orientation=orientation)
    self.w.update(b)
    b.cut_levels.set_tooltip("Set cut levels manually")
    b.auto_levels.set_tooltip("Set cut levels by algorithm")
    b.cut_low.set_tooltip("Set low cut level (press Enter)")
    b.cut_high.set_tooltip("Set high cut level (press Enter)")
    b.log_histogram.set_tooltip("Use the log of the pixel values for the histogram (empty bins map to 10^-1)")
    b.plot_by_cuts.set_tooltip("Only show the part of the histogram between the cuts")
    b.numbins.set_tooltip("Number of bins for the histogram")
    b.full_image.set_tooltip("Use the full image for calculating the histogram")
    b.numbins.set_text(str(self.numbins))

    # Wire up the widget callbacks; entries act on Enter key.
    b.cut_low.add_callback('activated', lambda w: self.cut_levels())
    b.cut_high.add_callback('activated', lambda w: self.cut_levels())
    b.cut_levels.add_callback('activated', lambda w: self.cut_levels())
    b.auto_levels.add_callback('activated', lambda w: self.auto_levels())

    b.log_histogram.set_state(self.plot.logy)
    b.log_histogram.add_callback('activated', self.log_histogram_cb)
    b.plot_by_cuts.set_state(self.xlimbycuts)
    b.plot_by_cuts.add_callback('activated', self.plot_by_cuts_cb)
    b.numbins.add_callback('activated', lambda w: self.set_numbins_cb())
    b.full_image.add_callback('activated', lambda w: self.full_image_cb())

    vbox.add_widget(w, stretch=0)

    ## spacer = Widgets.Label('')
    ## vbox.add_widget(spacer, stretch=1)

    top.add_widget(sw, stretch=1)

    # Bottom button row: just Close for this plugin.
    btns = Widgets.HBox()
    btns.set_border_width(4)
    btns.set_spacing(3)

    btn = Widgets.Button("Close")
    btn.add_callback('activated', lambda w: self.close())
    btns.add_widget(btn, stretch=0)
    btns.add_widget(Widgets.Label(''), stretch=1)
    top.add_widget(btns, stretch=0)

    container.add_widget(top, stretch=1)

    self.gui_up = True
    def instructions(self):
        """Display basic usage instructions in the plugin's text widget."""
        self.tw.set_text("""Draw (or redraw) a region with the right mouse button. Click or drag left mouse button to reposition region.""")
def close(self):
chname = self.fv.get_channelName(self.fitsimage)
self.fv.stop_local_plugin(chname, str(self))
return True
    def start(self):
        """Start the plugin: show the plot and hook our canvas into the viewer."""
        self.instructions()
        self.plot.set_titles(rtitle="Histogram")
        self.plot.show()
        # insert canvas, if not already
        try:
            obj = self.fitsimage.getObjectByTag(self.layertag)
        except KeyError:
            # Add ruler layer
            self.fitsimage.add(self.canvas, tag=self.layertag)
        #self.canvas.deleteAllObjects()
        self.resume()
    def pause(self):
        """Temporarily deactivate the plugin's canvas interaction."""
        self.canvas.ui_setActive(False)
    def resume(self):
        """Reactivate the plugin's canvas interaction and prompt the user."""
        self.canvas.ui_setActive(True)
        self.fv.showStatus("Draw a rectangle with the right mouse button")
def stop(self):
# remove the rect from the canvas
## try:
## self.canvas.deleteObjectByTag(self.histtag, redraw=False)
## except:
## pass
##self.histtag = None
# remove the canvas from the image
try:
self.fitsimage.deleteObjectByTag(self.layertag)
except:
pass
self.gui_up = False
self.fv.showStatus("")
def full_image_cb(self):
canvas = self.canvas
try:
canvas.deleteObjectByTag(self.histtag, redraw=False)
except:
pass
image = self.fitsimage.get_image()
width, height = image.get_size()
x1, y1, x2, y2 = 0, 0, width-1, height-1
tag = canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
color='cyan',
linestyle='dash'))
self.draw_cb(canvas, tag)
    def redo(self):
        """Recompute and redraw the histogram for the current region.

        Plots one curve for monochrome data or one per band (R/G/B) for
        depth-3 data, then overlays vertical markers at the current cut
        levels and updates the cut-level entry fields.
        """
        obj = self.canvas.getObjectByTag(self.histtag)
        if obj.kind != 'compound':
            return True
        bbox = obj.objects[0]

        # Do histogram on the points within the rect
        image = self.fitsimage.get_image()
        self.plot.clear()

        numbins = self.numbins
        ## pct = 1.0
        ## i = int(numbins * (1.0 - pct))
        ## j = int(numbins * pct)
        depth = image.get_depth()
        if depth != 3:
            # monochrome image: a single histogram
            res = image.histogram(int(bbox.x1), int(bbox.y1),
                                  int(bbox.x2), int(bbox.y2),
                                  pct=1.0, numbins=numbins)
            # used with 'steps-post' drawstyle, this x and y assignment
            # gives correct histogram-steps
            x = res.bins
            y = numpy.append(res.dist, res.dist[-1])
            ## y, x = y[i:j+1], x[i:j+1]
            ymax = y.max()
            if self.plot.logy:
                # empty bins would break the log scale; map them to 0.1
                y = numpy.choose(y > 0, (.1, y))
            self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
                           title="Pixel Value Distribution",
                           color='blue', alpha=1.0, drawstyle='steps-post')
        else:
            # RGB image: overlay one semi-transparent histogram per band
            colors = ('red', 'green', 'blue')
            ymax = 0
            for z in range(depth):
                res = image.histogram(int(bbox.x1), int(bbox.y1),
                                      int(bbox.x2), int(bbox.y2),
                                      z=z, pct=1.0, numbins=numbins)
                # used with 'steps-post' drawstyle, this x and y assignment
                # gives correct histogram-steps
                x = res.bins
                y = numpy.append(res.dist, res.dist[-1])
                ## y, x = y[i:j+1], x[i:j+1]
                ymax = max(ymax, y.max())
                if self.plot.logy:
                    y = numpy.choose(y > 0, (.1, y))
                self.plot.plot(x, y, xtitle="Pixel value", ytitle="Number",
                               title="Pixel Value Distribution",
                               color=colors[z], alpha=0.33, drawstyle='steps-post')

        # show cut levels
        loval, hival = self.fitsimage.get_cut_levels()
        self.loline = self.plot.ax.axvline(loval, 0.0, 0.99,
                                           linestyle='-', color='red')
        self.hiline = self.plot.ax.axvline(hival, 0.0, 0.99,
                                           linestyle='-', color='green')
        if self.xlimbycuts:
            # restrict the visible X range to between the cuts
            self.plot.ax.set_xlim(loval, hival)

        # Make x axis labels a little more readable
        ## lbls = self.plot.ax.xaxis.get_ticklabels()
        ## for lbl in lbls:
        ##     lbl.set(rotation=45, horizontalalignment='right')

        self.w.cut_low.set_text(str(loval))
        self.w.cut_high.set_text(str(hival))
        self.plot.fig.canvas.draw()

        self.fv.showStatus("Click or drag left mouse button to move region")
        return True
def update(self, canvas, button, data_x, data_y):
obj = self.canvas.getObjectByTag(self.histtag)
if obj.kind == 'compound':
bbox = obj.objects[0]
elif obj.kind == 'rectangle':
bbox = obj
else:
return True
# calculate center of bbox
wd = bbox.x2 - bbox.x1
dw = wd // 2
ht = bbox.y2 - bbox.y1
dh = ht // 2
x, y = bbox.x1 + dw, bbox.y1 + dh
# calculate offsets of move
dx = (data_x - x)
dy = (data_y - y)
# calculate new coords
x1, y1, x2, y2 = bbox.x1+dx, bbox.y1+dy, bbox.x2+dx, bbox.y2+dy
try:
canvas.deleteObjectByTag(self.histtag, redraw=False)
except:
pass
tag = canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
color='cyan',
linestyle='dash'))
self.draw_cb(canvas, tag)
return True
def drag(self, canvas, button, data_x, data_y):
obj = self.canvas.getObjectByTag(self.histtag)
if obj.kind == 'compound':
bbox = obj.objects[0]
elif obj.kind == 'rectangle':
bbox = obj
else:
return True
# calculate center of bbox
wd = bbox.x2 - bbox.x1
dw = wd // 2
ht = bbox.y2 - bbox.y1
dh = ht // 2
x, y = bbox.x1 + dw, bbox.y1 + dh
# calculate offsets of move
dx = (data_x - x)
dy = (data_y - y)
# calculate new coords
x1, y1, x2, y2 = bbox.x1+dx, bbox.y1+dy, bbox.x2+dx, bbox.y2+dy
if obj.kind == 'compound':
try:
canvas.deleteObjectByTag(self.histtag, redraw=False)
except:
pass
self.histtag = canvas.add(self.dc.Rectangle(x1, y1, x2, y2,
color='cyan',
linestyle='dash'))
else:
bbox.x1, bbox.y1, bbox.x2, bbox.y2 = x1, y1, x2, y2
canvas.redraw(whence=3)
return True
def draw_cb(self, canvas, tag):
obj = canvas.getObjectByTag(tag)
if obj.kind != 'rectangle':
return True
canvas.deleteObjectByTag(tag, redraw=False)
if self.histtag:
try:
canvas.deleteObjectByTag(self.histtag, redraw=False)
except:
pass
tag = canvas.add(self.dc.CompoundObject(
self.dc.Rectangle(obj.x1, obj.y1, obj.x2, obj.y2,
color=self.histcolor),
self.dc.Text(obj.x1, obj.y2+4, "Histogram",
color=self.histcolor)))
self.histtag = tag
return self.redo()
def cut_levels(self):
try:
loval = float(self.w.cut_low.get_text())
hival = float(self.w.cut_high.get_text())
reslvls = self.fitsimage.cut_levels(loval, hival)
except Exception as e:
self.fv.showStatus("Error cutting levels: %s" % (str(e)))
if self.xlimbycuts:
self.redo()
return reslvls
    def auto_levels(self):
        """Have the viewer compute cut levels automatically."""
        self.fitsimage.auto_levels()
def cutset_ext_cb(self, setting, value, fitsimage):
if not self.gui_up:
return
t_ = fitsimage.get_settings()
loval, hival = t_['cuts']
try:
self.loline.remove()
self.hiline.remove()
except:
pass
self.loline = self.plot.ax.axvline(loval, 0.0, 0.99,
linestyle='-', color='black')
self.hiline = self.plot.ax.axvline(hival, 0.0, 0.99,
linestyle='-', color='black')
self.w.cut_low.set_text(str(loval))
self.w.cut_high.set_text(str(hival))
#self.plot.fig.canvas.draw()
self.redo()
def set_numbins_cb(self):
self.numbins = int(self.w.numbins.get_text())
self.redo()
def log_histogram_cb(self, w, val):
self.plot.logy = val
if (self.histtag is not None) and self.gui_up:
# self.histtag == None means no data is loaded yet
self.redo()
def plot_by_cuts_cb(self, w, val):
self.xlimbycuts = val
if (self.histtag is not None) and self.gui_up:
# self.histtag == None means no data is loaded yet
self.redo()
def __str__(self):
return 'histogram'
# END
| bsipocz/ginga | ginga/misc/plugins/Histogram.py | Python | bsd-3-clause | 14,394 |
"""
Base and utility classes for pandas objects.
"""
from __future__ import annotations
import textwrap
from typing import (
TYPE_CHECKING,
Any,
Generic,
Hashable,
Literal,
TypeVar,
cast,
final,
)
import numpy as np
import pandas._libs.lib as lib
from pandas._typing import (
ArrayLike,
DtypeObj,
FrameOrSeries,
IndexLabel,
Shape,
npt,
)
from pandas.compat import PYPY
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
is_categorical_dtype,
is_dict_like,
is_extension_array_dtype,
is_object_dtype,
is_scalar,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import (
isna,
remove_na_arraylike,
)
from pandas.core import algorithms
from pandas.core.accessor import DirNamesMixin
from pandas.core.algorithms import (
duplicated,
unique1d,
value_counts,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.construction import create_series_with_explicit_dtype
import pandas.core.nanops as nanops
if TYPE_CHECKING:
from pandas._typing import (
NumpySorter,
NumpyValueArrayLike,
)
from pandas import Categorical
# docstring fragments shared between Index/Series methods, filled in below
_shared_docs: dict[str, str] = {}
# substitution values used when formatting IndexOpsMixin docstrings
_indexops_doc_kwargs = {
    "klass": "IndexOpsMixin",
    "inplace": "",
    "unique": "IndexOpsMixin",
    "duplicated": "IndexOpsMixin",
}
# type variable bound so mixin methods can return the subclass type
_T = TypeVar("_T", bound="IndexOpsMixin")
class PandasObject(DirNamesMixin):
    """
    Baseclass for various pandas objects.
    """

    # results from calls to methods decorated with cache_readonly get added to _cache
    _cache: dict[str, Any]

    @property
    def _constructor(self):
        """
        Class constructor (for this class it's just `__class__`).
        """
        return type(self)

    def __repr__(self) -> str:
        """
        Return a string representation for a particular object.
        """
        # Should be overwritten by base classes
        return object.__repr__(self)

    def _reset_cache(self, key: str | None = None) -> None:
        """
        Reset cached properties. If ``key`` is passed, only clears that key.
        """
        # nothing cached yet -> nothing to clear
        if not hasattr(self, "_cache"):
            return
        if key is None:
            self._cache.clear()
        else:
            self._cache.pop(key, None)

    def __sizeof__(self) -> int:
        """
        Generates the total memory usage for an object that returns
        either a value or Series of values
        """
        memory_usage = getattr(self, "memory_usage", None)
        if memory_usage:
            mem = memory_usage(deep=True)
            # memory_usage may return a scalar or a per-column Series
            return int(mem if is_scalar(mem) else mem.sum())

        # no memory_usage attribute, so fall back to object's 'sizeof'
        return super().__sizeof__()
class NoNewAttributesMixin:
    """
    Mixin which prevents adding new attributes.

    Prevents additional attributes via xxx.attribute = "something" after a
    call to `self.__freeze()`. Mainly used to prevent the user from using
    wrong attributes on an accessor (`Series.cat/.str/.dt`).

    If you really want to add a new attribute at a later time, you need to use
    `object.__setattr__(self, key, value)`.
    """

    def _freeze(self):
        """
        Prevents setting additional attributes.
        """
        # bypass our own __setattr__ guard to set the flag itself
        object.__setattr__(self, "__frozen", True)

    # prevent adding any attribute via s.xxx.new_attribute = ...
    def __setattr__(self, key: str, value):
        # _cache is used by a decorator
        # We need to check both 1.) cls.__dict__ and 2.) getattr(self, key)
        # because
        # 1.) getattr is false for attributes that raise errors
        # 2.) cls.__dict__ doesn't traverse into base classes
        if getattr(self, "__frozen", False) and not (
            key == "_cache"
            or key in type(self).__dict__
            or getattr(self, key, None) is not None
        ):
            raise AttributeError(f"You cannot add any new attribute '{key}'")
        object.__setattr__(self, key, value)
class DataError(Exception):
    """Exception raised when performing an operation on unsuitable data."""

    pass
class SpecificationError(Exception):
    """Exception raised for an invalid aggregation specification."""

    pass
class SelectionMixin(Generic[FrameOrSeries]):
    """
    mixin implementing the selection & aggregation interface on a group-like
    object sub-classes need to define: obj, exclusions
    """

    obj: FrameOrSeries
    _selection: IndexLabel | None = None
    exclusions: frozenset[Hashable]
    _internal_names = ["_cache", "__setstate__"]
    _internal_names_set = set(_internal_names)

    @final
    @property
    def _selection_list(self):
        # normalize `_selection` to a list-like suitable for DataFrame indexing
        if not isinstance(
            self._selection, (list, tuple, ABCSeries, ABCIndex, np.ndarray)
        ):
            return [self._selection]
        return self._selection

    @cache_readonly
    def _selected_obj(self):
        # the underlying object restricted to the current selection (if any)
        if self._selection is None or isinstance(self.obj, ABCSeries):
            return self.obj
        else:
            return self.obj[self._selection]

    @final
    @cache_readonly
    def ndim(self) -> int:
        # dimensionality of the selected object (1 for Series, 2 for frames)
        return self._selected_obj.ndim

    @final
    @cache_readonly
    def _obj_with_exclusions(self):
        # like _selected_obj, but with excluded columns dropped
        if self._selection is not None and isinstance(self.obj, ABCDataFrame):
            return self.obj[self._selection_list]

        if len(self.exclusions) > 0:
            return self.obj.drop(self.exclusions, axis=1)
        else:
            return self.obj

    def __getitem__(self, key):
        # select a column (or columns) from the underlying object
        if self._selection is not None:
            raise IndexError(f"Column(s) {self._selection} already selected")

        if isinstance(key, (list, tuple, ABCSeries, ABCIndex, np.ndarray)):
            # list-like key: all requested columns must exist
            if len(self.obj.columns.intersection(key)) != len(key):
                bad_keys = list(set(key).difference(self.obj.columns))
                raise KeyError(f"Columns not found: {str(bad_keys)[1:-1]}")
            return self._gotitem(list(key), ndim=2)

        elif not getattr(self, "as_index", False):
            if key not in self.obj.columns:
                raise KeyError(f"Column not found: {key}")
            return self._gotitem(key, ndim=2)

        else:
            if key not in self.obj:
                raise KeyError(f"Column not found: {key}")
            subset = self.obj[key]
            ndim = subset.ndim
            return self._gotitem(key, ndim=ndim, subset=subset)

    def _gotitem(self, key, ndim: int, subset=None):
        """
        sub-classes to define
        return a sliced object

        Parameters
        ----------
        key : str / list of selections
        ndim : {1, 2}
            requested ndim of result
        subset : object, default None
            subset to act on
        """
        raise AbstractMethodError(self)

    def aggregate(self, func, *args, **kwargs):
        # abstract: concrete group-like subclasses implement aggregation
        raise AbstractMethodError(self)

    agg = aggregate
class IndexOpsMixin(OpsMixin):
"""
Common ops mixin to support a unified interface / docs for Series / Index
"""
# ndarray compatibility
__array_priority__ = 1000
_hidden_attrs: frozenset[str] = frozenset(
["tolist"] # tolist is not deprecated, just suppressed in the __dir__
)
    @property
    def dtype(self) -> DtypeObj:
        """The dtype of the underlying data; supplied by subclasses."""
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)
    @property
    def _values(self) -> ExtensionArray | np.ndarray:
        """The array backing this object; supplied by subclasses."""
        # must be defined here as a property for mypy
        raise AbstractMethodError(self)
    def transpose(self: _T, *args, **kwargs) -> _T:
        """
        Return the transpose, which is by definition self.

        Returns
        -------
        %(klass)s
        """
        # reject any numpy-compat arguments other than the defaults
        nv.validate_transpose(args, kwargs)
        return self
T = property(
transpose,
doc="""
Return the transpose, which is by definition self.
""",
)
    @property
    def shape(self) -> Shape:
        """
        Return a tuple of the shape of the underlying data.
        """
        # delegate directly to the backing array
        return self._values.shape
    def __len__(self) -> int:
        """Length of the underlying data; supplied by subclasses."""
        # We need this defined here for mypy
        raise AbstractMethodError(self)
    @property
    def ndim(self) -> int:
        """
        Number of dimensions of the underlying data, by definition 1.
        """
        # Series and Index are always one-dimensional
        return 1
def item(self):
"""
Return the first element of the underlying data as a Python scalar.
Returns
-------
scalar
The first element of %(klass)s.
Raises
------
ValueError
If the data is not length-1.
"""
if len(self) == 1:
return next(iter(self))
raise ValueError("can only convert an array of size 1 to a Python scalar")
    @property
    def nbytes(self) -> int:
        """
        Return the number of bytes in the underlying data.
        """
        # delegate directly to the backing array
        return self._values.nbytes
    @property
    def size(self) -> int:
        """
        Return the number of elements in the underlying data.
        """
        # equivalent to len(self) but defined in terms of the backing array
        return len(self._values)
@property
def array(self) -> ExtensionArray:
"""
The ExtensionArray of the data backing this Series or Index.
Returns
-------
ExtensionArray
An ExtensionArray of the values stored within. For extension
types, this is the actual array. For NumPy native types, this
is a thin (no copy) wrapper around :class:`numpy.ndarray`.
``.array`` differs ``.values`` which may require converting the
data to a different form.
See Also
--------
Index.to_numpy : Similar method that always returns a NumPy array.
Series.to_numpy : Similar method that always returns a NumPy array.
Notes
-----
This table lays out the different array types for each extension
dtype within pandas.
================== =============================
dtype array type
================== =============================
category Categorical
period PeriodArray
interval IntervalArray
IntegerNA IntegerArray
string StringArray
boolean BooleanArray
datetime64[ns, tz] DatetimeArray
================== =============================
For any 3rd-party extension types, the array type will be an
ExtensionArray.
For all remaining dtypes ``.array`` will be a
:class:`arrays.NumpyExtensionArray` wrapping the actual ndarray
stored within. If you absolutely need a NumPy array (possibly with
copying / coercing data), then use :meth:`Series.to_numpy` instead.
Examples
--------
For regular NumPy types like int, and float, a PandasArray
is returned.
>>> pd.Series([1, 2, 3]).array
<PandasArray>
[1, 2, 3]
Length: 3, dtype: int64
For extension types, like Categorical, the actual ExtensionArray
is returned
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.array
['a', 'b', 'a']
Categories (2, object): ['a', 'b']
"""
raise AbstractMethodError(self)
def to_numpy(
self,
dtype: npt.DTypeLike | None = None,
copy: bool = False,
na_value=lib.no_default,
**kwargs,
) -> np.ndarray:
"""
A NumPy ndarray representing the values in this Series or Index.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
Whether to ensure that the returned value is not a view on
another array. Note that ``copy=False`` does not *ensure* that
``to_numpy()`` is no-copy. Rather, ``copy=True`` ensure that
a copy is made, even if not strictly necessary.
na_value : Any, optional
The value to use for missing values. The default value depends
on `dtype` and the type of the array.
.. versionadded:: 1.0.0
**kwargs
Additional keywords passed through to the ``to_numpy`` method
of the underlying array (for extension arrays).
.. versionadded:: 1.0.0
Returns
-------
numpy.ndarray
See Also
--------
Series.array : Get the actual data stored within.
Index.array : Get the actual data stored within.
DataFrame.to_numpy : Similar method for DataFrame.
Notes
-----
The returned array will be the same up to equality (values equal
in `self` will be equal in the returned array; likewise for values
that are not equal). When `self` contains an ExtensionArray, the
dtype may be different. For example, for a category-dtype Series,
``to_numpy()`` will return a NumPy array and the categorical dtype
will be lost.
For NumPy dtypes, this will be a reference to the actual data stored
in this Series or Index (assuming ``copy=False``). Modifying the result
in place will modify the data stored in the Series or Index (not that
we recommend doing that).
For extension types, ``to_numpy()`` *may* require copying data and
coercing the result to a NumPy type (possibly object), which may be
expensive. When you need a no-copy reference to the underlying data,
:attr:`Series.array` should be used instead.
This table lays out the different dtypes and default return types of
``to_numpy()`` for various dtypes within pandas.
================== ================================
dtype array type
================== ================================
category[T] ndarray[T] (same dtype as input)
period ndarray[object] (Periods)
interval ndarray[object] (Intervals)
IntegerNA ndarray[object]
datetime64[ns] datetime64[ns]
datetime64[ns, tz] ndarray[object] (Timestamps)
================== ================================
Examples
--------
>>> ser = pd.Series(pd.Categorical(['a', 'b', 'a']))
>>> ser.to_numpy()
array(['a', 'b', 'a'], dtype=object)
Specify the `dtype` to control how datetime-aware data is represented.
Use ``dtype=object`` to return an ndarray of pandas :class:`Timestamp`
objects, each with the correct ``tz``.
>>> ser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
>>> ser.to_numpy(dtype=object)
array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
dtype=object)
Or ``dtype='datetime64[ns]'`` to return an ndarray of native
datetime64 values. The values are converted to UTC and the timezone
info is dropped.
>>> ser.to_numpy(dtype="datetime64[ns]")
... # doctest: +ELLIPSIS
array(['1999-12-31T23:00:00.000000000', '2000-01-01T23:00:00...'],
dtype='datetime64[ns]')
"""
if is_extension_array_dtype(self.dtype):
# error: Too many arguments for "to_numpy" of "ExtensionArray"
return self.array.to_numpy( # type: ignore[call-arg]
dtype, copy=copy, na_value=na_value, **kwargs
)
elif kwargs:
bad_keys = list(kwargs.keys())[0]
raise TypeError(
f"to_numpy() got an unexpected keyword argument '{bad_keys}'"
)
result = np.asarray(self._values, dtype=dtype)
# TODO(GH-24345): Avoid potential double copy
if copy or na_value is not lib.no_default:
result = result.copy()
if na_value is not lib.no_default:
result[self.isna()] = na_value
return result
    @property
    def empty(self) -> bool:
        """Whether the object contains zero elements."""
        return not self.size
def max(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the maximum value of the Index.
Parameters
----------
axis : int, optional
For compatibility with NumPy. Only 0 or None are allowed.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Maximum value.
See Also
--------
Index.min : Return the minimum value in an Index.
Series.max : Return the maximum value in a Series.
DataFrame.max : Return the maximum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.max()
3
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.max()
'c'
For a MultiIndex, the maximum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.max()
('b', 2)
"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return nanops.nanmax(self._values, skipna=skipna)
@doc(op="max", oppose="min", value="largest")
def argmax(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""
Return int position of the {value} value in the Series.
If the {op}imum is achieved in multiple locations,
the first row position is returned.
Parameters
----------
axis : {{None}}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
int
Row position of the {op}imum value.
See Also
--------
Series.arg{op} : Return position of the {op}imum value.
Series.arg{oppose} : Return position of the {oppose}imum value.
numpy.ndarray.arg{op} : Equivalent method for numpy arrays.
Series.idxmax : Return index label of the maximum values.
Series.idxmin : Return index label of the minimum values.
Examples
--------
Consider dataset containing cereal calories
>>> s = pd.Series({{'Corn Flakes': 100.0, 'Almond Delight': 110.0,
... 'Cinnamon Toast Crunch': 120.0, 'Cocoa Puff': 110.0}})
>>> s
Corn Flakes 100.0
Almond Delight 110.0
Cinnamon Toast Crunch 120.0
Cocoa Puff 110.0
dtype: float64
>>> s.argmax()
2
>>> s.argmin()
0
The maximum cereal calories is the third element and
the minimum cereal calories is the first element,
since series is zero-indexed.
"""
delegate = self._values
nv.validate_minmax_axis(axis)
skipna = nv.validate_argmax_with_skipna(skipna, args, kwargs)
if isinstance(delegate, ExtensionArray):
if not skipna and delegate.isna().any():
return -1
else:
return delegate.argmax()
else:
# error: Incompatible return value type (got "Union[int, ndarray]", expected
# "int")
return nanops.nanargmax( # type: ignore[return-value]
delegate, skipna=skipna
)
def min(self, axis=None, skipna: bool = True, *args, **kwargs):
"""
Return the minimum value of the Index.
Parameters
----------
axis : {None}
Dummy argument for consistency with Series.
skipna : bool, default True
Exclude NA/null values when showing the result.
*args, **kwargs
Additional arguments and keywords for compatibility with NumPy.
Returns
-------
scalar
Minimum value.
See Also
--------
Index.max : Return the maximum value of the object.
Series.min : Return the minimum value in a Series.
DataFrame.min : Return the minimum values in a DataFrame.
Examples
--------
>>> idx = pd.Index([3, 2, 1])
>>> idx.min()
1
>>> idx = pd.Index(['c', 'b', 'a'])
>>> idx.min()
'a'
For a MultiIndex, the minimum is determined lexicographically.
>>> idx = pd.MultiIndex.from_product([('a', 'b'), (2, 1)])
>>> idx.min()
('a', 1)
"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return nanops.nanmin(self._values, skipna=skipna)
    @doc(argmax, op="min", oppose="max", value="smallest")
    def argmin(self, axis=None, skipna=True, *args, **kwargs) -> int:
        # docstring is generated from argmax's template via @doc above
        delegate = self._values
        nv.validate_minmax_axis(axis)
        skipna = nv.validate_argmin_with_skipna(skipna, args, kwargs)

        if isinstance(delegate, ExtensionArray):
            if not skipna and delegate.isna().any():
                # NAs present and not skipped: there is no valid position
                return -1
            else:
                return delegate.argmin()
        else:
            # error: Incompatible return value type (got "Union[int, ndarray]", expected
            # "int")
            return nanops.nanargmin(  # type: ignore[return-value]
                delegate, skipna=skipna
            )
def tolist(self):
"""
Return a list of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
list
See Also
--------
numpy.ndarray.tolist : Return the array as an a.ndim-levels deep
nested list of Python scalars.
"""
if not isinstance(self._values, np.ndarray):
# check for ndarray instead of dtype to catch DTA/TDA
return list(self._values)
return self._values.tolist()
to_list = tolist
def __iter__(self):
"""
Return an iterator of the values.
These are each a scalar type, which is a Python scalar
(for str, int, float) or a pandas scalar
(for Timestamp/Timedelta/Interval/Period)
Returns
-------
iterator
"""
# We are explicitly making element iterators.
if not isinstance(self._values, np.ndarray):
# Check type instead of dtype to catch DTA/TDA
return iter(self._values)
else:
return map(self._values.item, range(self._values.size))
    @cache_readonly
    def hasnans(self) -> bool:
        """
        Return if I have any nans; enables various perf speedups.
        """
        # bool() strips the numpy scalar so callers get a plain Python bool
        return bool(isna(self).any())
    def isna(self):
        """Return a boolean array indicating which values are NA."""
        return isna(self._values)
def _reduce(
self,
op,
name: str,
*,
axis=0,
skipna=True,
numeric_only=None,
filter_type=None,
**kwds,
):
"""
Perform the reduction type operation if we can.
"""
func = getattr(self, name, None)
if func is None:
raise TypeError(
f"{type(self).__name__} cannot perform the operation {name}"
)
return func(skipna=skipna, **kwds)
    @final
    def _map_values(self, mapper, na_action=None):
        """
        An internal function that maps values using the input
        correspondence (which can be a dict, Series, or function).

        Parameters
        ----------
        mapper : function, dict, or Series
            The input correspondence object
        na_action : {None, 'ignore'}
            If 'ignore', propagate NA values, without passing them to the
            mapping function

        Returns
        -------
        Union[Index, MultiIndex], inferred
            The output of the mapping function applied to the index.
            If the function returns a tuple with more than one element
            a MultiIndex will be returned.
        """
        # we can fastpath dict/Series to an efficient map
        # as we know that we are not going to have to yield
        # python types
        if is_dict_like(mapper):
            if isinstance(mapper, dict) and hasattr(mapper, "__missing__"):
                # If a dictionary subclass defines a default value method,
                # convert mapper to a lookup function (GH #15999).
                dict_with_default = mapper
                mapper = lambda x: dict_with_default[x]
            else:
                # Dictionary does not have a default. Thus it's safe to
                # convert to an Series for efficiency.
                # we specify the keys here to handle the
                # possibility that they are tuples

                # The return value of mapping with an empty mapper is
                # expected to be pd.Series(np.nan, ...). As np.nan is
                # of dtype float64 the return value of this method should
                # be float64 as well
                mapper = create_series_with_explicit_dtype(
                    mapper, dtype_if_empty=np.float64
                )

        if isinstance(mapper, ABCSeries):
            # Since values were input this means we came from either
            # a dict or a series and mapper should be an index
            if is_categorical_dtype(self.dtype):
                # use the built in categorical series mapper which saves
                # time by mapping the categories instead of all values
                cat = cast("Categorical", self._values)
                return cat.map(mapper)

            values = self._values

            # align our values against the mapper's index positionally
            indexer = mapper.index.get_indexer(values)
            new_values = algorithms.take_nd(mapper._values, indexer)

            return new_values

        # we must convert to python types
        if is_extension_array_dtype(self.dtype) and hasattr(self._values, "map"):
            # GH#23179 some EAs do not have `map`
            values = self._values
            if na_action is not None:
                raise NotImplementedError
            map_f = lambda values, f: values.map(f)
        else:
            values = self._values.astype(object)
            if na_action == "ignore":
                # mask NAs so the mapping function never sees them
                map_f = lambda values, f: lib.map_infer_mask(
                    values, f, isna(values).view(np.uint8)
                )
            elif na_action is None:
                map_f = lib.map_infer
            else:
                msg = (
                    "na_action must either be 'ignore' or None, "
                    f"{na_action} was passed"
                )
                raise ValueError(msg)

        # mapper is a function
        new_values = map_f(values, mapper)

        return new_values
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
"""
Return a Series containing counts of unique values.
The resulting object will be in descending order so that the
first element is the most frequently-occurring element.
Excludes NA values by default.
Parameters
----------
normalize : bool, default False
If True then the object returned will contain the relative
frequencies of the unique values.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
bins : int, optional
Rather than count values, group them into half-open bins,
a convenience for ``pd.cut``, only works with numeric data.
dropna : bool, default True
Don't include counts of NaN.
Returns
-------
Series
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.count: Number of non-NA elements in a DataFrame.
DataFrame.value_counts: Equivalent method on DataFrames.
Examples
--------
>>> index = pd.Index([3, 1, 2, 3, 4, np.nan])
>>> index.value_counts()
3.0 2
1.0 1
2.0 1
4.0 1
dtype: int64
With `normalize` set to `True`, returns the relative frequency by
dividing all values by the sum of values.
>>> s = pd.Series([3, 1, 2, 3, 4, np.nan])
>>> s.value_counts(normalize=True)
3.0 0.4
1.0 0.2
2.0 0.2
4.0 0.2
dtype: float64
**bins**
Bins can be useful for going from a continuous variable to a
categorical variable; instead of counting unique
apparitions of values, divide the index in the specified
number of half-open bins.
>>> s.value_counts(bins=3)
(0.996, 2.0] 2
(2.0, 3.0] 2
(3.0, 4.0] 1
dtype: int64
**dropna**
With `dropna` set to `False` we can also see NaN index values.
>>> s.value_counts(dropna=False)
3.0 2
1.0 1
2.0 1
4.0 1
NaN 1
dtype: int64
"""
return value_counts(
self,
sort=sort,
ascending=ascending,
normalize=normalize,
bins=bins,
dropna=dropna,
)
    def unique(self):
        """Return the unique values (in order of appearance, unsorted)."""
        values = self._values

        if not isinstance(values, np.ndarray):
            # extension array path: the EA implements its own unique
            result: ArrayLike = values.unique()
            if self.dtype.kind in ["m", "M"] and isinstance(self, ABCSeries):
                # GH#31182 Series._values returns EA, unpack for backward-compat
                if getattr(self.dtype, "tz", None) is None:
                    result = np.asarray(result)
        else:
            result = unique1d(values)

        return result
def nunique(self, dropna: bool = True) -> int:
"""
Return number of unique elements in the object.
Excludes NA values by default.
Parameters
----------
dropna : bool, default True
Don't include NaN in the count.
Returns
-------
int
See Also
--------
DataFrame.nunique: Method nunique for DataFrame.
Series.count: Count non-NA/null observations in the Series.
Examples
--------
>>> s = pd.Series([1, 3, 5, 7, 7])
>>> s
0 1
1 3
2 5
3 7
4 7
dtype: int64
>>> s.nunique()
4
"""
uniqs = self.unique()
if dropna:
uniqs = remove_na_arraylike(uniqs)
return len(uniqs)
@property
def is_unique(self) -> bool:
"""
Return boolean if values in the object are unique.
Returns
-------
bool
"""
return self.nunique(dropna=False) == len(self)
    @property
    def is_monotonic(self) -> bool:
        """
        Return boolean if values in the object are
        monotonic_increasing.

        Returns
        -------
        bool
        """
        from pandas import Index

        # delegate to Index, which implements (and caches) the check
        return Index(self).is_monotonic
    @property
    def is_monotonic_increasing(self) -> bool:
        """
        Alias for is_monotonic: whether values are monotonically increasing.
        """
        # mypy complains if we alias directly
        return self.is_monotonic
    @property
    def is_monotonic_decreasing(self) -> bool:
        """
        Return boolean if values in the object are
        monotonic_decreasing.

        Returns
        -------
        bool
        """
        from pandas import Index

        # delegate to Index, which implements (and caches) the check
        return Index(self).is_monotonic_decreasing
    def _memory_usage(self, deep: bool = False) -> int:
        """
        Memory usage of the values.

        Parameters
        ----------
        deep : bool, default False
            Introspect the data deeply, interrogate
            `object` dtypes for system-level memory consumption.

        Returns
        -------
        bytes used

        See Also
        --------
        numpy.ndarray.nbytes : Total bytes consumed by the elements of the
            array.

        Notes
        -----
        Memory usage does not include memory consumed by elements that
        are not components of the array if deep=False or if used on PyPy
        """
        if hasattr(self.array, "memory_usage"):
            # extension arrays report their own usage
            # https://github.com/python/mypy/issues/1424
            # error: "ExtensionArray" has no attribute "memory_usage"
            return self.array.memory_usage(deep=deep)  # type: ignore[attr-defined]

        v = self.array.nbytes
        if deep and is_object_dtype(self) and not PYPY:
            # add the footprint of the boxed Python objects themselves
            values = cast(np.ndarray, self._values)
            v += lib.memory_usage_of_objects(values)
        return v
    # Thin wrapper around algorithms.factorize; the docstring is generated
    # from that function's template via @doc, with the shared placeholders
    # blanked out and only `sort` documented here.
    @doc(
        algorithms.factorize,
        values="",
        order="",
        size_hint="",
        sort=textwrap.dedent(
            """\
            sort : bool, default False
                Sort `uniques` and shuffle `codes` to maintain the
                relationship.
            """
        ),
    )
    def factorize(self, sort: bool = False, na_sentinel: int | None = -1):
        return algorithms.factorize(self, sort=sort, na_sentinel=na_sentinel)
    # Shared docstring template; injected into searchsorted (below) via @doc
    # with {klass} substituted per subclass.
    _shared_docs[
        "searchsorted"
    ] = """
        Find indices where elements should be inserted to maintain order.

        Find the indices into a sorted {klass} `self` such that, if the
        corresponding elements in `value` were inserted before the indices,
        the order of `self` would be preserved.

        .. note::

            The {klass} *must* be monotonically sorted, otherwise
            wrong locations will likely be returned. Pandas does *not*
            check this for you.

        Parameters
        ----------
        value : array-like or scalar
            Values to insert into `self`.
        side : {{'left', 'right'}}, optional
            If 'left', the index of the first suitable location found is given.
            If 'right', return the last such index. If there is no suitable
            index, return either 0 or N (where N is the length of `self`).
        sorter : 1-D array-like, optional
            Optional array of integer indices that sort `self` into ascending
            order. They are typically the result of ``np.argsort``.

        Returns
        -------
        int or array of int
            A scalar or array of insertion points with the
            same shape as `value`.

        See Also
        --------
        sort_values : Sort by the values along either axis.
        numpy.searchsorted : Similar method from NumPy.

        Notes
        -----
        Binary search is used to find the required insertion points.

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3])
        >>> ser
        0    1
        1    2
        2    3
        dtype: int64

        >>> ser.searchsorted(4)
        3

        >>> ser.searchsorted([0, 4])
        array([0, 3])

        >>> ser.searchsorted([1, 3], side='left')
        array([0, 2])

        >>> ser.searchsorted([1, 3], side='right')
        array([1, 3])

        >>> ser = pd.Series(pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000']))
        >>> ser
        0   2000-03-11
        1   2000-03-12
        2   2000-03-13
        dtype: datetime64[ns]

        >>> ser.searchsorted('3/14/2000')
        3

        >>> ser = pd.Categorical(
        ...     ['apple', 'bread', 'bread', 'cheese', 'milk'], ordered=True
        ... )
        >>> ser
        ['apple', 'bread', 'bread', 'cheese', 'milk']
        Categories (4, object): ['apple' < 'bread' < 'cheese' < 'milk']

        >>> ser.searchsorted('bread')
        1

        >>> ser.searchsorted(['bread'], side='right')
        array([3])

        If the values are not monotonically sorted, wrong locations
        may be returned:

        >>> ser = pd.Series([2, 1, 3])
        >>> ser
        0    2
        1    1
        2    3
        dtype: int64

        >>> ser.searchsorted(1)  # doctest: +SKIP
        0  # wrong result, correct would be 1
        """

    @doc(_shared_docs["searchsorted"], klass="Index")
    def searchsorted(
        self,
        value: NumpyValueArrayLike,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        # Delegate to the shared implementation over the underlying ndarray.
        return algorithms.searchsorted(self._values, value, side=side, sorter=sorter)
def drop_duplicates(self, keep="first"):
duplicated = self._duplicated(keep=keep)
# error: Value of type "IndexOpsMixin" is not indexable
return self[~duplicated] # type: ignore[index]
    @final
    def _duplicated(
        self, keep: Literal["first", "last", False] = "first"
    ) -> npt.NDArray[np.bool_]:
        # Boolean mask marking duplicate entries in the underlying values;
        # `keep` selects which occurrence (if any) is NOT flagged.
        return duplicated(self._values, keep=keep)
| dsm054/pandas | pandas/core/base.py | Python | bsd-3-clause | 37,574 |
from io import StringIO
from unittest.mock import Mock, patch
# pylint:disable=import-error
import pytest
from _pytest.runner import CallInfo
# pylint:enable=import-error
from flaky import flaky
from flaky import _flaky_plugin
from flaky.flaky_pytest_plugin import (
runner,
FlakyPlugin,
FlakyXdist,
PLUGIN,
)
from flaky.names import FlakyNames
@pytest.fixture
def mock_io(monkeypatch):
    """Patch the flaky plugin's StringIO factory to hand out one shared buffer."""
    shared_buffer = StringIO()

    def _factory():
        return shared_buffer

    monkeypatch.setattr(_flaky_plugin, 'StringIO', _factory)
    return shared_buffer
@pytest.fixture
def string_io():
    # Fresh, unpatched buffer used to build the expected report text.
    return StringIO()


@pytest.fixture
def flaky_plugin(mock_io):
    # pylint:disable=unused-argument
    # mock_io is requested only for its patching side effect, so the plugin
    # writes to the shared buffer the tests can inspect.
    return FlakyPlugin()
@pytest.fixture
def mock_plugin_rerun(monkeypatch, flaky_plugin):
    """Replace _mark_test_for_rerun with a recorder; yields an accessor."""
    recorded = []

    def _record(test):
        recorded.append(test)

    monkeypatch.setattr(flaky_plugin, '_mark_test_for_rerun', _record)

    def _get_recorded():
        return recorded

    return _get_recorded
@pytest.fixture(params=['instance', 'module', 'parent'])
def flaky_test(request, mock_config):
    # Parametrized over the three attributes a pytest item may expose its
    # owning object through: instance, module, or parent.
    def test_function():
        pass

    test_owner = Mock()
    setattr(test_owner, 'test_method', test_function)
    setattr(test_owner, 'obj', test_owner)
    kwargs = {request.param: test_owner}
    test = MockTestItem(**kwargs)
    setattr(test, 'owner', test_owner)
    setattr(test, 'config', mock_config)
    return test


@pytest.fixture
def call_info(flaky_test):
    # CallInfo stand-in for the 'call' phase of the mock test item.
    return MockFlakyCallInfo(flaky_test, 'call')


@pytest.fixture
def mock_error():
    return MockError()
class MockError:
    """Excinfo-like stand-in exposing type/value/traceback attributes."""

    def __init__(self):
        super().__init__()
        self.type = Mock()
        self.traceback = Mock()
        value = Mock()
        value.message = 'failed'
        self.value = value
class MockTestItem:
    """Minimal pytest-item stand-in; class attributes provide None fallbacks."""

    name = 'test_method'
    instance = None
    module = None
    parent = None

    def __init__(self, instance=None, module=None, parent=None):
        # Only shadow the class-level None when a value was supplied.
        for attr_name, attr_value in (
            ('instance', instance),
            ('module', module),
            ('parent', parent),
        ):
            if attr_value is not None:
                setattr(self, attr_name, attr_value)

    def runtest(self):
        pass
class MockConfig:
    """Config stub: getvalue is always False, getoption echoes its default."""

    def getvalue(self, key):  # pylint:disable=unused-argument,no-self-use
        return False

    def getoption(self, key, default):  # pylint:disable=unused-argument,no-self-use
        return default
@pytest.fixture
def mock_config():
    return MockConfig()


class MockFlakyCallInfo(CallInfo):
    """CallInfo stand-in whose base __init__ is deliberately skipped."""

    def __init__(self, item, when):
        # pylint:disable=super-init-not-called
        # super init not called because it has unwanted side effects
        self.when = when
        self._item = item
def test_flaky_plugin_report(flaky_plugin, mock_io, string_io):
    # The terminal summary must wrap whatever was written to the plugin's
    # stream in the flaky report header/footer.
    flaky_report = 'Flaky tests passed; others failed. ' \
                   'No more tests; that ship has sailed.'
    expected_string_io = StringIO()
    expected_string_io.write('===Flaky Test Report===\n\n')
    expected_string_io.write(flaky_report)
    expected_string_io.write('\n===End Flaky Test Report===\n')
    mock_io.write(flaky_report)
    flaky_plugin.pytest_terminal_summary(string_io)
    assert string_io.getvalue() == expected_string_io.getvalue()


@pytest.fixture(params=(
    {},
    {'flaky_report': ''},
    {'flaky_report': 'ŝȁḿҏľȅ ƭȅхƭ'},
))
def mock_xdist_node_workeroutput(request):
    # Covers missing, empty, and non-ASCII worker reports.
    return request.param


@pytest.fixture(params=(None, object()))
def mock_xdist_error(request):
    return request.param


@pytest.mark.parametrize('assign_workeroutput', (True, False))
def test_flaky_xdist_nodedown(
    mock_xdist_node_workeroutput,
    assign_workeroutput,
    mock_xdist_error
):
    # A downed xdist node's flaky_report (if any) is copied into the
    # master plugin's stream; nodes without output write nothing.
    flaky_xdist = FlakyXdist(PLUGIN)
    node = Mock()
    if assign_workeroutput:
        node.workeroutput = mock_xdist_node_workeroutput
    else:
        delattr(node, 'workeroutput')
        delattr(node, 'slaveoutput')
    mock_stream = Mock(StringIO)
    with patch.object(PLUGIN, '_stream', mock_stream):
        flaky_xdist.pytest_testnodedown(node, mock_xdist_error)
    if assign_workeroutput and 'flaky_report' in mock_xdist_node_workeroutput:
        mock_stream.write.assert_called_once_with(
            mock_xdist_node_workeroutput['flaky_report'],
        )
    else:
        assert not mock_stream.write.called


_REPORT_TEXT1 = 'Flaky report text'

_REPORT_TEXT2 = 'Ḿőŕȅ ƒľȁƙŷ ŕȅҏőŕƭ ƭȅхƭ'


@pytest.mark.parametrize('initial_report,stream_report,expected_report', (
    ('', '', ''),
    ('', _REPORT_TEXT1, _REPORT_TEXT1),
    (_REPORT_TEXT1, '', _REPORT_TEXT1),
    (_REPORT_TEXT1, _REPORT_TEXT2, _REPORT_TEXT1 + _REPORT_TEXT2),
    (_REPORT_TEXT2, _REPORT_TEXT1, _REPORT_TEXT2 + _REPORT_TEXT1),
))
def test_flaky_session_finish_copies_flaky_report(
    initial_report,
    stream_report,
    expected_report,
):
    # Session finish appends the stream contents onto any report already
    # present in config.workeroutput.
    PLUGIN.stream.seek(0)
    PLUGIN.stream.truncate()
    PLUGIN.stream.write(stream_report)
    PLUGIN.config = Mock()
    PLUGIN.config.workeroutput = {'flaky_report': initial_report}
    PLUGIN.pytest_sessionfinish()
    assert PLUGIN.config.workeroutput['flaky_report'] == expected_report


def test_flaky_plugin_can_suppress_success_report(
    flaky_test,
    flaky_plugin,
    call_info,
    string_io,
    mock_io,
):
    # With _flaky_success_report disabled, a success is neither handled
    # nor written to the report stream.
    flaky()(flaky_test)
    # pylint:disable=protected-access
    flaky_plugin._flaky_success_report = False
    # pylint:enable=protected-access
    call_info.when = 'call'
    actual_plugin_handles_success = flaky_plugin.add_success(flaky_test)
    assert actual_plugin_handles_success is False
    assert string_io.getvalue() == mock_io.getvalue()


def test_flaky_plugin_raises_errors_in_fixture_setup(
    flaky_test,
    flaky_plugin,
    string_io,
    mock_io,
):
    """
    Test for Issue #57 - fixtures which raise an error should show up as
    test errors.

    This test ensures that exceptions occurring when running a test
    fixture are copied into the call info's excinfo field.
    """
    def error_raising_setup_function(item):
        assert item is flaky_test
        item.ran_setup = True
        return 5 / 0

    flaky()(flaky_test)
    flaky_test.ihook = Mock()
    flaky_test.ihook.pytest_runtest_setup = error_raising_setup_function
    flaky_plugin._call_infos[flaky_test] = {}  # pylint:disable=protected-access
    call_info = runner.call_runtest_hook(flaky_test, 'setup')
    assert flaky_test.ran_setup
    assert string_io.getvalue() == mock_io.getvalue()
    assert call_info.excinfo.type is ZeroDivisionError
class TestFlakyPytestPlugin:
    """Scenario tests driving FlakyPlugin's success/failure handling and the
    exact report text it writes for each outcome."""

    # Name reported in the plugin's stream output for the mock test item.
    _test_method_name = 'test_method'

    def test_flaky_plugin_handles_success(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
    ):
        self._test_flaky_plugin_handles_success(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
        )

    def test_flaky_plugin_handles_success_for_needs_rerun(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_plugin_rerun,
    ):
        # min_passes=2 means one success is not enough -> a rerun is queued.
        self._test_flaky_plugin_handles_success(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            min_passes=2,
        )
        assert mock_plugin_rerun()[0] == flaky_test

    def test_flaky_plugin_ignores_success_for_non_flaky_test(
        self,
        flaky_plugin,
        flaky_test,
        call_info,
        string_io,
        mock_io,
    ):
        flaky_plugin.add_success(flaky_test)
        self._assert_test_ignored(mock_io, string_io, call_info)

    def test_flaky_plugin_ignores_failure_for_non_flaky_test(
        self,
        flaky_plugin,
        flaky_test,
        call_info,
        string_io,
        mock_io,
    ):
        flaky_plugin.add_failure(flaky_test, None)
        self._assert_test_ignored(mock_io, string_io, call_info)

    def test_flaky_plugin_handles_failure(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        self._test_flaky_plugin_handles_failure(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            mock_error,
        )
        assert mock_plugin_rerun()[0] == flaky_test

    def test_flaky_plugin_handles_failure_for_no_more_retries(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
    ):
        # max_runs=1 exhausts the budget immediately; no rerun expected.
        self._test_flaky_plugin_handles_failure(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            mock_error,
            max_runs=1,
        )

    def test_flaky_plugin_handles_additional_failures(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        self._test_flaky_plugin_handles_failure(
            flaky_test,
            flaky_plugin,
            call_info,
            string_io,
            mock_io,
            mock_error,
            current_errors=[None],
        )
        assert mock_plugin_rerun()[0] == flaky_test

    def _assert_flaky_attributes_contains(
        self,
        expected_flaky_attributes,
        test,
    ):
        # Subset check: every expected (attr, value) pair must be present.
        actual_flaky_attributes = self._get_flaky_attributes(test)
        assert all(
            item in actual_flaky_attributes.items()
            for item in expected_flaky_attributes.items()
        )

    def test_flaky_plugin_exits_after_false_rerun_filter(
        self,
        flaky_test,
        flaky_plugin,
        call_info,
        string_io,
        mock_io,
        mock_error,
        mock_plugin_rerun,
    ):
        err_tuple = (mock_error.type, mock_error.value, mock_error.traceback)

        def rerun_filter(err, name, test, plugin):
            # The filter must receive the failure context, then veto reruns.
            assert err == err_tuple
            assert name == flaky_test.name
            assert test is flaky_test
            assert plugin is flaky_plugin
            return False

        flaky(rerun_filter=rerun_filter)(flaky_test)
        call_info.when = 'call'
        actual_plugin_handles_failure = flaky_plugin.add_failure(
            flaky_test,
            mock_error,
        )
        assert actual_plugin_handles_failure is False
        assert not mock_plugin_rerun()
        string_io.writelines([
            self._test_method_name,
            ' failed and was not selected for rerun.',
            '\n\t',
            str(mock_error.type),
            '\n\t',
            str(mock_error.value),
            '\n\t',
            str(mock_error.traceback),
            '\n',
        ])
        assert string_io.getvalue() == mock_io.getvalue()

    @staticmethod
    def _assert_test_ignored(mock_io, string_io, call_info):
        # Non-flaky tests must leave the plugin's report stream untouched.
        assert call_info
        assert mock_io.getvalue() == string_io.getvalue()

    def _test_flaky_plugin_handles_success(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
    ):
        # Drives add_success() from a given pass/run state and checks both
        # the handled/unhandled decision and the exact report text.
        flaky(max_runs, min_passes)(test)
        setattr(
            test,
            FlakyNames.CURRENT_PASSES,
            current_passes,
        )
        setattr(
            test,
            FlakyNames.CURRENT_RUNS,
            current_runs,
        )
        too_few_passes = current_passes + 1 < min_passes
        retries_remaining = current_runs + 1 < max_runs
        expected_plugin_handles_success = too_few_passes and retries_remaining
        info.when = 'call'
        actual_plugin_handles_success = plugin.add_success(test)
        assert expected_plugin_handles_success == actual_plugin_handles_success
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_PASSES: current_passes + 1,
                FlakyNames.CURRENT_RUNS: current_runs + 1,
            },
            test,
        )
        stream.writelines([
            self._test_method_name,
            " passed {} out of the required {} times. ".format(
                current_passes + 1, min_passes,
            ),
        ])
        if expected_plugin_handles_success:
            stream.write(
                'Running test again until it passes {} times.\n'.format(
                    min_passes,
                ),
            )
        else:
            stream.write('Success!\n')
        assert stream.getvalue() == mock_stream.getvalue()

    def _test_flaky_plugin_handles_failure(
        self,
        test,
        plugin,
        info,
        stream,
        mock_stream,
        mock_error,
        current_errors=None,
        current_passes=0,
        current_runs=0,
        max_runs=2,
        min_passes=1,
        rerun_filter=None,
    ):
        # Drives add_failure() from a given error/pass/run state and checks
        # both the handled/unhandled decision and the exact report text.
        flaky(max_runs, min_passes, rerun_filter)(test)
        if current_errors is None:
            current_errors = [None]
        else:
            current_errors.append(None)
        setattr(
            test,
            FlakyNames.CURRENT_ERRORS,
            current_errors,
        )
        setattr(
            test,
            FlakyNames.CURRENT_PASSES,
            current_passes,
        )
        setattr(
            test,
            FlakyNames.CURRENT_RUNS,
            current_runs,
        )
        too_few_passes = current_passes < min_passes
        retries_remaining = current_runs + 1 < max_runs
        expected_plugin_handles_failure = too_few_passes and retries_remaining
        info.when = 'call'
        actual_plugin_handles_failure = plugin.add_failure(
            test,
            mock_error,
        )
        assert expected_plugin_handles_failure == actual_plugin_handles_failure
        self._assert_flaky_attributes_contains(
            {
                FlakyNames.CURRENT_RUNS: current_runs + 1,
                FlakyNames.CURRENT_ERRORS: current_errors
            },
            test,
        )
        if expected_plugin_handles_failure:
            stream.writelines([
                self._test_method_name,
                ' failed ({} runs remaining out of {}).'.format(
                    max_runs - current_runs - 1, max_runs
                ),
                '\n\t',
                str(mock_error.type),
                '\n\t',
                str(mock_error.value),
                '\n\t',
                str(mock_error.traceback),
                '\n',
            ])
        else:
            message = ' failed; it passed {0} out of the required {1} times.'
            stream.writelines([
                self._test_method_name,
                message.format(
                    current_passes,
                    min_passes
                ),
                '\n\t',
                str(mock_error.type),
                '\n\t',
                str(mock_error.value),
                '\n\t',
                str(mock_error.traceback),
                '\n',
            ])
        assert stream.getvalue() == mock_stream.getvalue()

    @staticmethod
    def _get_flaky_attributes(test):
        # Snapshot every FlakyNames attribute currently set on the test item.
        actual_flaky_attributes = {
            attr: getattr(
                test,
                attr,
                None,
            ) for attr in FlakyNames()
        }
        return actual_flaky_attributes
| box/flaky | test/test_pytest/test_flaky_pytest_plugin.py | Python | apache-2.0 | 15,590 |
import asyncio
import discord
import datetime
import pytz
from discord.ext import commands
from Cogs import FuzzySearch
from Cogs import Settings
from Cogs import DisplayName
from Cogs import Message
from Cogs import Nullify
class Time:
    """Time/TimeZone cog: stores a per-user pytz TimeZone or a raw UTC offset
    and answers time queries for members or arbitrary zones/offsets."""

    # Init with the bot reference, and a reference to the settings var
    def __init__(self, bot, settings):
        self.bot = bot
        self.settings = settings

    @commands.command(pass_context=True)
    async def settz(self, ctx, *, tz : str = None):
        """Sets your TimeZone - Overrides your UTC offset - and accounts for DST."""
        if not tz:
            # No zone given - clear the stored TimeZone.
            self.settings.setGlobalUserStat(ctx.author, "TimeZone", None)
            await ctx.channel.send("*{}*, your TimeZone has been removed!".format(DisplayName.name(ctx.author)))
            return

        # Let's get the timezone list
        tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
        if not tz_list[0]['Ratio'] == 1:
            # We didn't find a complete match - offer the closest candidates
            msg = "I couldn't find that TimeZone!\n\nMaybe you meant one of the following?\n```"
            for tz in tz_list:
                msg += tz['Item'] + "\n"
            msg += '```'
            await ctx.channel.send(msg)
            return

        # We got a time zone
        self.settings.setGlobalUserStat(ctx.author, "TimeZone", tz_list[0]['Item'])
        await ctx.channel.send("TimeZone set to *{}!*".format(tz_list[0]['Item']))

    @commands.command(pass_context=True)
    async def listtz(self, ctx, *, tz_search = None):
        """List all the supported TimeZones in PM."""
        if not tz_search:
            msg = "__Available TimeZones:__\n\n"
            for tz in pytz.all_timezones:
                msg += tz + "\n"
        else:
            tz_list = FuzzySearch.search(tz_search, pytz.all_timezones)
            msg = "__Top 3 TimeZone Matches:__\n\n"
            for tz in tz_list:
                msg += tz['Item'] + "\n"

        await Message.say(self.bot, msg, ctx.channel, ctx.author, 1)

    @commands.command(pass_context=True)
    async def tz(self, ctx, *, member = None):
        """See a member's TimeZone."""
        # Check if we're suppressing @here and @everyone mentions
        if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
            suppress = True
        else:
            suppress = False

        if member == None:
            member = ctx.message.author

        if type(member) == str:
            # Try to get a user first
            memberName = member
            member = DisplayName.memberForName(memberName, ctx.message.guild)
            if not member:
                msg = 'Couldn\'t find user *{}*.'.format(memberName)
                # Check for suppress
                if suppress:
                    msg = Nullify.clean(msg)
                await ctx.channel.send(msg)
                return

        # We got one
        timezone = self.settings.getGlobalUserStat(member, "TimeZone")
        if timezone == None:
            msg = '*{}* hasn\'t set their TimeZone yet - they can do so with the `{}settz [Region/City]` command.'.format(DisplayName.name(member), ctx.prefix)
            await ctx.channel.send(msg)
            return

        msg = '*{}\'s* TimeZone is *{}*'.format(DisplayName.name(member), timezone)
        await ctx.channel.send(msg)

    @commands.command(pass_context=True)
    async def setoffset(self, ctx, *, offset : str = None):
        """Set your UTC offset."""
        if offset == None:
            self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", None)
            msg = '*{}*, your UTC offset has been removed!'.format(DisplayName.name(ctx.message.author))
            await ctx.channel.send(msg)
            return

        offset = offset.replace('+', '')
        # Split time string by : and get hour/minute values
        try:
            hours, minutes = map(int, offset.split(':'))
        except Exception:
            try:
                hours = int(offset)
                minutes = 0
            except Exception:
                await ctx.channel.send('Offset has to be in +-H:M!')
                return

        # Store as H:MM - zero-padding minutes so "5:03" doesn't round-trip
        # as "5:3" (previously formatted unpadded).
        off = "{}:{:02d}".format(hours, minutes)
        self.settings.setGlobalUserStat(ctx.message.author, "UTCOffset", off)
        msg = '*{}*, your UTC offset has been set to *{}!*'.format(DisplayName.name(ctx.message.author), off)
        await ctx.channel.send(msg)

    @commands.command(pass_context=True)
    async def offset(self, ctx, *, member = None):
        """See a member's UTC offset."""
        # Check if we're suppressing @here and @everyone mentions
        if self.settings.getServerStat(ctx.message.guild, "SuppressMentions").lower() == "yes":
            suppress = True
        else:
            suppress = False

        if member == None:
            member = ctx.message.author

        if type(member) == str:
            # Try to get a user first
            memberName = member
            member = DisplayName.memberForName(memberName, ctx.message.guild)
            if not member:
                msg = 'Couldn\'t find user *{}*.'.format(memberName)
                # Check for suppress
                if suppress:
                    msg = Nullify.clean(msg)
                await ctx.channel.send(msg)
                return

        # We got one
        offset = self.settings.getGlobalUserStat(member, "UTCOffset")
        if offset == None:
            msg = '*{}* hasn\'t set their offset yet - they can do so with the `{}setoffset [+-offset]` command.'.format(DisplayName.name(member), ctx.prefix)
            await ctx.channel.send(msg)
            return

        # Split time string by : and get hour/minute values
        try:
            hours, minutes = map(int, offset.split(':'))
        except Exception:
            try:
                hours = int(offset)
                minutes = 0
            except Exception:
                await ctx.channel.send('Offset has to be in +-H:M!')
                return

        msg = 'UTC'
        # Apply offset
        if hours > 0:
            # Apply positive offset
            msg += '+{}'.format(offset)
        elif hours < 0:
            # Apply negative offset
            msg += '{}'.format(offset)

        msg = '*{}\'s* offset is *{}*'.format(DisplayName.name(member), msg)
        await ctx.channel.send(msg)

    @commands.command(pass_context=True)
    async def time(self, ctx, *, offset : str = None):
        """Get UTC time +- an offset."""
        if offset == None:
            member = ctx.message.author
        else:
            # Try to get a user first
            member = DisplayName.memberForName(offset, ctx.message.guild)

        if member:
            # We got one
            # Check for timezone first, then fall back to the raw offset
            offset = self.settings.getGlobalUserStat(member, "TimeZone")
            if offset == None:
                offset = self.settings.getGlobalUserStat(member, "UTCOffset")
            if offset == None:
                msg = '*{}* hasn\'t set their TimeZone or offset yet - they can do so with the `{}setoffset [+-offset]` or `{}settz [Region/City]` command.\nThe current UTC time is *{}*.'.format(DisplayName.name(member), ctx.prefix, ctx.prefix, datetime.datetime.utcnow().strftime("%I:%M %p"))
                await ctx.channel.send(msg)
                return

        # At this point - we need to determine if we have an offset - or possibly a timezone passed
        t = self.getTimeFromTZ(offset)
        if t == None:
            # We did not get a zone - try it as a numeric offset
            t = self.getTimeFromOffset(offset)
            if t == None:
                await ctx.channel.send("I couldn't find that TimeZone or offset!")
                return

        if member:
            msg = '{}; where *{}* is, it\'s currently *{}*'.format(t["zone"], DisplayName.name(member), t["time"])
        else:
            msg = '{} is currently *{}*'.format(t["zone"], t["time"])

        # Say message
        await ctx.channel.send(msg)

    def getTimeFromOffset(self, offset):
        """Return {'zone', 'time'} for UTC shifted by a +-H:M offset string,
        or None if the string can't be parsed."""
        offset = offset.replace('+', '')
        # Split time string by : and get hour/minute values
        try:
            hours, minutes = map(int, offset.split(':'))
        except Exception:
            try:
                hours = int(offset)
                minutes = 0
            except Exception:
                return None

        msg = 'UTC'
        # Get the time
        t = datetime.datetime.utcnow()
        # Apply offset
        if hours > 0:
            # Apply positive offset
            msg += '+{}'.format(offset)
            td = datetime.timedelta(hours=hours, minutes=minutes)
            newTime = t + td
        elif hours < 0:
            # Apply negative offset. The minutes component is parsed unsigned
            # ("-5:30" -> hours=-5, minutes=30), so subtract |hours| plus the
            # minutes together (previously the minutes were negated, turning
            # UTC-5:30 into UTC-4:30).
            msg += '{}'.format(offset)
            td = datetime.timedelta(hours=(-1 * hours), minutes=minutes)
            newTime = t - td
        else:
            # No offset
            newTime = t

        return { "zone" : msg, "time" : newTime.strftime("%I:%M %p") }

    def getTimeFromTZ(self, tz):
        """Return {'zone', 'time'} for an exact pytz zone name, else None."""
        # Assume sanitized zones - as they're pulled from pytz
        # Let's get the timezone list
        tz_list = FuzzySearch.search(tz, pytz.all_timezones, None, 3)
        if not tz_list[0]['Ratio'] == 1:
            # We didn't find a complete match
            return None
        zone = pytz.timezone(tz_list[0]['Item'])
        zone_now = datetime.datetime.now(zone)
        return { "zone" : tz_list[0]['Item'], "time" : zone_now.strftime("%I:%M %p") }
import pyblish.api
@pyblish.api.log
class ValidateDeadlineFramePadding(pyblish.api.Validator):
    """ Validates the existence of four digit frame padding
        ('%04d or ####') in output.
    """

    families = ['deadline.render']
    label = 'Frame Padding'
    optional = True

    def process(self, instance):
        """Fail if a frame-range render's output path lacks '####'/'%04d'."""
        try:
            # skipping the call up project
            ftrack_data = instance.context.data('ftrackData')
            if ftrack_data['Project']['code'] == 'the_call_up':
                return
        except Exception:
            # Best-effort exemption: missing or oddly-shaped ftrack data just
            # means the skip does not apply. A bare ``except:`` would also
            # swallow SystemExit/KeyboardInterrupt, so catch Exception only.
            pass

        # Only frame ranges ("1-10") produce per-frame files needing padding.
        if '-' in instance.data('deadlineFrames'):
            path = instance.data('deadlineData')['job']['OutputFilename0']
            msg = "Couldn't find any frame padding string ('%04d or ####')"
            msg += " in output on %s" % instance
            assert '####' in path or '%04d' in path, msg
| ProgressiveFX/pyblish-pfx | pyblish_pfx/plugins/bin/validate_deadline_frame_padding.py | Python | lgpl-3.0 | 862 |
# Copyright 2019 The dm_env Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
| deepmind/dm_env | examples/__init__.py | Python | apache-2.0 | 662 |
import logging
import warnings
import salt.utils.url
from salt.serializers.yamlex import deserialize
log = logging.getLogger(__name__)
def render(sls_data, saltenv="base", sls="", **kws):
    """
    Accepts YAML_EX as a string or as a file object and runs it through the YAML_EX
    parser.

    :rtype: A Python data structure
    """
    # Record (rather than print) any warnings raised during deserialization.
    with warnings.catch_warnings(record=True) as caught_warnings:
        rendered = deserialize(sls_data) or {}

    for caught in caught_warnings:
        log.warning(
            "%s found in %s saltenv=%s",
            caught.message,
            salt.utils.url.create(sls),
            saltenv,
        )

    log.debug("Results of SLS rendering: \n%s", rendered)
    return rendered
| saltstack/salt | salt/renderers/yamlex.py | Python | apache-2.0 | 735 |
# Copyright (c) by it's authors.
# Some rights reserved. See LICENSE, AUTHORS.
from ..services.invoiceGenerator import *
from wallaby.common.document import *
import wallaby.backends.couchdb as couchdb
def run(appPath, options):
    # Entry point: configures CouchDB access from CLI options, builds the
    # InvoiceGenerator service, and schedules it on the twisted reactor.
    if options.db == None and not options.fx:
        print "The --db options is required!"
        from twisted.internet import reactor
        reactor.stop()
        return

    if options.fx:
        # --fx is a shortcut for the hosted "crm" database on relax.freshx.de.
        options.db = "crm"
        options.server = "https://relax.freshx.de"
        options.couchPort = "443"

    couchdb.Database.setURLForDatabase(options.db, options.server + ":" + options.couchPort)

    if options.username is not None and options.password is not None:
        couchdb.Database.setLoginForDatabase(options.db, options.username, options.password)

    # Generator configuration document: database, LaTeX template, sender doc,
    # plus the flag/view names the generator polls for pending invoices.
    invoiceGeneratorConfig = Document(data={'databaseName':options.db,'templateFile':os.path.join(appPath, "invoices", "template.tex"),'senderDocID':'company'})
    invoiceGeneratorConfig.set('flag.key', 'generateInvoicePDF')
    invoiceGeneratorConfig.set('flag.generate', 'generate')
    invoiceGeneratorConfig.set('flag.inProgress', 'inProgress')
    invoiceGeneratorConfig.set('flag.error', 'error')
    invoiceGeneratorConfig.set('flag.success', 'success')
    invoiceGeneratorConfig.set('view.inProgress', '_design/couchapp/_view/invoiceGenerationInProgress')
    invoiceGeneratorConfig.set('view.pending', '_design/couchapp/_view/pendingInvoiceGeneration')

    invoiceGenerator = InvoiceGenerator(invoiceGeneratorConfig)

    # Start the CouchDB changes feed and the generator once the reactor runs.
    from twisted.internet import reactor
    reactor.callLater(0, couchdb.Database.getDatabase(options.db).changes)
    reactor.callLater(0, invoiceGenerator.initialize)
| FreshXOpenSource/wallaby-app-crm | wallaby/apps/crm/scripts/invoiceGenerator.py | Python | bsd-2-clause | 1,712 |
import optparse
import os
import shutil
import sys
import unittest
from itertools import izip
from . import util
from . import stats
#=============================================================================
# common utility functions for testing
def clean_dir(path):
    """Remove the directory tree at *path*, if it exists."""
    if not os.path.exists(path):
        return
    shutil.rmtree(path)
def makedirs(path):
    """Create directory *path* (with parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path)
def make_clean_dir(path):
    """Recreate *path* as an empty directory (removing any prior contents)."""
    if os.path.exists(path):
        shutil.rmtree(path)
    os.makedirs(path)
def fequal(f1, f2, rel=.0001, eabs=1e-12):
    """Assert that two floats are approximately equal.

    Passes when the values are exactly equal, within absolute tolerance
    *eabs*, or within relative tolerance *rel* (measured against f2, or
    against the nonzero value when one operand is zero).
    """
    if f1 == f2:
        return
    if abs(f1 - f2) < eabs:
        return

    # Use absolute magnitudes: the old code kept the sign, so a large
    # NEGATIVE error (e.g. fequal(-100, 0)) silently passed the `< rel` check.
    if f2 == 0:
        err = abs(f1)
    elif f1 == 0:
        err = abs(f2)
    else:
        err = abs(f1 - f2) / abs(f2)

    assert err < rel, "%e != %e [rel=%f, abs=%f]" % (f1, f2, err, abs(f1 - f2))
def fequals(f1, f2, rel=.0001, eabs=1e-12):
    """Assert pairwise approximate equality of two float sequences."""
    for left, right in zip(f1, f2):
        fequal(left, right, rel=rel, eabs=eabs)
def integrate(func, a, b, step):
    """Left-Riemann numeric integral of *func* over [a, b) with width *step*."""
    return sum(func(x) * step for x in util.frange(a, b, step))
def eq_sample_pdf(samples, pdf,
                  ndivs=20, start=-util.INF, end=util.INF, pval=.05,
                  step=None):
    """Asserts a sample matches a probability density distribution"""
    # Default bin width: the sample range split into ndivs bins.
    if step is None:
        step = (max(samples) - min(samples)) / float(ndivs)

    # Approximate the CDF over one bin by integrating the pdf at 1/10 step.
    cdf = lambda x, params: integrate(pdf, x, x+step, step/10.0)
    chi2, p = stats.chi_square_fit(cdf, [], samples,
                                   ndivs=ndivs, start=start, end=end)
    # Fail when the goodness-of-fit p-value drops below the threshold.
    assert p >= pval, p


def eq_sample_pmf(samples, pmf, pval=.05):
    """Asserts a sample matches a probability mass distribution"""
    import scipy.stats

    hist = util.hist_dict(samples)
    total = sum(hist.itervalues())

    observed = []
    expected = []
    for sample, count in hist.iteritems():
        # Standard chi-square validity rule: only use cells with >= 5 counts.
        if count >= 5:
            observed.append(count)
            expected.append(pmf(sample) * total)

    chi2, p = scipy.stats.chisquare(
        scipy.array(observed), scipy.array(expected))
    assert p >= pval, p
# Module-level switch consulted by pause(); toggled via set_pausing().
_do_pause = True


def pause(text="press enter to continue: "):
    """Pause until the user presses enter"""
    if _do_pause:
        sys.stderr.write(text)
        raw_input()


def set_pausing(enabled=True):
    # Enable/disable interactive pausing (disable for automated runs).
    global _do_pause
    _do_pause = enabled


#=============================================================================
# common unittest functions

def list_tests(stack=0):
    # Print "Class.test_method" for every TestCase in __main__, with the
    # first docstring line as a summary when present.
    # get environment
    var = __import__("__main__").__dict__

    for name, obj in var.iteritems():
        if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
            for attr in dir(obj):
                if attr.startswith("test"):
                    print "%s.%s" % (name, attr),
                    doc = getattr(obj, attr).__doc__
                    if doc:
                        print "--", doc.split("\n")[0]
                    else:
                        print
def test_main():
    """Parse command-line options, then list or run this module's unittests."""
    parser = optparse.OptionParser()
    parser.add_option("-v", "--verbose", action="store_true",
                      help="Verbose output")
    parser.add_option("-q", "--quiet", action="store_true",
                      help="Minimal output")
    parser.add_option("-l", "--list_tests", action="store_true")
    parser.add_option("-p", "--pause", action="store_true")
    conf, extra = parser.parse_args()

    if conf.list_tests:
        list_tests(1)
        return

    set_pausing(bool(conf.pause))

    # Forward the remaining flags to unittest's own argument parser.
    unittest_argv = [sys.argv[0]]
    if conf.verbose:
        unittest_argv.append("-v")
    if conf.quiet:
        unittest_argv.append("-q")
    unittest_argv.extend(extra)

    unittest.main(argv=unittest_argv)
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(BitcoinTestFramework):
    """Test merkle block fetch/validation (gettxoutproof / verifytxoutproof)."""

    def setup_chain(self):
        """Create a clean 4-node chain directory."""
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 4)

    def setup_network(self):
        """Start 4 connected nodes; node 3 runs with -txindex."""
        self.nodes = []
        # Nodes 0/1 are "wallet" nodes
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
        # Nodes 2/3 are used for testing
        self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
        self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
        connect_nodes(self.nodes[0], 1)
        connect_nodes(self.nodes[0], 2)
        connect_nodes(self.nodes[0], 3)

        self.is_network_split = False
        self.sync_all()

    def run_test(self):
        # BUGFIX: use the print() function for consistency with setup_chain();
        # the bare print statement is a syntax error under Python 3.
        print("Mining blocks...")
        self.nodes[0].generate(105)
        self.sync_all()

        chain_height = self.nodes[1].getblockcount()
        assert_equal(chain_height, 105)
        assert_equal(self.nodes[1].getbalance(), 0)
        assert_equal(self.nodes[2].getbalance(), 0)

        # Create two transactions in the same (next) block.
        node0utxos = self.nodes[0].listunspent(1)
        tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
        tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 500})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
        # Proof for an unconfirmed transaction must fail.
        assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])

        self.nodes[0].generate(1)
        blockhash = self.nodes[0].getblockhash(chain_height + 1)
        self.sync_all()

        txlist = []
        blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
        txlist.append(blocktxn[1])
        txlist.append(blocktxn[2])

        # Proofs for confirmed transactions verify, with and without the hint.
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)

        # Spend one of the two outputs so one tx becomes fully spent.
        txin_spent = self.nodes[1].listunspent(1).pop()
        tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 500})
        self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
        self.nodes[0].generate(1)
        self.sync_all()

        txid_spent = txin_spent["txid"]
        txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2

        # We can't find the block from a fully-spent tx
        # Doesn't apply to TDC Core - we have txindex always on
        # assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
        # ...but we can if we specify the block
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
        # ...or if the first tx is not fully-spent
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
        try:
            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
        except JSONRPCException:
            assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
        # ...or if we have a -txindex
        assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
# Run the functional test when executed directly.
if __name__ == '__main__':
    MerkleBlockTest().main()
| trading-dev/trading-coin | qa/rpc-tests/merkle_blocks.py | Python | mit | 4,051 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_monitor_https
short_description: Manages F5 BIG-IP GTM https monitors
description:
- Manages F5 BIG-IP GTM https monitors.
version_added: 2.6
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp)
parent on the C(Common) partition.
default: /Common/https
send:
description:
- The send string for the monitor call.
- When creating a new monitor, if this parameter is not provided, the
default of C(GET /\r\n) will be used.
receive:
description:
- The receive string for the monitor call.
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, then a C(port) number must be specified.
port:
description:
- Port address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'. Note that if specifying an IP address, a value between 1 and 65535
must be specified.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run.
- If this parameter is not provided when creating a new monitor, then
the default value will be 30.
- This value B(must) be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second.
- If this parameter is not provided when creating a new monitor, then the
default value will be 120.
partition:
description:
- Device partition to manage resources on.
default: Common
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
probe_timeout:
description:
- Specifies the number of seconds after which the system times out the probe request
to the system.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(5).
ignore_down_response:
description:
- Specifies that the monitor allows more than one probe attempt per interval.
- When C(yes), specifies that the monitor ignores down responses for the duration of
the monitor timeout. Once the monitor timeout is reached without the system receiving
an up response, the system marks the object down.
- When C(no), specifies that the monitor immediately marks an object down when it
receives a down response.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(no).
type: bool
transparent:
description:
- Specifies whether the monitor operates in transparent mode.
- A monitor in transparent mode directs traffic through the associated pool members
or nodes (usually a router or firewall) to the aliased destination (that is, it
probes the C(ip)-C(port) combination specified in the monitor).
- If the monitor cannot successfully reach the aliased destination, the pool member
or node through which the monitor traffic was sent is marked down.
- When creating a new monitor, if this parameter is not provided, then the default
value will be C(no).
type: bool
reverse:
description:
- Instructs the system to mark the target resource down when the test is successful.
        This setting is useful when, for example, the content on your web site home page is
        dynamic and changes frequently; in that case you may want to set up a reverse ECV
        service check that looks for the string Error.
- A match for this string means that the web server was down.
- To use this option, you must specify values for C(send) and C(receive).
type: bool
target_username:
description:
- Specifies the user name, if the monitored target requires authentication.
target_password:
description:
- Specifies the password, if the monitored target requires authentication.
update_password:
description:
- C(always) will update passwords if the C(target_password) is specified.
- C(on_create) will only set the password for newly created monitors.
default: always
choices:
- always
- on_create
cipher_list:
description:
- Specifies the list of ciphers for this monitor.
- The items in the cipher list are separated with the colon C(:) symbol.
- When creating a new monitor, if this parameter is not specified, the default
list is C(DEFAULT:+SHA:+3DES:+kEDH).
compatibility:
description:
- Specifies, when enabled, that the SSL options setting (in OpenSSL) is set to B(all).
- When creating a new monitor, if this value is not specified, the default is
C(yes)
type: bool
client_cert:
description:
- Specifies a fully-qualified path for a client certificate that the monitor sends to
the target SSL server.
client_key:
description:
- Specifies a key for a client certificate that the monitor sends to the target SSL server.
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create a GTM HTTPS monitor
bigip_gtm_monitor_https:
name: my_monitor
ip: 1.1.1.1
port: 80
send: my send string
receive: my receive string
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Remove HTTPS Monitor
bigip_gtm_monitor_https:
name: my_monitor
state: absent
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Add HTTPS monitor for all addresses, port 514
bigip_gtm_monitor_https:
name: my_monitor
server: lb.mydomain.com
user: admin
port: 514
password: secret
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: https
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
port:
description: The new port the monitor checks the resource on.
returned: changed
type: string
sample: 8080
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
ignore_down_response:
description: Whether to ignore the down response or not.
returned: changed
type: bool
sample: True
send:
description: The new send string for this monitor.
returned: changed
type: string
sample: tcp string to send
receive:
description: The new receive string for this monitor.
returned: changed
type: string
sample: tcp string to receive
probe_timeout:
description: The new timeout in which the system will timeout the monitor probe.
returned: changed
type: int
sample: 10
reverse:
description: The new value for whether the monitor operates in reverse mode.
returned: changed
type: bool
sample: False
transparent:
description: The new value for whether the monitor operates in transparent mode.
returned: changed
type: bool
sample: False
cipher_list:
description: The new value for the cipher list.
returned: changed
type: string
sample: +3DES:+kEDH
compatibility:
description: The new SSL compatibility setting.
returned: changed
type: bool
sample: True
client_cert:
description: The new client cert setting.
returned: changed
type: string
sample: /Common/default
client_key:
description: The new client key setting.
returned: changed
type: string
sample: /Common/default
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.icontrol import module_provisioned
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.icontrol import module_provisioned
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
    """Base mapping between Ansible module options and BIG-IP REST attributes.

    Subclasses refine how values are read from the API (ApiParameters) or
    from the user (ModuleParameters).
    """

    # REST API attribute name -> module parameter name.
    api_map = {
        'defaultsFrom': 'parent',
        'ignoreDownResponse': 'ignore_down_response',
        'probeTimeout': 'probe_timeout',
        'recv': 'receive',
        'username': 'target_username',
        'password': 'target_password',
        'cipherlist': 'cipher_list',
        'cert': 'client_cert',
        'key': 'client_key',
    }

    # Attributes sent to the REST API on create/update.
    api_attributes = [
        'defaultsFrom',
        'interval',
        'timeout',
        'destination',
        'transparent',
        'probeTimeout',
        'ignoreDownResponse',
        'reverse',
        'send',
        'recv',
        'username',
        'password',
        'cipherlist',
        'compatibility',
        'cert',
        'key',
    ]

    # Values reported back to the user in the module result.
    returnables = [
        'parent',
        'ip',
        'port',
        'interval',
        'timeout',
        'transparent',
        'probe_timeout',
        'ignore_down_response',
        'send',
        'receive',
        'reverse',
        'cipher_list',
        'compatibility',
        'client_cert',
        'client_key',
    ]

    # Parameters compared by Difference to decide whether to PATCH.
    updatables = [
        'destination',
        'interval',
        'timeout',
        'transparent',
        'probe_timeout',
        'ignore_down_response',
        'send',
        'receive',
        'reverse',
        'ip',
        'port',
        'target_username',
        'target_password',
        'cipher_list',
        'compatibility',
        'client_cert',
        'client_key',
    ]
class ApiParameters(Parameters):
    """Parameters as represented by the BIG-IP REST API.

    Splits the combined ``destination`` into ip/port and converts the API's
    'enabled'/'disabled' strings into booleans.
    """

    @property
    def ip(self):
        # destination is "ip:port"; return only the address part.
        address, _ = self._values['destination'].split(':')
        return address

    @property
    def port(self):
        _, raw_port = self._values['destination'].split(':')
        try:
            return int(raw_port)
        except ValueError:
            # Wildcard ports such as '*' are kept as strings.
            return raw_port

    @property
    def ignore_down_response(self):
        flag = self._values['ignore_down_response']
        if flag is None:
            return None
        return flag != 'disabled'

    @property
    def transparent(self):
        flag = self._values['transparent']
        if flag is None:
            return None
        return flag != 'disabled'

    @property
    def reverse(self):
        flag = self._values['reverse']
        if flag is None:
            return None
        return flag != 'disabled'

    @property
    def compatibility(self):
        flag = self._values['compatibility']
        if flag is None:
            return None
        return flag != 'disabled'
class ModuleParameters(Parameters):
    """Parameters as supplied by the user, validated and normalized."""

    @property
    def interval(self):
        """Monitor interval in seconds, validated to lie in 1..86400."""
        if self._values['interval'] is None:
            return None
        interval = int(self._values['interval'])
        # BUGFIX: the original chained comparison ``1 > x > 86400`` can never
        # be true, so out-of-range intervals were silently accepted.
        if interval < 1 or interval > 86400:
            raise F5ModuleError(
                "Interval value must be between 1 and 86400"
            )
        return interval

    @property
    def timeout(self):
        if self._values['timeout'] is None:
            return None
        return int(self._values['timeout'])

    @property
    def ip(self):
        """IP address part of the destination; '*' means any address."""
        if self._values['ip'] is None:
            return None
        if self._values['ip'] in ['*', '0.0.0.0']:
            return '*'
        elif is_valid_ip(self._values['ip']):
            return self._values['ip']
        else:
            raise F5ModuleError(
                "The provided 'ip' parameter is not an IP address."
            )

    @property
    def parent(self):
        """Fully-qualified name of the parent monitor template."""
        if self._values['parent'] is None:
            return None
        result = fq_name(self.partition, self._values['parent'])
        return result

    @property
    def port(self):
        """Port part of the destination; '*' means any port."""
        if self._values['port'] is None:
            return None
        elif self._values['port'] == '*':
            return '*'
        return int(self._values['port'])

    @property
    def destination(self):
        """Combined 'ip:port' string expected by the REST API."""
        if self.ip is None and self.port is None:
            return None
        destination = '{0}:{1}'.format(self.ip, self.port)
        return destination

    @destination.setter
    def destination(self, value):
        ip, port = value.split(':')
        self._values['ip'] = ip
        self._values['port'] = port

    @property
    def probe_timeout(self):
        if self._values['probe_timeout'] is None:
            return None
        return int(self._values['probe_timeout'])

    @property
    def type(self):
        # Monitor type is fixed for this module.
        return 'https'

    @property
    def client_cert(self):
        """Client certificate path, normalized to end with '.crt'."""
        if self._values['client_cert'] is None:
            return None
        if self._values['client_cert'] == '':
            # Empty string explicitly clears the certificate.
            return ''
        result = fq_name(self.partition, self._values['client_cert'])
        if not result.endswith('.crt'):
            result += '.crt'
        return result

    @property
    def client_key(self):
        """Client key path, normalized to end with '.key'."""
        if self._values['client_key'] is None:
            return None
        if self._values['client_key'] == '':
            # Empty string explicitly clears the key.
            return ''
        result = fq_name(self.partition, self._values['client_key'])
        if not result.endswith('.key'):
            result += '.key'
        return result
class Changes(Parameters):
    """Parameter container that can serialize itself for the module result."""

    def to_return(self):
        """Return the filtered dict of returnable values.

        Any exception while collecting values is swallowed so a partial
        result is still reported.
        """
        collected = {}
        try:
            for key in self.returnables:
                collected[key] = getattr(self, key)
            collected = self._filter_params(collected)
        except Exception:
            pass
        return collected
class UsableChanges(Changes):
    """Changes formatted for submission to the BIG-IP REST API.

    Booleans are rendered as the API's 'enabled'/'disabled' strings.
    """

    @staticmethod
    def _flag(value):
        # Tri-state: None passes through; True -> 'enabled', else 'disabled'.
        if value is None:
            return None
        return 'enabled' if value is True else 'disabled'

    @property
    def transparent(self):
        return self._flag(self._values['transparent'])

    @property
    def ignore_down_response(self):
        return self._flag(self._values['ignore_down_response'])

    @property
    def reverse(self):
        return self._flag(self._values['reverse'])

    @property
    def compatibility(self):
        return self._flag(self._values['compatibility'])
class ReportableChanges(Changes):
    """Changes re-formatted for reporting back to the user.

    The API's 'enabled'/'disabled' strings become booleans and the combined
    destination is split back into ip/port.
    """

    @property
    def ip(self):
        address, _ = self._values['destination'].split(':')
        return address

    @property
    def port(self):
        _, raw_port = self._values['destination'].split(':')
        return int(raw_port)

    @property
    def transparent(self):
        return self._values['transparent'] == 'enabled'

    @property
    def ignore_down_response(self):
        return self._values['ignore_down_response'] == 'enabled'

    @property
    def reverse(self):
        return self._values['reverse'] == 'enabled'

    @property
    def compatibility(self):
        return self._values['compatibility'] == 'enabled'
class Difference(object):
    """Compute which parameters differ between desired (want) and current
    (have) state.

    ``compare`` first looks for a dedicated property handling a parameter's
    special-case semantics, then falls back to plain inequality.
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged."""
        try:
            result = getattr(self, param)
            return result
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic comparison for parameters without a dedicated property.
        attr1 = getattr(self.want, param)
        try:
            attr2 = getattr(self.have, param)
            if attr1 != attr2:
                return attr1
        except AttributeError:
            # 'have' lacks the attribute; report the desired value as a change.
            return attr1

    @property
    def parent(self):
        # The parent template is create-only on BIG-IP; refuse to change it.
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent monitor cannot be changed"
            )

    @property
    def destination(self):
        # Fill in whichever of ip/port the user omitted from current state,
        # then compare the combined destination strings.
        if self.want.ip is None and self.want.port is None:
            return None
        if self.want.port is None:
            self.want.update({'port': self.have.port})
        if self.want.ip is None:
            self.want.update({'ip': self.have.ip})
        if self.want.port in [None, '*'] and self.want.ip != '*':
            raise F5ModuleError(
                "Specifying an IP address requires that a port number be specified"
            )
        if self.want.destination != self.have.destination:
            return self.want.destination

    @property
    def interval(self):
        # Enforce interval < timeout across the mix of new and existing values.
        if self.want.timeout is not None and self.want.interval is not None:
            if self.want.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.timeout is not None:
            if self.have.interval >= self.want.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        elif self.want.interval is not None:
            if self.want.interval >= self.have.timeout:
                raise F5ModuleError(
                    "Parameter 'interval' must be less than 'timeout'."
                )
        if self.want.interval != self.have.interval:
            return self.want.interval

    @property
    def target_password(self):
        # Only report a password change when the user asked to always update;
        # 'on_create' leaves existing monitors untouched.
        if self.want.target_password != self.have.target_password:
            if self.want.update_password == 'always':
                result = self.want.target_password
                return result

    @property
    def client_cert(self):
        # '' (explicit clear) vs None (unset) are equivalent here.
        if self.have.client_cert is None and self.want.client_cert == '':
            return None
        if self.have.client_cert != self.want.client_cert:
            return self.want.client_cert

    @property
    def client_key(self):
        # '' (explicit clear) vs None (unset) are equivalent here.
        if self.have.client_key is None and self.want.client_key == '':
            return None
        if self.have.client_key != self.want.client_key:
            return self.want.client_key
class ModuleManager(object):
    """Orchestrates CRUD for the GTM HTTPS monitor over iControl REST."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = kwargs.get('client', None)
        self.want = ModuleParameters(params=self.module.params)
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # Seed self.changes with every user-supplied value (used on create).
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        """Diff want vs have; record changes and return True when any exist."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                # Some comparisons return a dict of related changes at once.
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        """Return True when the device state differs from the desired state."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def _announce_deprecations(self, result):
        # Forward any deprecation warnings collected during processing.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def _set_default_creation_values(self):
        # Apply the documented defaults for parameters the user omitted.
        if self.want.timeout is None:
            self.want.update({'timeout': 120})
        if self.want.interval is None:
            self.want.update({'interval': 30})
        if self.want.probe_timeout is None:
            self.want.update({'probe_timeout': 5})
        if self.want.ip is None:
            self.want.update({'ip': '*'})
        if self.want.port is None:
            self.want.update({'port': '*'})
        if self.want.ignore_down_response is None:
            self.want.update({'ignore_down_response': False})
        if self.want.transparent is None:
            self.want.update({'transparent': False})
        if self.want.send is None:
            self.want.update({'send': 'GET /\r\n'})
        if self.want.cipher_list is None:
            self.want.update({'cipher_list': 'DEFAULT:+SHA:+3DES:+kEDH'})
        if self.want.compatibility is None:
            self.want.update({'compatibility': True})

    def exec_module(self):
        """Main entry: apply the desired state and return the result dict."""
        if not module_provisioned(self.client, 'gtm'):
            raise F5ModuleError(
                "GTM must be provisioned to use this module."
            )
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def present(self):
        """Ensure the monitor exists with the desired settings."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        """Ensure the monitor is removed."""
        if self.exists():
            return self.remove()
        return False

    def update(self):
        """Update the existing monitor; returns True when anything changed."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the monitor, verifying it is actually gone."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Create the monitor with defaults applied for omitted options."""
        self._set_default_creation_values()
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def exists(self):
        """Return True when the monitor already exists on the device."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as "does not exist".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def create_on_device(self):
        """POST the new monitor definition to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['selfLink']

    def update_on_device(self):
        """PATCH only the changed attributes onto the existing monitor."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def remove_from_device(self):
        """DELETE the monitor from the device."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """GET the monitor's current state and wrap it in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/gtm/monitor/https/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name),
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Builds the AnsibleModule argument spec for this module.

    Combines the module-specific options with the shared F5 connection
    arguments (f5_argument_spec).
    """

    def __init__(self):
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(required=True),
            parent=dict(default='/Common/https'),
            send=dict(),
            receive=dict(),
            ip=dict(),
            port=dict(type='int'),
            interval=dict(type='int'),
            timeout=dict(type='int'),
            ignore_down_response=dict(type='bool'),
            transparent=dict(type='bool'),
            probe_timeout=dict(type='int'),
            reverse=dict(type='bool'),
            target_username=dict(),
            target_password=dict(no_log=True),
            cipher_list=dict(),
            compatibility=dict(type='bool'),
            client_cert=dict(),
            client_key=dict(),
            update_password=dict(
                default='always',
                choices=['always', 'on_create']
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Shared F5 connection options first, then the module-specific ones.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, emit the result."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )

    client = F5RestClient(**module.params)

    try:
        mm = ModuleManager(module=module, client=client)
        results = mm.exec_module()
        # Release the REST auth token on both success and failure paths.
        cleanup_tokens(client)
        exit_json(module, results, client)
    except F5ModuleError as ex:
        cleanup_tokens(client)
        fail_json(module, ex, client)


if __name__ == '__main__':
    main()
| yfried/ansible | lib/ansible/modules/network/f5/bigip_gtm_monitor_https.py | Python | gpl-3.0 | 30,003 |
from task_board.tasks.models import Task
def create_sample_tasks(owner=None, count=15):
    """Populate the task board with *count* placeholder tasks.

    :param owner: user recorded as ``created_by`` on each task; required
        (kept as a keyword with a None default for backward compatibility).
    :param count: number of tasks to create.
    :raises ValueError: when no owner is provided.

    BUGFIX: the original used ``assert`` for input validation, which is
    silently stripped when Python runs with ``-O``; raise explicitly instead.
    """
    if not owner:
        raise ValueError("Please specify the owner for tasks.")
    for idx in range(count):
        task_data = dict(
            name='Task number %d' % (idx + 1),
            description='Cras sit amet nibh libero, in gravida nulla. Nulla vel metus scelerisque ante '
                        'sollicitudin commodo. Cras purus odio, vestibulum in vulputate at, tempus viverra '
                        'turpis. Fusce condimentum nunc ac nisi vulputate fringilla. Donec lacinia congue '
                        'felis in faucibus.',
            created_by=owner,
        )
        Task.objects.create(**task_data)
| AlexanderKaluzhny/taskboard | task_board/tasks/utils.py | Python | mit | 699 |
#!/usr/bin/python
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
import libcloud.security
# WARNING: disabling certificate verification exposes every libcloud request
# to man-in-the-middle attacks; acceptable only against a lab endpoint.
libcloud.security.VERIFY_SSL_CERT = False

# Keystone v2 password credentials for the target OpenStack cloud.
# NOTE(review): hard-coded credentials — presumably demo values; consider
# reading them from environment variables instead.
auth_username = 'admin'
auth_password = 'thepassword'
auth_url = 'http://172.18.0.10:5000'
project_name = 'admin'

# Build an OpenStack compute driver bound to regionOne for this project.
provider = get_driver(Provider.OPENSTACK)
conn = provider(auth_username,
                auth_password,
                ex_force_auth_url=auth_url,
                ex_force_auth_version='2.0_password',
                ex_force_service_region='regionOne',
                ex_tenant_name=project_name)

# Fetch all flavors (sizes) visible to this project and print one line each.
# Python 2 script: the last line uses the print *statement*.
flavors = conn.list_sizes()
print ("******** Flavor **********")
for flavor in flavors:
    print 'Name: %10s\tID: %40s\tRAM: %4s\tDisk: %5s\tVCPUS: %2s' % (flavor.name, flavor.id, flavor.ram, flavor.disk, flavor.vcpus)
| kionetworks/openstack-api-scripts | libcloud/flavors.py | Python | apache-2.0 | 819 |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # The ``$Format:...$`` placeholders are substituted by ``git archive``
    # (export-subst). setup.py/versioneer.py greps for the variable names,
    # so each assignment must stay on a line of its own.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {"refnames": git_refnames, "full": git_full, "date": git_date}
class VersioneerConfig:
    """Container for Versioneer configuration parameters.

    Attributes are assigned dynamically by get_config().
    """
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were baked in when 'setup.py versioneer' generated
    # _version.py for this project.
    cfg = VersioneerConfig()
    settings = (
        ("VCS", "git"),
        ("style", "pep440"),
        ("tag_prefix", "v"),
        ("parentdir_prefix", "microdrop-"),
        ("versionfile_source", "microdrop/_version.py"),
        ("verbose", False),
    )
    for attr, value in settings:
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Signals the caller to fall through to the next version-discovery method.
    """
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Create a decorator that records a function as HANDLERS[vcs][method]."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Call the given command(s).

    Tries each executable name in *commands* until one spawns; returns a
    ``(stdout, returncode)`` tuple, or ``(None, None)`` when no candidate
    could be launched (or a non-ENOENT launch error occurred).
    """
    assert isinstance(commands, list)
    p = None
    for c in commands:
        try:
            dispcmd = str([c] + args)
            # remember shell=False, so use git.cmd on windows, not just git
            p = subprocess.Popen([c] + args, cwd=cwd, env=env,
                                 stdout=subprocess.PIPE,
                                 stderr=(subprocess.PIPE if hide_stderr
                                         else None))
            break
        except EnvironmentError:
            # Python 2-compatible way to reach the active exception object.
            e = sys.exc_info()[1]
            if e.errno == errno.ENOENT:
                # This candidate isn't installed; try the next name.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(e)
            return None, None
    else:
        # for-else: the loop exhausted all candidates without a successful Popen.
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = p.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        # Popen yields bytes under Python 3; normalize to text.
        stdout = stdout.decode()
    if p.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, p.returncode
    return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes both
    the project name and a version string. We will also support searching up
    two directory levels for an appropriately named parent directory
    """
    tried = []
    # Check the given directory plus up to two ancestors.
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    The code embedded in _version.py can just fetch these keyword values
    directly; setup.py must not import _version.py, so it recovers them
    with a regexp instead. This function is not used from _version.py.
    """
    wanted = (("git_refnames =", "refnames"),
              ("git_full =", "full"),
              ("git_date =", "date"))
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            stripped = line.strip()
            for prefix, key in wanted:
                if stripped.startswith(prefix):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords[key] = mo.group(1)
        f.close()
    except EnvironmentError:
        # Missing or unreadable file: return whatever was collected (if anything).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = {ref for ref in refs if re.search(r'\d', ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        shortver = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % shortver)
        return {"version": shortver,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None,
                "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag, distance, date.  Raises NotThisMethod when `root` is not
    a git checkout or git itself cannot be run.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        # run_command uses shell=False, so the Windows wrappers must be
        # named explicitly instead of relying on PATHEXT resolution.
        GITS = ["git.cmd", "git.exe"]
    # Bail out early if `root` is not inside a git working tree.
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX; the regex is greedy on the tag so embedded hyphens
        # in TAG are handled correctly.
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                              % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                              % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): if this git call fails, run_command returns (None, None)
    # and the [0].strip() below raises AttributeError instead of
    # NotThisMethod -- confirm this path cannot fail once the earlier git
    # invocations succeeded.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    BUG FIX: the "closest-tag" key can be present with value None (the
    no-tag path in git_pieces_from_vcs stores None), in which case the old
    `pieces.get("closest-tag", "")` returned None and `"+" in None` raised
    TypeError.  `or ""` normalizes both the missing and the None cases.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: nothing tagged anywhere in history
        version = "0+untagged.%d.g%s" % (pieces["distance"],
                                         pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if not pieces["distance"]:
        return tag
    return tag + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    if not pieces["closest-tag"]:
        # exception #1: no tag anywhere in history
        version = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        return version + "+g%s" % pieces["short"]
    version = pieces["closest-tag"]
    if pieces["distance"] or pieces["dirty"]:
        version += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            version += ".dev0"
        version += plus_or_dot(pieces) + "g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        if not (pieces["distance"] or pieces["dirty"]):
            return tag
        version = tag + ".post%d" % pieces["distance"]
    else:
        # exception #1
        version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        out = tag
        if pieces["distance"]:
            out += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    return out + "-dirty" if pieces["dirty"] else out
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    if pieces["closest-tag"]:
        out = "%s-%d-g%s" % (pieces["closest-tag"], pieces["distance"],
                             pieces["short"])
    else:
        # exception #1
        out = pieces["short"]
    if pieces["dirty"]:
        out += "-dirty"
    return out
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # an earlier step already failed; propagate its error unchanged
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    # style name -> renderer function
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)
    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose

    # 1) expanded git-archive keywords, if present
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    # 2) ask git itself; first locate the source-tree root from __file__
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass

    # 3) fall back to the parent-directory naming convention
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
| wheeler-microfluidics/microdrop | microdrop/_version.py | Python | bsd-3-clause | 18,457 |
from abc import ABCMeta, abstractmethod
from opencog_b.python.blending.util.blend_logger import blend_log
from opencog_b.python.blending.util.general_util import *
from opencog_b.python.blending.util.blend_config import BlendConfig
__author__ = 'DongMin Kim'
class BaseMaker(object):
    """Abstract base for blend-atom makers.

    :type a: opencog.atomspace_details.AtomSpace
    """
    __metaclass__ = ABCMeta

    # Possible values for last_status.
    Status = enum_simulate(
        'SUCCESS_MAKE',
        'IN_PROCESS',
        'UNKNOWN_ERROR',
        'PARAMETER_ERROR',
        'NOT_ENOUGH_ATOMS'
    )

    def __init__(self, a):
        self.a = a
        self.last_status = self.Status.UNKNOWN_ERROR
        self.make_default_config()
        self.ret = None

    def __str__(self):
        return self.__class__.__name__

    def is_succeeded(self):
        """True iff the last make finished with SUCCESS_MAKE."""
        return self.last_status == BaseMaker.Status.SUCCESS_MAKE

    def make_default_config(self):
        """Register the default naming options for made atoms."""
        BlendConfig().update(self.a, "make-atom-prefix", "")
        BlendConfig().update(self.a, "make-atom-separator", "-")
        BlendConfig().update(self.a, "make-atom-postfix", "")

    @abstractmethod
    def new_blend_make_impl(self, decided_atoms, config_base):
        """Subclass hook that performs the actual make.

        :param decided_atoms: list
        """
        raise NotImplementedError("Please implement this method.")

    def new_blend_make(self, decided_atoms, config_base):
        """Run the subclass implementation, tracking status and logging
        a skip when it raises UserWarning."""
        self.last_status = self.Status.IN_PROCESS
        try:
            self.new_blend_make_impl(decided_atoms, config_base)
        except UserWarning as why:
            blend_log("Skipping make, caused by '" + str(why) + "'")
            blend_log(
                "Last status is '" +
                self.Status.reverse_mapping[self.last_status] +
                "'"
            )
            raise why
        if self.last_status == self.Status.IN_PROCESS:
            self.last_status = self.Status.SUCCESS_MAKE
        return self.ret
| kim135797531/opencog-python-blending | opencog_b/python/blending/maker/base_maker.py | Python | agpl-3.0 | 1,977 |
#!/usr/bin/env python2
#
# Wikipedia queries generator
# developed by Alex Mayer (2014)
#
import re
import helpers
import urllib2
from random import randint, shuffle
from datetime import date
from bingRewards import BingRewards
# English month names indexed by (month number - 1); used to build the
# Wikipedia "On this day" page title below.
MONTH_NAMES = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"]
# Raw-wikitext URL of today's page (action=raw skips the HTML rendering).
# BUG FIX: the day number previously used .strip("0"), which also removes
# TRAILING zeros (day 10 -> "1", 20 -> "2", 30 -> "3") and therefore built
# the wrong page title on those days; lstrip("0") drops only the leading
# zero, which is what Wikipedia's titles require.
QUERY_URL = "https://en.wikipedia.org/wiki/{0}_{1}?action=raw".format(MONTH_NAMES[date.today().month - 1], date.today().strftime("%d").lstrip("0"))
# Matches one "== Section ==" heading plus its body, up to a blank line.
WIKIPEDIA_SECTION_PATTERN = re.compile(r'==([^\n]+)==\n(.+?)\n\n', re.S)
# Matches [[target|label]] / [[label]] wiki links, capturing the shown text.
WIKIPEDIA_LINK_PATTERN = re.compile(r'\[\[(?:[^|\]]*\|)?([a-zA-Z\s]+?)\]\]')
"""
higher weight = higher priority(relativly)
assuming "events" weight is 4 and "births" weight is 1:
if "events" section has 20 links and "births" section has 20 links
"events" section will be favored 4 to 1
with the same weights:
if "events" has 20 links and "births" has 40 links
"events" will be favored 2 to 1
How the math works:
"events" weight is 4 and has 20 links
each link will be added to the search pool 4 times generating 80 event links
after a link is chosen from the pool to be returned as a query
all other instances of the link are removed
"""
# Weight used for any section not listed in DEFAULT_SECTION_WEIGHTS.
DEFAULT_SECTION_WEIGHT = 2
DEFAULT_SECTION_WEIGHTS = {
    "events": 4,
    "holidays and observances": 4,
    "births": 1,
    "deaths": 1
}
class queryGenerator:
    """Generates Bing search queries from today's Wikipedia "On this day"
    page by sampling article links, weighted per section (see the module
    comment above for how duplicate pool entries implement weighting)."""
    def __init__(self, br):
        """
        param br is a pointer to the calling class bingRewards (used for variables)
        """
        if br is None or not isinstance(br, BingRewards):
            raise ValueError("br is not set or is not an instance of BingRewards")
        self.bingRewards = br
    def generateQueries(self, queriesToGenerate, history, maxQueryLen = None):
        """
        Parses the current days wikipedia.com page and generates queries
        from the links on the page.
        param queriesToGenerate the number of queries to return
        param history a set of previous searches
        param maxQueryLen the maximum query length
        returns a set of queries - self.queries
        """
        if queriesToGenerate <= 0:
            raise ValueError("numberOfQueries should be more than 0, but it is %d" % queriesToGenerate)
        if history is None or not isinstance(history, set):
            raise ValueError("history is not set or not an instance of set")
        request = urllib2.Request(url = QUERY_URL, headers = self.bingRewards.httpHeaders)
        # NOTE(review): Python 2 urllib2 response objects historically do not
        # support the context-manager protocol -- confirm that
        # self.bingRewards.opener returns a with-capable response here.
        with self.bingRewards.opener.open(request) as response:
            page = helpers.getResponseBody(response)
        # check that the page has content
        if page.strip() == "":
            raise ValueError("Wikipedia page is empty")
        # convert history to lowercase
        # (note: this rebinds `history` from a set to a list)
        history = [x.strip().lower() for x in history]
        # get sections of the page (ie. Events, Births, Deaths, Holidays)
        rawSections = WIKIPEDIA_SECTION_PATTERN.findall(page)
        if len(rawSections) == 0:
            raise ValueError("Wikipedia page is empty")
        # a list of search terms; duplicates implement section weighting
        searchTerms = []
        for sectionName, conts in rawSections:
            section = sectionName.lower()
            # skip unwanted sections
            if section in ["external links"]:
                continue
            # extract search terms
            rawTerms = WIKIPEDIA_LINK_PATTERN.findall(conts)
            # skip empty sections
            if len(rawTerms) == 0:
                continue
            terms = []
            # check each term against history
            for term in rawTerms:
                # humans search in lowercase
                term = term.lower()
                # check if the term was searched for before
                if term not in history:
                    terms.append(term)
            # entire section is in history... skip it
            if len(terms) == 0:
                continue
            # section search weight
            weight = DEFAULT_SECTION_WEIGHT
            if section in DEFAULT_SECTION_WEIGHTS:
                weight = DEFAULT_SECTION_WEIGHTS[section]
            # add each search term list the number of weighted times
            for i in range(weight):
                searchTerms.extend(terms)
        # randomize the search terms for good measure
        shuffle(searchTerms)
        queries = set()
        queriesNeeded = queriesToGenerate
        # loop until we have enough queries or run out of things to search for
        while queriesNeeded > 0 and len(searchTerms) > 0:
            ri = randint(0, len(searchTerms) - 1)
            # add current term to queries
            queries.add(searchTerms[ri])
            # remove each instance of current term from searchTerms
            searchTerms = filter(lambda x: x != searchTerms[ri], searchTerms)
            queriesNeeded -= 1
        # NOTE(review): maxQueryLen is accepted but never applied -- confirm
        # whether over-long queries should be filtered out before returning.
        return queries
| amayer5125/BingRewards | pkg/queryGenerators/wikipedia.py | Python | lgpl-3.0 | 4,912 |
from bs4 import BeautifulSoup
from couchpotato.core.helpers.encoding import tryUrlencode
from couchpotato.core.helpers.variable import tryInt
from couchpotato.core.logger import CPLog
from couchpotato.core.providers.nzb.base import NZBProvider
from couchpotato.environment import Env
import re
import traceback
log = CPLog(__name__)
class BinSearch(NZBProvider):
    """NZB provider that searches binsearch.info and scrapes the HTML
    results table for matching releases."""
    urls = {
        'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s',
        'detail': 'https://www.binsearch.info%s',
        'search': 'https://www.binsearch.info/index.php?%s',
    }
    # Minimum delay between HTTP requests to the site (rate limiting).
    http_time_between_calls = 4 # Seconds
    def _search(self, movie, quality, results):
        """Search for `movie` (by IMDB identifier) within the quality's size
        bounds and append one dict per acceptable release to `results`."""
        arguments = tryUrlencode({
            'q': movie['library']['identifier'],
            'm': 'n',
            'max': 400,
            'adv_age': Env.setting('retention', 'nzb'),
            'adv_sort': 'date',
            'adv_col': 'on',
            'adv_nfo': 'on',
            'minsize': quality.get('size_min'),
            'maxsize': quality.get('size_max'),
        })
        data = self.getHTMLData(self.urls['search'] % arguments)
        if data:
            try:
                html = BeautifulSoup(data)
                # binsearch renders results in the table with id "r2"
                main_table = html.find('table', attrs = {'id':'r2'})
                if not main_table:
                    return
                items = main_table.find_all('tr')
                for row in items:
                    title = row.find('span', attrs = {'class':'s'})
                    # rows without a title span are headers/spacers
                    if not title: continue
                    nzb_id = row.find('input', attrs = {'type':'checkbox'})['name']
                    info = row.find('span', attrs = {'class':'d'})
                    size_match = re.search('size:.(?P<size>[0-9\.]+.[GMB]+)', info.text)
                    age = 0
                    # best effort: the age cell ("NNd") may be missing or
                    # unparsable; age then stays 0
                    try: age = re.search('(?P<size>\d+d)', row.find_all('td')[-1:][0].text).group('size')[:-1]
                    except: pass
                    def extra_check(item):
                        # Completeness check from the "available: X / Y"
                        # counts in the description span.
                        # NOTE(review): this divides total/parts, and under
                        # Python 2 (this module does not import true
                        # division) both operands are ints, so the ratio is
                        # floor-divided -- confirm the intended ratio
                        # (parts/total?) and division semantics.
                        parts = re.search('available:.(?P<parts>\d+)./.(?P<total>\d+)', info.text)
                        total = tryInt(parts.group('total'))
                        parts = tryInt(parts.group('parts'))
                        if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower())):
                            log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total))
                            return False
                        if 'requires password' in info.text.lower():
                            log.info2('Wrong: \'%s\', passworded', (item['name']))
                            return False
                        return True
                    results.append({
                        'id': nzb_id,
                        'name': title.text,
                        'age': tryInt(age),
                        'size': self.parseSize(size_match.group('size')),
                        'url': self.urls['download'] % nzb_id,
                        'detail_url': self.urls['detail'] % info.find('a')['href'],
                        'extra_check': extra_check
                    })
            except:
                # scraping is fragile; log the traceback and keep whatever
                # results were collected before the failure
                log.error('Failed to parse HTML response from BinSearch: %s', traceback.format_exc())
    def download(self, url = '', nzb_id = ''):
        """POST the site's download form for `nzb_id`; return the NZB body,
        or the sentinel 'try_next' so the caller falls back to another
        provider on failure."""
        data = {
            'action': 'nzb',
            nzb_id: 'on'
        }
        try:
            return self.urlopen(url, data = data, show_error = False)
        except:
            log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc()))
            return 'try_next'
| rooi/CouchPotatoServer | couchpotato/core/providers/nzb/binsearch/main.py | Python | gpl-3.0 | 3,668 |
from __future__ import division
from random import choice
from moi import r_client
from tornado.gen import coroutine, Task
from qiita_db.util import get_count
from qiita_db.study import Study
from qiita_db.util import get_lat_longs
from .base_handlers import BaseHandler
class StatsHandler(BaseHandler):
    """Serves the site-wide statistics page: study/sample/user counts plus
    per-sample (latitude, longitude) pairs, with the coordinates cached in
    redis for 24 hours."""

    def _get_stats(self, callback):
        """Gather the stats and hand them to `callback`.

        Passes [num_studies, num_samples, num_users, lat_longs], where
        lat_longs is a list of (latitude, longitude) pairs, one per sample.
        """
        # check if the key exists in redis
        lats = r_client.lrange('stats:sample_lats', 0, -1)
        longs = r_client.lrange('stats:sample_longs', 0, -1)
        if not (lats or longs):
            # if we don't have them, then fetch from disk and add to the
            # redis server with a 24-hour expiration
            lat_longs = list(get_lat_longs())
            with r_client.pipeline() as pipe:
                for latitude, longitude in lat_longs:
                    # storing as a simple data structure, hopefully this
                    # doesn't burn us later
                    pipe.rpush('stats:sample_lats', latitude)
                    pipe.rpush('stats:sample_longs', longitude)
                # set the key to expire in 24 hours, so that we limit the
                # number of times we have to go to the database to a
                # reasonable amount
                r_client.expire('stats:sample_lats', 86400)
                r_client.expire('stats:sample_longs', 86400)
                pipe.execute()
        else:
            # If we do have them, put the redis results into the same
            # structure that would come back from the database
            longs = [float(x) for x in longs]
            lats = [float(x) for x in lats]
            lat_longs = list(zip(lats, longs))
        # Get the number of studies
        num_studies = get_count('qiita.study')
        # Get the number of samples.
        # BUG FIX: this previously used len(lats), which is 0 on a cold
        # cache because the database branch never fills `lats`; counting
        # the coordinate pairs is correct in both branches.
        num_samples = len(lat_longs)
        # Get the number of users
        num_users = get_count('qiita.qiita_user')
        callback([num_studies, num_samples, num_users, lat_longs])

    @coroutine
    def get(self):
        """Render stats.html, featuring one randomly chosen public study
        (or no study when none are public)."""
        num_studies, num_samples, num_users, lat_longs = \
            yield Task(self._get_stats)
        # Pull a random public study from the database
        public_studies = Study.get_by_status('public')
        study = Study(choice(list(public_studies))) if public_studies else None
        if study is None:
            random_study_info = None
            random_study_title = None
            random_study_id = None
        else:
            random_study_info = study.info
            random_study_title = study.title
            random_study_id = study.id
        self.render('stats.html',
                    num_studies=num_studies, num_samples=num_samples,
                    num_users=num_users, lat_longs=lat_longs,
                    random_study_info=random_study_info,
                    random_study_title=random_study_title,
                    random_study_id=random_study_id)
| RNAer/qiita | qiita_pet/handlers/stats.py | Python | bsd-3-clause | 2,904 |
# Copyright 2012,2013 Christoph Reiter
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import sys
from typing import Type, List
from optparse import OptionParser
from quodlibet import _
from quodlibet.formats import MusicFile, AudioFileError
from quodlibet.util import print_
class CommandError(Exception):
    """Raised when an operon command cannot complete (bad arguments, an
    unloadable file, or a failed write)."""
    pass
class Command(object):
    """Base class for commands.

    Subclasses can override _add_options() and _execute()
    """

    NAME = ""
    DESCRIPTION = ""
    USAGE = ""

    # Registry of all known command classes, kept sorted by NAME.
    COMMANDS: "List[Type[Command]]" = []

    @classmethod
    def register(cls, cmd_cls):
        """Add a command class to the registry (sorted by its NAME)."""
        cls.COMMANDS.append(cmd_cls)
        cls.COMMANDS.sort(key=lambda c: c.NAME)

    def __init__(self, main_cmd, options=None):
        self._main_cmd = main_cmd
        self.__parser = OptionParser(
            usage="%s %s %s" % (main_cmd, self.NAME, self.USAGE),
            description=self.DESCRIPTION)
        # Default options come from parsing an empty argument list.
        self.__options = (options if options is not None
                          else self.__parser.parse_args([])[0])
        self._add_options(self.__parser)

    def _add_options(self, parser):
        """Override to add options to the parser"""
        pass

    @property
    def verbose(self):
        """Whether --verbose output is enabled."""
        return self.__options.verbose

    @verbose.setter
    def verbose(self, value):
        self.__options.verbose = bool(value)

    def log(self, text):
        """Print output if --verbose was passed"""
        if not self.verbose:
            return None
        return print_(text, file=sys.stderr)

    def load_song(self, path):
        """Load a song. Raises CommandError in case it fails"""
        self.log("Load file: %r" % path)
        song = MusicFile(path)
        if song:
            return song
        raise CommandError(_("Failed to load file: %r") % path)

    def save_songs(self, songs):
        """Save all passed songs"""
        self.log("Saving songs...")
        for song in songs:
            try:
                song.write()
            except AudioFileError as err:
                raise CommandError(err)

    def _execute(self, options, args):
        """Override to execute something"""
        raise NotImplementedError

    def print_help(self, file=None):
        """Print the help information about the command"""
        self.__parser.print_help(file=sys.stdout if file is None else file)

    def execute(self, args):
        """Execute the command"""
        opts, rest = self.__parser.parse_args(args)
        self._execute(opts, rest)
| ptitjes/quodlibet | quodlibet/operon/base.py | Python | gpl-2.0 | 2,693 |
import unittest
import mock
import numpy
import chainer
from chainer import cuda
import chainer.functions as F
from chainer.functions.connection import deconvolution_2d
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.testing import parameterize
from chainer.utils import conv
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return (x, x)
def _asfortranarray(x):
    """Return a Fortran-ordered copy of x on either CPU (numpy) or GPU."""
    xp = cuda.get_array_module(x)
    if xp is numpy:
        return numpy.asfortranarray(x)
    # cupy路: transpose, make contiguous, transpose back => Fortran order
    return xp.ascontiguousarray(x.T).T
@parameterize(
    # One test-class variant per combination of these settings.
    *testing.product({
        'in_channels': [3],
        'out_channels': [2],
        'wscale': [1],
        'ksize': [3],
        'stride': [1, 2],
        'pad': [1],
        'nobias': [True, False],
        'use_cudnn': [True, False],
        'test_outsize': [True, False],
        'c_contiguous': [True, False],
    })
)
class TestDeconvolution2DFunction(unittest.TestCase):
    """Checks F.deconvolution_2d: CPU/GPU forward agreement and numeric
    gradients, across stride/bias/cudnn/outsize/contiguity settings."""
    def setUp(self, use_cudnn=True):
        kh, kw = _pair(self.ksize)
        sh, sw = _pair(self.stride)
        ph, pw = _pair(self.pad)
        # Filter weights: normal init scaled by wscale / sqrt(fan-in).
        self.W = numpy.random.normal(
            0, self.wscale * numpy.sqrt(1. / (kh * kw * self.in_channels)),
            (self.in_channels, self.out_channels, kh, kw)
        ).astype(numpy.float32)
        self.b = None if self.nobias else numpy.random.uniform(
            -1, 1, self.out_channels).astype(numpy.float32)
        N = 2
        inh, inw = 4, 3
        # Expected output spatial size of the transposed convolution.
        outh = conv.get_deconv_outsize(inh, kh, sh, ph)
        outw = conv.get_deconv_outsize(inw, kw, sw, pw)
        self.outsize = (outh, outw) if self.test_outsize else None
        self.x = numpy.random.uniform(
            -1, 1, (N, self.in_channels, inh, inw)).astype(numpy.float32)
        self.gy = numpy.random.uniform(
            -1, 1, (N, self.out_channels, outh, outw)).astype(numpy.float32)
    @attr.cudnn
    def test_forward_consistency(self):
        # Run the same forward pass on CPU and GPU and compare outputs.
        x_cpu = chainer.Variable(self.x)
        W_cpu = chainer.Variable(self.W)
        b_cpu = None if self.nobias else chainer.Variable(self.b)
        y_cpu = F.deconvolution_2d(
            x_cpu, W_cpu, b_cpu, stride=self.stride, pad=self.pad,
            outsize=self.outsize, use_cudnn=self.use_cudnn)
        x_gpu = chainer.Variable(cuda.to_gpu(self.x))
        W_gpu = chainer.Variable(cuda.to_gpu(self.W))
        b_gpu = None if self.nobias else chainer.Variable(
            cuda.to_gpu(self.b))
        y_gpu = F.deconvolution_2d(
            x_gpu, W_gpu, b_gpu, stride=self.stride, pad=self.pad,
            outsize=self.outsize, use_cudnn=self.use_cudnn)
        gradient_check.assert_allclose(y_cpu.data, y_gpu.data.get())
    @attr.gpu
    def test_forward_consistency_im2col(self):
        # Same consistency check; the im2col (non-cudnn) path is exercised
        # by the use_cudnn=False parameterization.
        self.test_forward_consistency()
    def check_backward(self, x_data, W_data, b_data, y_grad):
        # Optionally convert the inputs to Fortran order so the
        # non-C-contiguous code paths get exercised.
        if not self.c_contiguous:
            x_data = _asfortranarray(x_data)
            W_data = _asfortranarray(W_data)
            y_grad = _asfortranarray(y_grad)
            self.assertFalse(x_data.flags.c_contiguous)
            self.assertFalse(W_data.flags.c_contiguous)
            self.assertFalse(y_grad.flags.c_contiguous)
            if b_data is not None:
                # A strided view (every other element of a double-length
                # buffer) makes the 1-D bias non-contiguous without
                # changing its values.
                xp = cuda.get_array_module(b_data)
                b = xp.empty((len(b_data) * 2,), dtype=self.b.dtype)
                b[::2] = b_data
                b_data = b[::2]
                self.assertFalse(b_data.flags.c_contiguous)
        args = (x_data, W_data)
        if b_data is not None:
            args = args + (b_data,)
        # Numerical gradient check against the function's backward pass.
        gradient_check.check_backward(
            deconvolution_2d.Deconvolution2DFunction(
                self.stride, self.pad, self.outsize, self.use_cudnn),
            args, y_grad, eps=1e-2)
    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.W, self.b, self.gy)
    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu(self):
        b = None if self.b is None else cuda.to_gpu(self.b)
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.W),
                            b, cuda.to_gpu(self.gy))
@testing.parameterize(
    {'use_cudnn': True},
    {'use_cudnn': False},
)
@attr.cudnn
class TestDeconvolution2DCudnnCall(unittest.TestCase):
    """Verifies cuDNN kernels are invoked exactly when use_cudnn=True."""
    def setUp(self):
        self.in_channels = 3
        self.out_channels = 2
        kh, kw = _pair(3)
        sh, sw = _pair(1)
        ph, pw = _pair(1)
        # Weights scaled by 1/sqrt(fan-in), allocated directly on the GPU.
        self.W = cuda.cupy.random.normal(
            0, numpy.sqrt(1. / (kh * kw * self.in_channels)),
            (self.in_channels, self.out_channels, kh, kw)
        ).astype(numpy.float32)
        N = 2
        inh, inw = 4, 3
        outh = conv.get_deconv_outsize(inh, kh, sh, ph)
        outw = conv.get_deconv_outsize(inw, kw, sw, pw)
        self.x = cuda.cupy.random.uniform(
            -1, 1, (N, self.in_channels, inh, inw)).astype(numpy.float32)
        self.gy = cuda.cupy.random.uniform(
            -1, 1, (N, self.out_channels, outh, outw)).astype(numpy.float32)
    def forward(self):
        # One deconvolution forward pass on GPU inputs, without bias.
        x = chainer.Variable(self.x)
        W = chainer.Variable(self.W)
        return F.deconvolution_2d(
            x, W, None, stride=1, pad=1, use_cudnn=self.use_cudnn)
    def test_call_cudnn_forward(self):
        # A deconvolution forward pass maps onto cuDNN's
        # convolutionBackwardData (transposed convolution); accept either
        # the v2 or v3 entry point depending on the installed cuDNN.
        v2 = 'cupy.cudnn.cudnn.convolutionBackwardData_v2'
        v3 = 'cupy.cudnn.cudnn.convolutionBackwardData_v3'
        with mock.patch(v2) as func_v2, mock.patch(v3) as func_v3:
            self.forward()
        self.assertEqual(func_v2.called or func_v3.called, self.use_cudnn)
    def test_call_cudnn_backrward(self):
        # (sic: "backrward" -- renaming would change the discovered tests.)
        # The backward pass of a deconvolution is a plain convolutionForward.
        y = self.forward()
        y.grad = self.gy
        with mock.patch('cupy.cudnn.cudnn.convolutionForward') as func:
            y.backward()
        self.assertEqual(func.called, self.use_cudnn)
# Hand this module's tests to Chainer's test runner when collected/executed.
testing.run_module(__name__, __file__)
| cemoody/chainer | tests/chainer_tests/functions_tests/connection_tests/test_deconvolution_2d.py | Python | mit | 5,913 |
import os, sys, random, argparse, time, math, gzip
import cPickle as pickle
from collections import Counter
import numpy as np
import scipy
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
import theano
import theano.tensor as T
sys.path.append('../')
sys.path.append('../../../adulteration/wikipedia')
sys.path.append('../../../adulteration/model')
from nn import get_activation_by_name, create_optimization_updates, softmax, sigmoid
from nn import Layer, EmbeddingLayer, LSTM, RCNN, StrCNN, Dropout, apply_dropout
from utils import say, load_embedding_iterator
import hier_to_cat
import scoring
from split_data import split_data_by_wiki
from wikipedia import *
np.set_printoptions(precision=3)
wiki_path = '../../../adulteration/wikipedia/'
def convert_to_zero_one(v):
    """Convert probability distribution to zero (not occured) or one (occured)."""
    return np.asarray(v > 0, dtype='int32')
def reduce_dim(hier_x, n_components, saved=True):
    """Project hier_x down to n_components dimensions with PCA, caching the
    fitted model on disk (one pickle per component count)."""
    fname = 'pca_{}.pkl'.format(n_components)
    if saved and os.path.isfile(fname):
        # reuse the previously fitted PCA
        # (text-mode pickle I/O: Python 2 only)
        with open(fname, 'r') as f:
            pca = pickle.load(f)
        return pca.transform(hier_x)
    pca = PCA(n_components=n_components)
    reduced = pca.fit_transform(hier_x)
    with open(fname, 'w') as f:
        pickle.dump(pca, f)
    return reduced
def create_product_mask(products_len, n_hidden):
    """Build an int32 mask of shape (max_len, batch, n_hidden) where
    mask[t, b, :] is 1 iff t < products_len[b]."""
    max_len = products_len.max()
    per_item = []
    for length in products_len:
        # 1s for the real tokens, 0s for the padding positions
        indicator = np.zeros(max_len)
        indicator[:length] = 1
        per_item.append(np.tile(indicator, (n_hidden, 1)).T)
    stacked = np.swapaxes(np.array(per_item), 0, 1)
    return stacked.astype('int32')
def read_corpus_products():
    """Return (tokens, lengths) for every product-category name.

    tokens  : list of token lists, one per category, right-padded with the
              literal token '<pad>' to the longest row's length.
    lengths : np.array of the unpadded token counts.
    """
    # idx_to_cat maps category index -> category name; sorted() assumes the
    # indices are a dense 0..N-1 range -- TODO confirm against the pickle.
    with open('../../../adulteration/ncim/idx_to_cat.pkl', 'rb') as f_in:
        idx_to_cat = pickle.load(f_in)
    products = [idx_to_cat[i] for i in sorted(idx_to_cat.keys())]
    # input_to_tokens comes from the wildcard `wikipedia` import above;
    # presumably it tokenizes each category name -- TODO confirm.
    tokens = input_to_tokens(ings=products)
    # Add padding
    products_len = np.array([len(i) for i in tokens])
    max_len = products_len.max()
    new_tokens = []
    for t in tokens:
        num_pads = max_len - len(t)
        new_tokens.append(t+num_pads*['<pad>'])
    return new_tokens, products_len
def read_corpus_adulterants():
    """Build the adulterant-chemical corpus.

    Returns (corpus_x, corpus_y, hier_x), aligned by index:
      corpus_x : token list per adulterant ([] when the tokenized text has
                 5 or fewer tokens, i.e. no usable description),
      corpus_y : row-normalized product-category distribution (float32),
      hier_x   : 3751-dim hierarchy feature vector (float32).
    Adulterants with no recorded product category are skipped entirely.
    """
    # NOTE(review): text-mode pickle ('r') works on Python 2 / protocol 0
    # only; binary mode would be the portable choice.
    with open(wiki_path+'input_to_outputs_adulterants.pkl', 'r') as f_in:
        input_to_outputs = pickle.load(f_in)
    corpus_x, corpus_y, hier_x = [], [], []
    adulterants = get_adulterants(get_all=True)
    assert len(adulterants) == len(input_to_outputs)
    input_keys = range(len(adulterants))
    input_tokens = input_to_tokens(input_keys, adulterants)
    ing_idx_to_hier_map = hier_to_cat.gen_ing_idx_to_hier_map(adulterants, adulterants=True)
    assert len(input_keys) == len(input_tokens)
    for i in range(len(input_keys)):
        inp = input_keys[i]
        tokens = input_tokens[i]
        # Fall back to an all-zero hierarchy vector when none is known.
        hier = ing_idx_to_hier_map.get(i, np.zeros(3751))
        out = input_to_outputs[inp]
        if out.sum() <= 0:
            # No product-category observations for this adulterant.
            continue
        if len(tokens) > 5:
            corpus_x.append(tokens)
        else:
            corpus_x.append([])
        hier_x.append(hier)
        # Normalize raw counts into a probability distribution.
        normalized = out*1. / out.sum()
        assert np.isclose(normalized.sum(), 1, atol=1e-5)
        corpus_y.append(normalized)
        #len_corpus_y.append(out.sum())
    assert len(corpus_x)==len(corpus_y)==len(hier_x)
    return np.array(corpus_x), np.array(corpus_y).astype('float32'), np.array(hier_x).astype('float32')
def read_corpus_ingredients(num_ingredients=5000):
    """Build the main ingredient corpus (top `num_ingredients` ingredients).

    Returns (corpus_x, corpus_y, hier_x), aligned by index:
      corpus_x : token list per ingredient ([] when the tokenized text has
                 5 or fewer tokens, i.e. no usable description),
      corpus_y : row-normalized product-category distribution (float32),
      hier_x   : 3751-dim hierarchy feature vector (float32).
    """
    # NOTE(review): text-mode pickle ('r') is Python 2 / protocol 0 only.
    with open(wiki_path+'input_to_outputs.pkl', 'r') as f_in:
        input_to_outputs = pickle.load(f_in)
    corpus_x, corpus_y, hier_x = [], [], []
    #y_indptr = [0]
    #y_indices = []
    #y_data = []
    input_keys = sorted(input_to_outputs.keys())
    ings = get_ings(num_ingredients)
    input_tokens = input_to_tokens(input_keys, ings)
    ing_idx_to_hier_map = hier_to_cat.gen_ing_idx_to_hier_map(ings)
    assert len(input_keys) == len(input_tokens) == num_ingredients
    for i in range(num_ingredients):
        inp = input_keys[i]
        tokens = input_tokens[i]
        # Fall back to an all-zero hierarchy vector when none is known.
        hier = ing_idx_to_hier_map.get(i, np.zeros(3751))
        assert len(hier) == 3751
        out = input_to_outputs[inp]
        assert out.sum() > 0, "Each ing must have a product category"
        #y_data.extend(out)
        #y_indices.extend(range(len(out)))
        #y_indptr.append(len(out))
        if len(tokens) > 5:
            corpus_x.append(tokens)
        else:
            corpus_x.append([])
        hier_x.append(hier)
        # Normalize raw counts into a probability distribution.
        normalized = out*1. / out.sum()
        assert np.isclose(normalized.sum(), 1, atol=1e-5)
        corpus_y.append(normalized)
        #len_corpus_y.append(out.sum())
    assert len(corpus_x)==len(corpus_y)==len(hier_x)
    #corpus_y = scipy.sparse.csr_matrix((y_data, y_indices, np.cumsum(y_indptr)))
    return np.array(corpus_x), np.array(corpus_y).astype('float32'), np.array(hier_x).astype('float32')
def read_corpus(path):
    """Read a labeled corpus file: each non-empty line is "<label> tok tok ...".

    Returns (corpus_x, corpus_y): the token lists and the integer labels,
    in file order, skipping blank lines.
    """
    corpus_x, corpus_y = [], []
    with open(path) as fin:
        for line in fin:
            fields = line.strip().split()
            if not fields:
                continue
            corpus_y.append(int(fields[0]))
            corpus_x.append(fields[1:])
    return corpus_x, corpus_y
def create_one_batch(ids, x, y, hier):
    """Stack the examples selected by `ids` into one mini-batch.

    Returns (batch_x, batch_y, batch_hier): batch_x holds one token-id
    column per example, batch_y one target row per example, and batch_hier
    one hierarchy column per example (a zero-height float32 block when
    `hier` is None).
    """
    selected_x = [x[idx] for idx in ids]
    selected_y = [y[idx] for idx in ids]
    batch_x = np.column_stack(selected_x)
    batch_y = np.array(selected_y)
    if hier is None:
        # No hierarchy features: emit an empty (0 x batch) float32 block.
        batch_hier = np.column_stack([[]] * len(ids)).astype('float32')
    else:
        batch_hier = np.column_stack([hier[idx] for idx in ids])
    assert batch_x.shape[1] == batch_y.shape[0]
    return batch_x, batch_y, batch_hier
# shuffle training examples and create mini-batches
def create_batches(perm, x, y, hier, batch_size):
    """Group the examples listed in `perm` into same-length mini-batches.

    Empty (token-less) examples are dropped.  Examples are sorted by
    sequence length so every batch contains sequences of a single length;
    a batch is closed when the next sequence differs in length or
    `batch_size` is reached.  Finished batches are shuffled before return.
    """
    # sort sequences based on their length
    # permutation is necessary if we want different batches every epoch
    first_nonzero_idx = sum([1 for i in x if len(i)==0])
    # Ascending sort puts all zero-length examples first; slicing them off
    # keeps only the usable examples.
    lst = sorted(perm, key=lambda i: len(x[i]))[first_nonzero_idx:]
    batches_x = [ ]
    batches_y = [ ]
    batches_hier = [ ]
    size = batch_size
    ids = [ lst[0] ]
    for i in lst[1:]:
        if len(ids) < size and len(x[i]) == len(x[ids[0]]):
            ids.append(i)
        else:
            #print ids
            #print x, len(x)
            #print y, len(y)
            bx, by, bhier = create_one_batch(ids, x, y, hier)
            batches_x.append(bx)
            batches_y.append(by)
            batches_hier.append(bhier)
            ids = [ i ]
    # Flush the final, still-open batch.
    bx, by, bhier = create_one_batch(ids, x, y, hier)
    batches_x.append(bx)
    batches_y.append(by)
    batches_hier.append(bhier)
    # shuffle batches
    # NOTE(review): random.shuffle(range(...)) only works on Python 2,
    # where range() returns a list.
    batch_perm = range(len(batches_x))
    random.shuffle(batch_perm)
    batches_x = [ batches_x[i] for i in batch_perm ]
    batches_y = [ batches_y[i] for i in batch_perm ]
    batches_hier = [ batches_hier[i] for i in batch_perm ]
    assert len(batches_x) == len(batches_y) == len(batches_hier)
    return batches_x, batches_y, batches_hier
def get_ing_split(seed):
    """Split ing into train, dev, adulterants.
    To be replaced by split_data.split_data_by_wiki.

    Returns (ings_train, ings_dev, adulterants) where the adulterants are
    filtered down to those with at least one observed product category.
    """
    num_ingredients = 5000
    ings = get_ings(num_ingredients)
    #train_indices, dev_indices = train_test_split(
    #    range(num_ingredients), test_size=1/3., random_state=seed)
    train_indices, dev_indices, test_indices = split_data_by_wiki(
        ings, seed)
    ings_train = ings[train_indices]
    ings_dev = ings[dev_indices]
    adulterants = get_adulterants()
    # NOTE(review): text-mode pickle ('r') is Python 2 only.
    with open(wiki_path+'input_to_outputs_adulterants.pkl', 'r') as f_in:
        input_to_outputs = pickle.load(f_in)
    # Keep only adulterants with at least one product-category observation.
    test_indices = [k for k,v in input_to_outputs.items() if v.sum()>0]
    adulterants = adulterants[test_indices]
    return ings_train, ings_dev, adulterants
def gen_text_predictions(args, fname):
    """Generate text predictions given the prediction vector file.

    Loads the .npy prediction matrix at `fname` ('train'/'dev'/'test' must
    appear in the name), re-derives the matching ingredient list via the
    same seeded split used for training, and writes a human-readable .txt
    report next to it.
    """
    assert 'train' in fname or 'dev' in fname or 'test' in fname
    seed = args.seed
    results = np.load(fname)
    text_fname = fname.replace('.npy', '.txt')
    with open('../../../adulteration/ncim/idx_to_cat.pkl', 'rb') as f_in:
        idx_to_cat = pickle.load(f_in)
    ings = get_ings(5000)
    if args.add_adulterants:
        adulterants = get_adulterants()
        ings = np.hstack([ings, adulterants])
    # Same seed => same split as during training, so rows line up.
    train_indices, dev_indices, test_indices = split_data_by_wiki(ings, seed)
    if 'train' in fname:
        #train_indices, dev_indices = train_test_split(
        #    range(5000), test_size=1/3., random_state=seed)
        ings = ings[train_indices]
    elif 'dev' in fname:
        #train_indices, dev_indices = train_test_split(
        #    range(5000), test_size=1/3., random_state=seed)
        ings = ings[dev_indices]
    elif 'test' in fname:
        if args.test_adulterants_only:
            ings = get_adulterants()
            #with open(wiki_path+'input_to_outputs_adulterants.pkl', 'r') as f_in:
            #    input_to_outputs = pickle.load(f_in)
            #test_indices = [k for k,v in input_to_outputs.items() if v.sum()>0]
            #ings = adulterants[test_indices]
        else:
            ings = ings[test_indices]
    assert len(ings)==len(results)
    # Emit the top-5 predicted category names per ingredient.
    hier_to_cat.test_model(
        results, ings, idx_to_cat, top_n=5, fname=text_fname, ings_wiki_links=get_ings_wiki_links())
def save_representations(args, get_representation, train, dev, test, products):
    """Dump learned ingredient (and product) vectors to representations/.

    For each split, runs `get_representation` on every non-empty example
    and saves the stacked ingredient vectors; when `products` is given,
    also saves the product representations averaged over all examples.
    """
    label = args.model
    if not label:
        # Fall back to a timestamp label when no model name was given.
        label = str(int(time.time()))
    # NOTE(review): these unpacks raise TypeError when a split is None
    # (e.g. --load_model without --train); verify against callers.
    trainx, trainy = train
    devx, devy = dev
    testx, testy = test
    for x_data, data_name in [(trainx, 'train'), (devx, 'dev'), (testx, 'test')]:
        if x_data is None:
            print "No data for:", data_name
            continue
        ing_reps = []
        prod_reps = None
        counter = 0
        for x_idx, x_for_predict in enumerate(x_data):
            if len(x_for_predict) > 0:
                counter += 1
                if products is None:
                    ing_rep = get_representation(np.vstack(x_for_predict))[0][0]
                    ing_reps.append(ing_rep)
                else:
                    ing_rep, prod_rep = get_representation(np.vstack(x_for_predict), products)
                    ing_reps.append(ing_rep[0])
                    # Product reps are summed here, averaged after the loop.
                    if prod_reps is None:
                        prod_reps = prod_rep
                    else:
                        prod_reps += prod_rep
            else:
                # Token-less examples get a zero vector of matching size.
                ing_reps.append(np.zeros(len(ing_reps[0]))) # hopefully ing_reps[0] exists
        ing_fname = 'representations/{}_{}_ing_reps.npy'.format(label, data_name)
        np.save(ing_fname, np.array(ing_reps))
        if products is not None:
            prod_reps /= counter
            prod_fname = 'representations/{}_{}_prod_reps.npy'.format(label, data_name)
            np.save(prod_fname, prod_reps)
def save_predictions(args, predict_model, train, dev, test, hier, products):
    """Run the model over each split and save prediction vectors to disk.

    Writes predictions/<label>_<split>_pred.npy per split and then emits
    the human-readable report via gen_text_predictions.
    """
    label = args.model
    seed = args.seed
    if not label:
        # Fall back to a timestamp label when no model name was given.
        label = str(int(time.time()))
    # NOTE(review): these unpacks raise TypeError when a split is None
    # (e.g. --load_model without --train); verify against callers.
    trainx, trainy = train
    devx, devy = dev
    testx, testy = test
    hier_train, hier_dev, hier_test = hier
    for x_data, data_name, hier_x in [(
            trainx, 'train', hier_train), (devx, 'dev', hier_dev), (testx, 'test', hier_test)]:
        if x_data is None:
            print "No data for:", data_name
            continue
        results = []
        for x_idx, x_for_predict in enumerate(x_data):
            if len(x_for_predict) > 0:
                if hier_x is not None:
                    if products is None:
                        p_y_given_x = predict_model(np.vstack(x_for_predict),
                            np.vstack(hier_x[x_idx]))[0]
                    else:
                        p_y_given_x = predict_model(np.vstack(x_for_predict),
                            np.vstack(hier_x[x_idx]), products)[0]
                else:
                    # Hierarchy disabled: pass a zero-width placeholder.
                    if products is None:
                        p_y_given_x = predict_model(np.vstack(x_for_predict),
                            np.column_stack([[]]))[0]
                    else:
                        p_y_given_x = predict_model(np.vstack(x_for_predict),
                            np.column_stack([[]]), products)[0]
                results.append(p_y_given_x)
            else:
                # Token-less examples get an all-zero prediction vector.
                results.append(np.zeros(len(results[0])))
        fname = 'predictions/{}_{}_pred.npy'.format(label, data_name)
        print "Saved predictions to:", fname
        np.save(fname, np.array(results))
        gen_text_predictions(args, fname)
def evaluate(x_data, y_data, hier_x, products, predict_model):
    """Compute the MAP of the data.

    Prints three MAP scores via the scoring module: a random baseline, the
    average-of-true-distributions baseline, and the model's predictions.
    Examples without tokens are excluded from evaluation.
    """
    # Ground truth: (ingredient_idx, category_idx) -> True for every
    # observed ingredient/category pair.
    ing_cat_pair_map = {}
    for x_idx, x in enumerate(x_data):
        for y_idx, out in enumerate(y_data[x_idx]):
            if out > 0:
                ing_cat_pair_map[(x_idx, y_idx)] = True
    valid_ing_indices, results = [], []
    for x_idx, x_for_predict in enumerate(x_data):
        if len(x_for_predict) > 0:
            if hier_x is not None:
                if products is None:
                    p_y_given_x = predict_model(np.vstack(x_for_predict),
                        np.vstack(hier_x[x_idx]))[0]
                else:
                    p_y_given_x = predict_model(np.vstack(x_for_predict),
                        np.vstack(hier_x[x_idx]), products)[0]
            else:
                # Hierarchy disabled: pass a zero-width placeholder.
                if products is None:
                    p_y_given_x = predict_model(np.vstack(x_for_predict),
                        np.column_stack([[]]))[0]
                else:
                    p_y_given_x = predict_model(np.vstack(x_for_predict),
                        np.column_stack([[]]), products)[0]
            valid_ing_indices.append(x_idx)
            results.append(p_y_given_x)
    valid_ing_indices = np.array(valid_ing_indices)
    avg_true_results = scoring.gen_avg_true_results(valid_ing_indices)
    results = np.array(results)
    print "Random:"
    scoring.evaluate_map(valid_ing_indices, results, ing_cat_pair_map, random=True)
    print "Avg True Results:"
    scoring.evaluate_map(valid_ing_indices, avg_true_results, ing_cat_pair_map, random=False)
    print "Model:"
    scoring.evaluate_map(valid_ing_indices, results, ing_cat_pair_map, random=False)
class Model:
def __init__(self, args, embedding_layer, nclasses, products_len):
self.args = args
self.embedding_layer = embedding_layer
self.nclasses = nclasses
self.products_len = products_len
def ready(self):
args = self.args
embedding_layer = self.embedding_layer
self.n_hidden = args.hidden_dim
self.n_in = embedding_layer.n_d
dropout = self.dropout = theano.shared(
np.float64(args.dropout_rate).astype(theano.config.floatX)
)
# x is length * batch_size
# y is batch_size * num_cats
self.x = T.imatrix('x')
#self.y = T.ivector('y')
self.y = T.fmatrix('y')
self.y_len = T.ivector()
x = self.x
y = self.y
y_len = self.y_len
n_hidden = self.n_hidden
n_in = self.n_in
# hier is batch_size * hier_dim
self.hier = T.fmatrix('hier')
hier = self.hier
size = 0
size_prod = 0
# fetch word embeddings
# (len * batch_size) * n_in
slices = embedding_layer.forward(x.ravel())
self.slices = slices
# 3-d tensor, len * batch_size * n_in
slices = slices.reshape( (x.shape[0], x.shape[1], n_in) )
# stacking the feature extraction layers
pooling = args.pooling
depth = args.depth
layers = self.layers = [ ]
prev_output = slices
prev_output = apply_dropout(prev_output, dropout, v2=True)
if args.products:
self.products = T.imatrix('products')
products = self.products
slices_prod = embedding_layer.forward(products.ravel())
slices_prod = slices_prod.reshape( (products.shape[0], products.shape[1], n_in) )
prev_output_prod = apply_dropout(slices_prod, dropout, v2=True)
products_len = theano.shared(self.products_len.astype(theano.config.floatX))
products_len_mask = create_product_mask(self.products_len, n_hidden)
products_len_mask = theano.shared(products_len_mask.astype(theano.config.floatX))
softmax_inputs = [ ]
softmax_inputs_prod = [ ]
activation = get_activation_by_name(args.act)
for i in range(depth):
if args.layer.lower() == "lstm":
print "Layer: LSTM"
layer = LSTM(
n_in = n_hidden if i > 0 else n_in,
n_out = n_hidden
)
elif args.layer.lower() == "strcnn":
print "Layer: StrCNN"
layer = StrCNN(
n_in = n_hidden if i > 0 else n_in,
n_out = n_hidden,
activation = activation,
decay = args.decay,
order = args.order
)
elif args.layer.lower() == "rcnn":
print "Layer: RCNN"
layer = RCNN(
n_in = n_hidden if i > 0 else n_in,
n_out = n_hidden,
activation = activation,
order = args.order,
mode = args.mode
)
else:
raise Exception("unknown layer type: {}".format(args.layer))
layers.append(layer)
prev_output = layer.forward_all(prev_output)
if pooling:
softmax_inputs.append(T.sum(prev_output, axis=0)) # summing over columns
else:
softmax_inputs.append(prev_output[-1])
prev_output = apply_dropout(prev_output, dropout)
size += n_hidden
if args.products:
prev_output_prod = layer.forward_all(prev_output_prod)
if pooling:
inter_result = prev_output_prod * products_len_mask
inter_result = T.sum(inter_result, axis=0)
inter_result = inter_result / products_len[:,None]
softmax_inputs_prod.append(inter_result) # summing over columns
else:
inter_result = prev_output_prod[-1]
#inter_result = prev_output_prod[products_len.astype('int32'),np.arange(self.nclasses),:]
softmax_inputs_prod.append(inter_result)
prev_output_prod = apply_dropout(prev_output_prod, dropout)
size_prod += n_hidden
#softmax_inputs.append(hier.T)
# final feature representation is the concatenation of all extraction layers
if pooling:
softmax_input = T.concatenate(softmax_inputs, axis=1) / x.shape[0]
else:
softmax_input = T.concatenate(softmax_inputs, axis=1)
softmax_input = apply_dropout(softmax_input, dropout, v2=True)
if args.products:
if pooling:
softmax_inputs_prod = T.concatenate(softmax_inputs_prod, axis=1)# / products.shape[0]
else:
softmax_inputs_prod = T.concatenate(softmax_inputs_prod, axis=1)
softmax_inputs_prod = apply_dropout(softmax_inputs_prod, dropout, v2=True)
if not args.products or args.final_softmax:
# feed the feature repr. to the softmax output layer
if args.products:
softmax_n_in = self.nclasses
else:
softmax_n_in = size
if args.use_hier:
softmax_n_in += args.hier_dim
layers.append( Layer(
n_in = softmax_n_in,
n_out = self.nclasses,
activation = sigmoid if args.binary else softmax,
has_bias = False,
) )
for l,i in zip(layers, range(len(layers))):
say("layer {}: n_in={}\tn_out={}\n".format(
i, l.n_in, l.n_out
))
if not args.no_bias:
b_vals = np.zeros((size,), dtype=theano.config.floatX)
b = theano.shared(b_vals, name="b")
softmax_input = softmax_input + b
self.softmax_input = softmax_input
# unnormalized score of y given x
if args.products:
if not args.no_bias:
#b_vals_prod = np.zeros((self.nclasses,), dtype=theano.config.floatX)
b_vals_prod = np.zeros((self.nclasses, size_prod), dtype=theano.config.floatX)
b_prod = theano.shared(b_vals_prod, name="b_prod")
softmax_inputs_prod = softmax_inputs_prod + b_prod#.reshape((-1,1)) # add reshape if broadcasting
softmax_input = T.dot(softmax_input, softmax_inputs_prod.T)
self.softmax_inputs_prod = softmax_inputs_prod
#else:
#self.softmax_inputs_prod = layers[-1].W
softmax_input = T.concatenate([softmax_input, hier.T], axis=1)
if not args.products or args.final_softmax:
self.p_y_given_x = layers[-1].forward(softmax_input)
else:
if args.binary:
self.p_y_given_x = sigmoid(softmax_input)
else:
self.p_y_given_x = softmax(softmax_input)
self.pred = T.argmax(self.p_y_given_x, axis=1)
if args.binary:
loss_func = T.nnet.binary_crossentropy
else:
loss_func = T.nnet.categorical_crossentropy
self.nll_loss = T.mean( loss_func(
self.p_y_given_x,
y
))
# adding regularizations
self.l2_sqr = None
self.params = [ ]
for layer in layers:
self.params += layer.params
if not args.no_bias:
self.params.append(b)
if args.products:
self.params.append(b_prod)
for p in self.params:
if self.l2_sqr is None:
self.l2_sqr = args.l2_reg * T.sum(p**2)
else:
self.l2_sqr += args.l2_reg * T.sum(p**2)
nparams = sum(len(x.get_value(borrow=True).ravel()) \
for x in self.params)
say("total # parameters: {}\n".format(nparams))
def save_model(self, path, args):
# append file suffix
if not path:
path = str(int(time.time()))
if not path.endswith(".pkl.gz"):
if path.endswith(".pkl"):
path += ".gz"
else:
path += ".pkl.gz"
with gzip.open(path, "wb") as fout:
pickle.dump(
([ x.get_value() for x in self.params ], args, self.nclasses),
fout,
protocol = pickle.HIGHEST_PROTOCOL
)
print "Saved model:", path
def load_model(self, path):
if not os.path.exists(path):
if path.endswith(".pkl"):
path += ".gz"
else:
path += ".pkl.gz"
with gzip.open(path, "rb") as fin:
param_values, args, nclasses = pickle.load(fin)
self.args = args
self.nclasses = nclasses
self.ready()
for x,v in zip(self.params, param_values):
x.set_value(v)
print "Loaded model:", path
def eval_accuracy(self, preds, golds):
fine = sum([ sum(p == y) for p,y in zip(preds, golds) ]) + 0.0
fine_tot = sum( [ len(y) for y in golds ] )
return fine/fine_tot
def train(self, train, dev, test, hier, products):
args = self.args
trainx, trainy = train
train_hier_x, dev_hier_x, test_hier_x = hier
batch_size = args.batch
#if products is None:
# products = [[] for i in range(131)]
#if products is not None:
# products = np.column_stack(products)
blank_product_hier = np.column_stack( [[] for i in range(self.nclasses)] )
if dev:
dev_batches_x, dev_batches_y, dev_batches_hier = create_batches(
range(len(dev[0])),
dev[0],
dev[1],
dev_hier_x,
batch_size
)
if test:
test_batches_x, test_batches_y, test_batches_hier = create_batches(
range(len(test[0])),
test[0],
test[1],
test_hier_x,
batch_size
)
cost = self.nll_loss + self.l2_sqr
updates, lr, gnorm = create_optimization_updates(
cost = cost,
params = self.params,
lr = args.learning_rate,
method = args.learning
)[:3]
if products is not None:
inputs = [self.x, self.y, self.hier, self.products]
predict_inputs = [self.x, self.hier, self.products]
else:
inputs = [self.x, self.y, self.hier]
predict_inputs = [self.x, self.hier]
train_model = theano.function(
inputs = inputs,
outputs = [ cost, gnorm ],
updates = updates,
allow_input_downcast = True
)
predict_model = theano.function(
inputs = predict_inputs,
outputs = self.p_y_given_x,
allow_input_downcast = True
)
get_representation = theano.function(
inputs = [self.x, self.products] if products is not None else [self.x],
outputs = [self.softmax_input, self.softmax_inputs_prod] if products is not None else [self.softmax_input],
allow_input_downcast = True
)
eval_acc = theano.function(
inputs = predict_inputs,
outputs = self.pred,
allow_input_downcast = True
)
if args.load_model:
return predict_model, get_representation
unchanged = 0
best_dev = 0.0
dropout_prob = np.float64(args.dropout_rate).astype(theano.config.floatX)
start_time = time.time()
eval_period = args.eval_period
perm = range(len(trainx))
say(str([ "%.2f" % np.linalg.norm(x.get_value(borrow=True)) for x in self.params ])+"\n")
for epoch in xrange(args.max_epochs):
unchanged += 1
#if dev and unchanged > 30: return
train_loss = 0.0
random.shuffle(perm)
batches_x, batches_y, batches_hier = create_batches(
perm, trainx, trainy, train_hier_x, batch_size)
N = len(batches_x)
blah = None#Delete me
for i in xrange(N):
if i % 100 == 0:
sys.stdout.write("\r%d" % i)
sys.stdout.flush()
x = batches_x[i]
y = batches_y[i]
hier_x = batches_hier[i]
y_len = np.array([j.sum() for j in batches_y[i]])
#y = y.toarray()
assert x.dtype in ['float32', 'int32']
assert y.dtype in ['float32', 'int32']
assert hier_x.dtype in ['float32', 'int32']
#print x.shape
#print y.shape
#print hier_x.shape
if products is not None:
#print products.shape
assert products.dtype in ['float32', 'int32']
va, grad_norm = train_model(x, y, hier_x, products)
else:
va, grad_norm = train_model(x, y, hier_x)
train_loss += va
#if products is not None:
# print x.shape, hier_x.shape, np.array(products[0:1]).T.shape, hier_x[:,0:1].shape
#print i, N
# debug
if math.isnan(va):
print ""
print i-1, i
print x
print y
return
if (i == N-1) or (eval_period > 0 and (i+1) % eval_period == 0):
self.dropout.set_value(0.0)
say( "%s\n" % (args.model) )
say( "Epoch %.1f\tloss=%.4f\t|g|=%s [%.2fm]\n" % (
epoch + (i+1)/(N+0.0),
train_loss / (i+1),
float(grad_norm),
(time.time()-start_time) / 60.0
))
say(str([ "%.2f" % np.linalg.norm(x.get_value(borrow=True)) for x in self.params ])+"\n")
"""
if dev:
preds = [ eval_acc(x) for x in dev_batches_x ]
nowf_dev = self.eval_accuracy(preds, dev_batches_y)
if nowf_dev > best_dev:
unchanged = 0
best_dev = nowf_dev
if args.model:
self.save_model(args.model, args)
say("\tdev accuracy=%.4f\tbest=%.4f\n" % (
nowf_dev,
best_dev
))
if args.test and nowf_dev == best_dev:
preds = [ eval_acc(x) for x in test_batches_x ]
nowf_test = self.eval_accuracy(preds, test_batches_y)
say("\ttest accuracy=%.4f\n" % (
nowf_test,
))
if best_dev > nowf_dev + 0.05:
return
"""
self.dropout.set_value(dropout_prob)
start_time = time.time()
#print "Length of trainx: ", len(trainx)
#for x_idx, x_for_predict in enumerate(trainx[3233:3236]):
# if len(x_for_predict) > 0:
# p_y_given_x = predict_model(np.vstack(x_for_predict))
# print x_idx, p_y_given_x
if epoch == 0 or (epoch+1) % 10 == 0 or epoch == args.max_epochs-1:
evaluate_start_time = time.time()
print "\nEpoch:", epoch+1
print "======= Training evaluation ========"
evaluate(trainx, trainy, train_hier_x, products, predict_model)
if dev:
print "======= Validation evaluation ========"
evaluate(dev[0], dev[1], dev_hier_x, products, predict_model)
if test:
print "======= Adulteration evaluation ========"
evaluate(test[0], test[1], test_hier_x, products, predict_model)
print "Evaluate time: {:.1f}m".format((time.time()-evaluate_start_time)/60)
start_time = time.time()
if args.save_model:
self.save_model(args.model, args)
start_time = time.time()
return predict_model, get_representation
def main(args):
print args
model = None
ings = get_ings(5000)
adulterants = get_adulterants()
assert args.embedding, "Pre-trained word embeddings required."
assert not (args.products and args.use_hier and not args.final_softmax), "Hier won't be used here."
assert args.train or (args.load_model and args.test), "Need training data or existing model"
print "Loading embeddings"
if '.pkl' in args.embedding:
with open(args.embedding, 'rb') as f:
embedding = pickle.load(f)
if '<unk>' not in embedding:
embedding['<unk>'] = np.zeros(len(embedding['</s>']))
else:
embedding = load_embedding_iterator(args.embedding)
embedding_layer = EmbeddingLayer(
n_d = args.hidden_dim,
vocab = [ "<unk>" ],
embs = embedding
)
print "Reading corpus"
products, products_len = None, None
if args.products:
products_text, products_len = read_corpus_products()
products = [ embedding_layer.map_to_ids(x) for x in products_text ]
products = np.column_stack(products)
train_hier_x = dev_hier_x = test_hier_x = None
if args.train:
data_x_text, data_y, data_hier_x = read_corpus_ingredients()
print "# Ings:", len(data_x_text)
if args.add_adulterants:
test_x_text, test_y, test_hier_x = read_corpus_adulterants()
print "# Adulterants:", len(test_x_text)
assert len(test_x_text) == len(adulterants)
data_x_text = np.array(list(data_x_text) + list(test_x_text))
data_y = np.array(list(data_y) + list(test_y)).astype('float32')
data_hier_x = np.array(list(data_hier_x) + list(test_hier_x)).astype('float32')
ings = np.hstack([ings, adulterants])
if args.binary:
data_y = convert_to_zero_one(data_y)
data_hier_x = reduce_dim(data_hier_x, args.hier_dim)
#print "Num data points:", len(data_x_text)
if args.dev or args.test:
#train_indices, dev_indices = train_test_split(
# range(len(data_x_text)), test_size=1/3., random_state=seed)
train_indices, dev_indices, test_indices = split_data_by_wiki(
ings, args.seed)
if not args.test_adulterants_only:
test_x_text = data_x_text[test_indices]
test_y = data_y[test_indices]
dev_x_text = data_x_text[dev_indices]
train_x_text = data_x_text[train_indices]
dev_y = data_y[dev_indices]
train_y = data_y[train_indices]
if len(data_hier_x) > 0:
if not args.test_adulterants_only:
test_hier_x = data_hier_x[test_indices]
dev_hier_x = data_hier_x[dev_indices]
train_hier_x = data_hier_x[train_indices]
train_x = [ embedding_layer.map_to_ids(x) for x in train_x_text ]
if args.dev:
#dev_x, dev_y = read_corpus(args.dev)
dev_x = [ embedding_layer.map_to_ids(x) for x in dev_x_text ]
if args.test_adulterants_only:
test_x_text, test_y, test_hier_x = read_corpus_adulterants()
if args.binary:
test_y = convert_to_zero_one(test_y)
test_hier_x = reduce_dim(test_hier_x, args.hier_dim)
if args.test:
test_x = [ embedding_layer.map_to_ids(x) for x in test_x_text ]
if not args.use_hier:
hier = (None, None, None)
else:
hier = (train_hier_x, dev_hier_x, test_hier_x)
train = (train_x, train_y) if args.train else None
dev = (dev_x, dev_y) if args.dev else None
test = (test_x, test_y) if args.test else None
if args.load_model:
model = Model(
args = None,
embedding_layer = embedding_layer,
nclasses = -1
)
model.load_model(args.load_model)
predict_model, get_representation = model.train(train, dev, test, hier, products)
elif args.train:
model = Model(
args = args,
embedding_layer = embedding_layer,
nclasses = len(train_y[0]), #max(train_y.data)+1
products_len = products_len,
)
model.ready()
#print train_x[0].dtype, train_hier_x[0].dtype, dev_hier_x[0].dtype, test_hier_x[0].dtype
predict_model, get_representation = model.train(
train, dev, test, hier, products)
print "Saving predictions"
save_predictions(args, predict_model, train, dev, test, hier, products)
print "Saving representations"
save_representations(args, get_representation, train, dev, test, products)
if __name__ == "__main__":
    # Command-line entry point: parse hyper-parameters and paths, then
    # hand off to main().
    argparser = argparse.ArgumentParser(sys.argv[0])
    argparser.add_argument("--train", type=str, default="",
                           help="path to training data")
    argparser.add_argument("--dev", type=str, default="",
                           help="path to development data")
    argparser.add_argument("--test", type=str, default="",
                           help="path to test data")
    argparser.add_argument("--hidden_dim", "-d", type=int, default=200,
                           help="hidden dimensions")
    argparser.add_argument("--decay", type=float, default=0.3)
    argparser.add_argument("--learning", type=str, default="adam",
                           help="learning method (sgd, adagrad, adam, ...)")
    # BUG FIX: the default used to be the string "0.01", silently relying
    # on argparse's implicit type-conversion of string defaults.
    argparser.add_argument("--learning_rate", type=float, default=0.01,
                           help="learning rate")
    argparser.add_argument("--max_epochs", type=int, default=100,
                           help="maximum # of epochs")
    argparser.add_argument("--eval_period", type=int, default=-1,
                           help="evaluate on dev every period")
    argparser.add_argument("--dropout_rate", type=float, default=0.0,
                           help="dropout probability")
    argparser.add_argument("--l2_reg", type=float, default=0.00001)
    argparser.add_argument("--embedding", type=str, default="")
    argparser.add_argument("--batch", type=int, default=15,
                           help="mini-batch size")
    argparser.add_argument("--depth", type=int, default=3,
                           help="number of feature extraction layers (min:1)")
    argparser.add_argument("--order", type=int, default=3,
                           help="when the order is k, we use up tp k-grams (k=1,2,3)")
    argparser.add_argument("--act", type=str, default="relu",
                           help="activation function (none, relu, tanh)")
    argparser.add_argument("--layer", type=str, default="rcnn",
                           help="type of neural net (LSTM, RCNN, StrCNN)")
    argparser.add_argument("--mode", type=int, default=1)
    argparser.add_argument("--seed", type=int, default=42,
                           help="random seed of the model")
    argparser.add_argument("--model", type=str, default="",
                           help="label of model")
    argparser.add_argument("--save_model", action='store_true',
                           help="whether to save model")
    argparser.add_argument("--load_model", type=str, default="",
                           help="load model from this file")
    argparser.add_argument("--pooling", type=int, default=1,
                           help="whether to use mean pooling or take the last vector")
    argparser.add_argument("--use_hier", action='store_true',
                           help="use hierarchy")
    argparser.add_argument("--hier_dim", type=int, default=100,
                           help="hierarchy dimension")
    argparser.add_argument("--products", action='store_true',
                           help="use product categories")
    argparser.add_argument("--final_softmax", action='store_true',
                           help="final softmax layer")
    argparser.add_argument("--no_bias", action='store_true',
                           help="don't add bias to the vector representations")
    argparser.add_argument("--add_adulterants", action='store_true',
                           help="add adulterants to training")
    argparser.add_argument("--test_adulterants_only", action='store_true',
                           help="test using adulterants only")
    argparser.add_argument("--binary", action='store_true',
                           help="binary prediction for each product category rather than distribution")
    args = argparser.parse_args()
    main(args)
| youyanggu/rcnn | code/adulteration/main.py | Python | apache-2.0 | 40,708 |
# -*- coding: utf-8 -*-
from datetime import timedelta
from unittest import mock
from uuid import UUID
from django.test import TestCase
from sesam import SesamStudent, StudentNotFound
from .. import factories
from ..models import Student
def sesam_response_factory(student):
    # Factory function useful together with StudentFactory.
    # Mirrors a Student model instance as the SesamStudent tuple that the
    # Sesam client would return for it.
    union_name = student.union.name if student.union else None
    section_code = student.section.code if student.section else None
    return SesamStudent(
        liu_id=student.liu_id,
        name=student.name,
        union=union_name,
        section_code=section_code,
        nor_edu_person_lin=student.id,
        liu_lin=student.liu_lin,
        email=student.email
    )
class SesamTests(TestCase):
    """Tests for Sesam-backed Student lookups (use_sesam=True).

    The network-facing Sesam client and Student.is_outdated are mocked
    throughout, so only the local create/refresh logic is exercised.
    """

    def test_get_no_local_no_sesam(self):
        # Without existing local entry and without Sesam match.
        with mock.patch('sesam.SesamStudentServiceClient.get_student',
                        side_effect=StudentNotFound):
            with self.assertRaises(Student.DoesNotExist):
                Student.objects.get(liu_id='oller120', use_sesam=True)

    def test_get_no_local(self):
        # Without existing local entry.
        # A Sesam hit must create and persist a new local Student.
        mock_sesam_response = sesam_response_factory(
            factories.StudentFactory.build())
        with mock.patch('sesam.SesamStudentServiceClient.get_student',
                        return_value=mock_sesam_response):
            student = Student.objects.get(liu_id=mock_sesam_response.liu_id,
                                          use_sesam=True)
        student.refresh_from_db()  # Make sure the changes are persisted
        self.assertEqual(student.id, mock_sesam_response.nor_edu_person_lin)

    def test_get_with_local(self):
        # With local entry.
        original_student = factories.StudentFactory(union=None)
        new_union = factories.UnionFactory()
        # Mock response that looks like the student but now with a union
        # membership
        mock_sesam_response = sesam_response_factory(original_student)._replace(
            union=new_union.name)
        # While the local entry is fresh, Sesam data must NOT overwrite it.
        with mock.patch('sesam.SesamStudentServiceClient.get_student',
                        return_value=mock_sesam_response):
            with mock.patch('kobra.models.Student.is_outdated',
                            new_callable=mock.PropertyMock, return_value=False):
                unchanged_student = Student.objects.get(
                    id=mock_sesam_response.nor_edu_person_lin, use_sesam=True)
        self.assertEqual(unchanged_student.union, None)
        unchanged_student.refresh_from_db()
        self.assertEqual(unchanged_student.union, None)
        # Once outdated, the refreshed Sesam data must be applied and saved.
        with mock.patch('sesam.SesamStudentServiceClient.get_student',
                        return_value=mock_sesam_response):
            with mock.patch('kobra.models.Student.is_outdated',
                            new_callable=mock.PropertyMock, return_value=True):
                changed_student = Student.objects.get(
                    id=mock_sesam_response.nor_edu_person_lin, use_sesam=True)
        self.assertEqual(changed_student.union, new_union)
        changed_student.refresh_from_db()
        self.assertEqual(changed_student.union, new_union)

    def test_get_with_local_no_sesam(self):
        # With local entry.
        # If Sesam no longer knows the student, the stale local entry is
        # returned untouched (last_updated unchanged).
        student = factories.StudentFactory()
        with mock.patch('sesam.SesamStudentServiceClient.get_student',
                        side_effect=StudentNotFound):
            with mock.patch('kobra.models.Student.is_outdated',
                            new_callable=mock.PropertyMock,
                            return_value=True):
                fetched_student = Student.objects.get(pk=student.pk,
                                                      use_sesam=True)
        self.assertEqual(student.pk, fetched_student.pk)
        self.assertEqual(student.last_updated, fetched_student.last_updated)
| karservice/kobra | kobra/tests/test_sesam.py | Python | mit | 4,475 |
# ulno_iot display
from machine import Pin, I2C
import ssd1306
# Text grid size: the SSD1306 panel is 128x64 pixels and the framebuffer
# font is 8x8 pixels per character -> 16 columns x 8 rows.
CHAR_WIDTH = 128 // 8
CHAR_HEIGHT = 64 // 8
# True once the display answered on the I2C bus (set in the else below).
present = False
# Current text cursor position in character cells (column _x, row _y).
_y = 0
_x = 0
# test if lcd is responding
i2c = I2C(sda=Pin(4), scl=Pin(14))
try:
    display = ssd1306.SSD1306_I2C(128, 64, i2c)
    display.fill(0)
    display.text("iot.ulno.net", 16, 4)
    display.show()
except OSError:
    # shield seems not to be here
    print("ulno_iot-lcd not found")
else:
    # shield is present
    present = True
    # Re-export the raw framebuffer primitives at module level.
    text = display.text
    show = display.show
    fill = display.fill
    scroll = display.scroll
    pixel = display.pixel
### easier output functions which can scroll
def set_cursor(x, y):
global _x, _y
if x < 0: x = 0
if y < 0: y = 0
if x >= CHAR_WIDTH: x = CHAR_WIDTH - 1
if y >= CHAR_HEIGHT: y = CHAR_HEIGHT - 1
_x = x
_y = y
def get_cursor():
global _x, _y
return (_x, _y)
    # clear display immediately
    def clear(show=True):
        """Blank the whole framebuffer and home the cursor.

        :param show: if True, push the cleared buffer to the panel now.
        """
        set_cursor(0, 0)
        display.fill(0)
        if show:
            display.show()
    # move cursor down and scroll the text area by one line if at screen end
    def line_feed(show=True):
        """Advance the cursor to column 0 of the next line.

        On the bottom row the framebuffer is scrolled up by one character
        row (8 pixels) instead of moving the cursor.
        """
        global _x, _y
        if (_y < CHAR_HEIGHT - 1):
            _y += 1
        else:
            # NOTE(review): scroll() shifts pixels but does not blank the
            # newly exposed bottom row; print() overdraws it with text --
            # confirm stale pixels cannot remain between characters.
            display.scroll(0, -8)
        if show:
            display.show()
        _x = 0
# move just to start of line and clear the whole line
def clear_line(show=True):
global _x, _y
_x = 0
# clear line
for y in range(_y * 8, (_y + 1) * 8):
for x in range(0, CHAR_WIDTH * 8):
display.pixel(x, y, False)
if show:
display.show()
    # print some text in the text area and linebreak and wrap if necessary
    def print(text="", newline=False, show=True):
        """Write *text* at the cursor, wrapping at the right edge.

        Shadows the builtin print for this module on purpose: embedded
        newlines start new lines, long lines wrap, and the display scrolls
        when the bottom row is passed.

        :param newline: if True, end with an extra line feed.
        :param show: if True, refresh the physical display when done.
        """
        global _x
        # A single trailing "\n" is remembered and handled at the end so the
        # split below does not yield a spurious empty last line.
        linefeed_last = text.endswith("\n")
        if linefeed_last:
            text = text[:-1]
        l_first = True
        for l in text.split("\n"):
            if not l_first:  # scroll if it's not the first line
                line_feed(show=False)
            l_first = False
            while len(l) > 0:
                # Emit as many characters as still fit on the current line.
                sub = l[0:CHAR_WIDTH - _x]
                display.text(sub, _x * 8, _y * 8)
                _x += len(sub)
                if _x >= CHAR_WIDTH:
                    line_feed(show=False)
                l = l[len(sub):]
        if linefeed_last:
            line_feed(show=False)
        if newline:
            line_feed(show=False)
        if show:
            display.show()
    def println(text="", show=True):
        """Print *text* and always terminate the line (convenience wrapper)."""
        print(text, newline=True, show=show)
| ulno/micropython-extra-ulno | examples/obsolete/device_types/devkit1/ulno_iot_display.py | Python | mit | 2,413 |
#!/usr/bin/env python
"""cflib.vcf
==============
This module provides functions to read, write and access vcf files.
Objects
-------
Classes:
- :class:`NucBase`, store a nucleotide base
- :class:`VCFStream`, a variant call format (VCF) stream object
- :class:`VCFSeq`, a VCF file sequence object
Exception Classes:
- :class:`NotAVariantCallFormatFileError`
- :class:`NotANucBaseError`
Functions:
- :func:`update_base()`, read a line into a base
- :func:`get_nuc_base_from_line()`, create a new `NucBase` from a line
- :func:`check_fixed_field_header()`, check a VCF fixed field header
string
- :func:`get_indiv_from_field_header()`, extract list of individuals
from header
- :func:`init_seq()`, open VCF file and initialize `VCFStream`
- :func:`open_seq()`, open VCF file and save it to a `VCFSeq`
- :func:`get_header_line_string()`, print vcf header line
----
"""
__docformat__ = 'restructuredtext'
import cflib.seqbase as sb
# Map nucleotide characters to array indices and back.
dna2ind = {'a': 0, 'c': 1, 'g': 2, 't': 3}
ind2dna = ['a', 'c', 'g', 't']
class NotAVariantCallFormatFileError(sb.SequenceDataError):
    """Exception raised if given VCF file is not valid."""
    # Subclasses SequenceDataError so callers can catch all cflib
    # sequence errors with a single handler.
    pass
class NotANucBaseError(sb.SequenceDataError):
    """Exception raised if given nucleotide base is not valid."""
    # Raised by update_base() when a VCF data line has too few columns.
    pass
# The nine mandatory fixed-field column names of a VCF header line.
hdList = ['#CHROM', 'POS', 'ID', 'REF', 'ALT',
          'QUAL', 'FILTER', 'INFO', 'FORMAT']
def update_base(ln, base, info=True):
    """Fill *base* with the data contained in the VCF line *ln*.

    The line is split at tabs into at most ten fields.  #CHROM, POS, REF,
    ALT and the per-individual data are always stored; the remaining fixed
    fields (ID, QUAL, FILTER, INFO, FORMAT) are stored only when *info* is
    True.  Returns *base*; raises :class:`NotANucBaseError` when the line
    does not have enough columns.
    """
    fields = ln.split('\t', maxsplit=9)
    if len(fields) < 10:
        raise NotANucBaseError('Line ' + ln + ' is not a NucBase.')
    base.chrom = fields[0]
    base.pos = int(fields[1])
    base.ref = fields[3]
    base.alt = fields[4]
    base.speciesData = fields[9].rstrip().split('\t')
    if info is True:
        base.id = fields[2]
        base.qual = fields[5]
        base.filter = fields[6]
        base.info = fields[7]
        base.format = fields[8]
    return base
def get_nuc_base_from_line(ln, info=False, ploidy=None):
    """Build and return a new :class:`NucBase` from the VCF line *ln*.

    When *info* is False only #CHROM, POS, REF, ALT and the species data
    are read.

    :param Bool info: Determines if the remaining fixed fields are read.
    :param int ploidy: If the ploidy is known, it is stored on the base.
    """
    base = update_base(ln, NucBase(), info)
    if ploidy is not None:
        base.ploidy = ploidy
    return base
class NucBase():
    """Stores a nucleotide base.

    A class that stores a single nucleotide base and related
    information retrieved from a VCF file.  Please see
    http://www.1000genomes.org/ for a detailed description of the VCF
    format.

    Genotype strings are split on both '/' (unphased) and '|' (phased)
    allele separators, as defined by the VCF specification.

    :ivar str chrom: Chromosome name.
    :ivar int pos: 1-based position on the chromosome.
    :ivar str id: ID.
    :ivar str ref: Reference base.
    :ivar str alt: Alternative base(s), comma separated.
    :ivar str qual: Quality.
    :ivar str filter: Filter.
    :ivar str info: Additional information.
    :ivar str format: String with format specification.
    :ivar [str] speciesData: List with strings of the species data
                             (e.g. 0/1:...).
    :ivar int ploidy: Ploidy (number of sets of chromosomes) of the
                      sequenced individuals.  Can be set with
                      :func:`set_ploidy`.
    """

    def __init__(self):
        self.chrom = ''
        self.pos = 0
        self.id = ''
        self.ref = ''
        self.alt = ''
        self.qual = ''
        self.filter = ''
        self.info = ''
        self.format = ''
        self.speciesData = []
        self.ploidy = None

    @staticmethod
    def _split_genotype(genotype):
        """Split a genotype string on '/' (unphased) or '|' (phased)."""
        return genotype.replace('|', '/').split('/')

    def get_info(self):
        """Return nucleotide base information string."""
        msg1 = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t' % \
               (self.chrom,
                self.pos, self.id, self.ref,
                self.alt, self.qual, self.filter,
                self.info, self.format)
        msg2 = '\t'.join(self.speciesData)
        return msg1 + msg2

    def print_info(self):
        """Print the stored base and related information from the VCF file."""
        print(self.get_info())
        return

    def get_ref_base(self):
        """Return reference base.

        :rtype: char
        """
        return self.ref

    def get_alt_base_list(self):
        """Return alternative bases as a list of lowercase strings."""
        altBases = [b.lower() for b in self.alt.split(',')]
        return altBases

    def set_ploidy(self):
        """Derive and return self.ploidy from the first genotype field."""
        baseInfo = self.speciesData[0].split(':')[0]
        self.ploidy = len(self._split_genotype(baseInfo))
        return self.ploidy

    def get_speciesData(self):
        """Return species data as a list.

        - data[0][0] = data of first species/individual on chromatide A
        - data[0][1] = only set for non-haploids; data of first
          species/individual on chromatide B

        Sets data[i][j] to None if the base of individual *i* on
        chromosome *j* could not be read (e.g. it is not valid).

        :rtype: matrix of integers
        """
        data = []
        for i in range(0, len(self.speciesData)):
            # Only the genotype, i.e. the part before the first ':', matters.
            baseInfo = self.speciesData[i].split(':')[0]
            if self.ploidy == 1:
                # Haploid: a single allele index per individual.
                try:
                    baseInfo = int(baseInfo)
                except ValueError:
                    # Invalid base.
                    baseInfo = None
                data.append([baseInfo])
            else:
                # Diploid or higher: one allele index per chromatide.
                baseInfoL = self._split_genotype(baseInfo)
                for j in range(len(baseInfoL)):
                    try:
                        baseInfoL[j] = int(baseInfoL[j])
                    except ValueError:
                        # Invalid base.
                        baseInfoL[j] = None
                data.append(baseInfoL)
        return data

    def get_base_ind(self, iI, iC):
        """Return the base of a specific individual.

        :param int iI: 0-based index of the individual.
        :param int iC: 0-based index of the chromatide (for n-ploid
                       individuals).
        :rtype: character with nucleotide base, or None if unreadable.
        """
        data = self.get_speciesData()
        altBases = self.get_alt_base_list()
        if data[iI][iC] is None:
            return None
        elif data[iI][iC] == 0:
            # Allele index 0 denotes the reference base.
            return self.ref
        else:
            # Indices >= 1 are 1-based pointers into the ALT list.
            return altBases[data[iI][iC]-1]

    def purge(self):
        """Purge the data associated with this :class:`NucBase`."""
        self.__init__()
class VCFStream():
    """Store base data from a VCF file line per line.

    It can be initialized with :func:`init_seq`.  This class stores a
    single base retrieved from a VCF file and the file itself.  It is
    used to parse through a VCF file line by line, processing the bases
    without having to read the whole file at once.

    :param str seqName: Name of the stream.
    :param fo vcfFileObject: File object associated with the stream.
    :param [str] speciesList: List with species / individuals.
    :param NucBase firstBase: First :class:`NucBase` to be saved.

    :ivar str name: Name of the stream.
    :ivar fo fo: Stored VCF file object.
    :ivar [str] speciesL: List with species / individuals.
    :ivar int nSpecies: Number of species / individuals.
    :ivar NucBase base: Stored :class:`NucBase`.
    """

    def __init__(self, seqName, vcfFileObject, speciesList, firstBase):
        """Initialize a :class:`VCFStream` object; add state objects."""
        self.name = seqName
        self.fo = vcfFileObject
        self.speciesL = speciesList
        self.nSpecies = len(speciesList)
        self.base = firstBase

    def print_info(self):
        """Print VCFStream information."""
        print("Name:", self.name)
        print("File object:", self.fo)
        print("List of species/individuals:", self.speciesL)
        print("Number of species/individuals:", self.nSpecies)
        print("Saved base:")
        self.base.print_info()

    def read_next_base(self):
        """Read the next base into self.base and return its position.

        Raises a *ValueError* when the end of the stream is reached.
        """
        line = self.fo.readline()
        if line == '':
            raise ValueError("End of VCFStream.")
        update_base(line, self.base)
        return self.base.pos

    def close(self):
        """Close the linked file."""
        self.fo.close()
class VCFSeq():
    """Store data retrieved from a VCF file.

    Initialized with :func:`open_seq`.

    :ivar str name: Sequence name.
    :ivar str header: Sequence header.
    :ivar [str] speciesL: List with species / individuals.
    :ivar int nSpecies: Number of species / individuals.
    :ivar [NucBase] baseL: List with stored :class:`NucBase` objects.
    :ivar int nBases: Number of :class:`NucBase` objects stored.
    """

    def __init__(self):
        """Initialize an empty :class:`VCFSeq`."""
        self.name = ''
        self.header = []
        self.speciesL = []
        self.nSpecies = 0
        self.baseL = []
        self.nBases = 0

    def get_header_line_string(self, indiv):
        """Return a standard VCF file header string with individuals *indiv*."""
        return '\t'.join(list(hdList) + list(indiv))

    def print_header_line(self, indiv):
        """Print a standard VCF file header with individuals *indiv*."""
        print(self.get_header_line_string(indiv))

    def print_info(self, maxB=50, printHeader=False):
        """Print VCF sequence information.

        Prints the header line and at most *maxB* bases (defaults to 50).
        The full VCF header is printed only when *printHeader* is True.
        """
        if printHeader is True:
            print(self.header)
        self.print_header_line(self.speciesL)
        for base in self.baseL[:min(maxB, self.nBases)]:
            base.print_info()
        return

    def append_nuc_base(self, base):
        """Append *base*, a given :class:`NucBase`, to this sequence."""
        self.baseL.append(base)
        self.nBases += 1
        return

    def has_base(self, chrom, pos):
        """Return True (False) if base is (not) found.

        :param str chrom: Chromosome name.
        :param int pos: 1-based position on *chrom*.
        """
        return any(b.pos == pos and b.chrom == chrom
                   for b in self.baseL[:self.nBases])

    def get_nuc_base(self, chrom, pos):
        """Return the base at position *pos* of chromosome *chrom*."""
        for base in self.baseL[:self.nBases]:
            if base.pos == pos and base.chrom == chrom:
                return base
        raise sb.SequenceDataError('Base at position ' + str(pos) +
                                   ' on chromosome ' + str(chrom) +
                                   ' not found.')
def check_fixed_field_header(ln):
    """Validate the fixed-field header line *ln* of a VCF file.

    Raises :class:`NotAVariantCallFormatFileError` when the first nine
    tab-separated columns differ from the mandatory VCF column names.

    Sample header line::

      #CHROM\t POS\t ID\t REF\t ALT\t QUAL\t FILTER\t INFO\t FORMAT\t SpeciesL
    """
    if ln.split('\t', maxsplit=9)[0:9] != hdList:
        raise NotAVariantCallFormatFileError('Header line is invalid.')
def get_indiv_from_field_header(ln):
    """Return the species / individual names from a header line *ln*.

    The nine fixed columns are skipped; everything after them is split on
    whitespace into individual names.

    Sample header line::

      #CHROM\t POS\t ID\t REF\t ALT\t QUAL\t FILTER\t INFO\t FORMAT\t SpeciesL
    """
    fields = ln.split(maxsplit=9)
    if len(fields) != 10:
        raise NotAVariantCallFormatFileError('No species in header line.')
    return fields[9].rstrip().split()
def init_seq(VCFFileName, maxskip=100, name=None):
    """Open a (gzipped) VCF4.1 file and return a :class:`VCFStream`.

    Checks that the file is in VCF format and positions the stream on the
    first base.  Close the associated file object with
    :func:`VCFStream.close` when you do not need it anymore.

    :param str VCFFileName: Name of the VCF file.
    :param int maxskip: Search at most *maxskip* lines for the start of
        the base data (defaults to 100).
    :param str name: Name of the sequence; defaults to the file name.
    """
    VCFFile = sb.gz_open(VCFFileName)
    # Set the vcf sequence name.
    if name is None:
        name = sb.stripFName(VCFFileName)
    # Skip the meta information and locate the fixed-field header line.
    for _ in range(maxskip):
        line = VCFFile.readline()
        if line == '':
            raise NotAVariantCallFormatFileError("File contains no data.")
        if line[0:6] == '#CHROM':
            # Here starts the data.
            check_fixed_field_header(line)
            speciesL = get_indiv_from_field_header(line)
            break
    else:
        raise NotAVariantCallFormatFileError(
            "Didn't find any data within " + str(maxskip) + " lines.")
    # The first data line initializes the stream's current base.
    base = get_nuc_base_from_line(VCFFile.readline(), info=False)
    base.set_ploidy()
    return VCFStream(name, VCFFile, speciesL, base)
def open_seq(VCFFileName, maxskip=100, name=None):
    """Open a VCF4.1 file and read it completely into a :class:`VCFSeq`.

    Checks that the file is in VCF format and reads all bases.

    :param str VCFFileName: Name of the VCF file.
    :param int maxskip: Search at most *maxskip* lines for the start of
        the base data (defaults to 100).
    :param str name: Name of the sequence; defaults to the file name.
    """
    def test_sequence(seq):
        """Test a given VCF sequence.  TODO: implement this."""
        pass

    seq = VCFSeq()
    seq.header = ""
    # Set the vcf sequence name.
    seq.name = name if name is not None else sb.stripFName(VCFFileName)
    VCFFile = sb.gz_open(VCFFileName)
    # Collect the meta lines and locate the fixed-field header line.
    for _ in range(maxskip):
        line = VCFFile.readline()
        if line == '':
            raise NotAVariantCallFormatFileError("File contains no data.")
        if line[0:2] == '##':
            seq.header += line
        if line[0:6] == '#CHROM':
            # Here starts the data.
            check_fixed_field_header(line)
            seq.speciesL = get_indiv_from_field_header(line)
            seq.nSpecies = len(seq.speciesL)
            break
    else:
        raise NotAVariantCallFormatFileError(
            "Didn't find any data within " + str(maxskip) + " lines.")
    # Everything following the header is base data.
    for line in VCFFile:
        seq.append_nuc_base(get_nuc_base_from_line(line))
    VCFFile.close()
    test_sequence(seq)
    return seq
def get_header_line_string(indiv):
    """Return a standard VCF file header string with individuals *indiv*."""
    return '\t'.join(list(hdList) + list(indiv))
| pomo-dev/cflib | cflib/vcf.py | Python | mit | 16,699 |
import unittest
from .... import examples as pysal_examples
from ..pyDbfIO import DBF
import tempfile
import os
class test_DBF(unittest.TestCase):
    """Tests for the pure-Python DBF reader/writer (pyDbfIO)."""

    def setUp(self):
        # 10740.dbf is a sample attribute table shipped with the pysal
        # examples; it contains 195 records.
        self.test_file = test_file = pysal_examples.get_path('10740.dbf')
        self.dbObj = DBF(test_file, 'r')

    def tearDown(self):
        # Close the reader opened in setUp so file handles do not leak
        # between tests (open files cannot be deleted on some platforms).
        self.dbObj.close()

    def test_len(self):
        self.assertEqual(len(self.dbObj), 195)

    def test_tell(self):
        self.assertEqual(self.dbObj.tell(), 0)
        self.dbObj.read(1)
        self.assertEqual(self.dbObj.tell(), 1)
        self.dbObj.read(50)
        self.assertEqual(self.dbObj.tell(), 51)
        self.dbObj.read()
        self.assertEqual(self.dbObj.tell(), 195)

    def test_cast(self):
        self.assertEqual(self.dbObj._spec, [])
        self.dbObj.cast('FIPSSTCO', float)
        self.assertEqual(self.dbObj._spec[1], float)

    def test_seek(self):
        self.dbObj.seek(0)
        self.assertEqual(self.dbObj.tell(), 0)
        self.dbObj.seek(55)
        self.assertEqual(self.dbObj.tell(), 55)
        self.dbObj.read(1)
        self.assertEqual(self.dbObj.tell(), 56)

    def test_read(self):
        # Reading everything at once and iterating must yield the same rows.
        self.dbObj.seek(0)
        objs = self.dbObj.read()
        self.assertEqual(len(objs), 195)
        self.dbObj.seek(0)
        objsB = list(self.dbObj)
        self.assertEqual(len(objsB), 195)
        for rowA, rowB in zip(objs, objsB):
            self.assertEqual(rowA, rowB)

    def test_random_access(self):
        self.dbObj.seek(0)
        db0 = self.dbObj.read(1)[0]
        self.assertEqual(db0, [1, '35001', '000107', '35001000107', '1.07'])
        self.dbObj.seek(57)
        db57 = self.dbObj.read(1)[0]
        self.assertEqual(db57, [58, '35001', '001900', '35001001900', '19'])
        self.dbObj.seek(32)
        db32 = self.dbObj.read(1)[0]
        self.assertEqual(db32, [33, '35001', '000500', '35001000500', '5'])
        self.dbObj.seek(0)
        self.assertEqual(next(self.dbObj), db0)
        self.dbObj.seek(57)
        self.assertEqual(next(self.dbObj), db57)
        self.dbObj.seek(32)
        self.assertEqual(next(self.dbObj), db32)

    def test_write(self):
        # Copy the sample table record by record, then compare the record
        # payloads byte for byte.
        f = tempfile.NamedTemporaryFile(suffix='.dbf')
        fname = f.name
        f.close()
        self.dbfcopy = fname
        self.out = DBF(fname, 'w')
        self.dbObj.seek(0)
        self.out.header = self.dbObj.header
        self.out.field_spec = self.dbObj.field_spec
        for row in self.dbObj:
            self.out.write(row)
        self.out.close()
        # Skip the 32-byte header: it embeds the file date, which differs.
        # Compare only the n record bytes; PySAL writes a proper terminator
        # byte at the end but not every producer does, so it is ignored.
        n = self.dbObj.record_size * self.dbObj.n_records
        with open(self.test_file, 'rb') as orig, \
                open(self.dbfcopy, 'rb') as copy:
            orig.seek(32)
            copy.seek(32)
            self.assertEqual(orig.read(n), copy.read(n))
        os.remove(self.dbfcopy)

    def test_writeNones(self):
        # None values must round-trip for numeric, date and string fields.
        import datetime
        import time
        f = tempfile.NamedTemporaryFile(suffix='.dbf')
        fname = f.name
        f.close()
        db = DBF(fname, 'w')
        db.header = ["recID", "date", "strID", "aFloat"]
        db.field_spec = [('N', 10, 0), ('D', 8, 0), ('C', 10, 0), ('N', 5, 5)]
        records = []
        for i in range(10):
            d = datetime.date(*time.localtime()[:3])
            rec = [i + 1, d, str(i + 1), (i + 1) / 2.0]
            records.append(rec)
        records.append([None, None, '', None])
        records.append(rec)
        for rec in records:
            db.write(rec)
        db.close()
        db2 = DBF(fname, 'r')
        self.assertEqual(records, db2.read())
        # Close before removing; Windows cannot delete open files.
        db2.close()
        os.remove(fname)
# Allow running this test module directly with `python test_pyDbfIO.py`.
if __name__ == '__main__':
    unittest.main()
| sjsrey/pysal_core | pysal_core/io/IOHandlers/tests/test_pyDbfIO.py | Python | bsd-3-clause | 3,946 |
import _plotly_utils.basevalidators
class ValueValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the string 'value' property of
    treemap.marker.colorbar.tickformatstop.

    NOTE(review): this looks like schema-generated validator boilerplate;
    prefer changing the code generator over editing this file by hand.
    """

    def __init__(
        self,
        plotly_name="value",
        parent_name="treemap.marker.colorbar.tickformatstop",
        **kwargs
    ):
        super(ValueValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit_type; callers may override it via kwargs.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            **kwargs
        )
| plotly/plotly.py | packages/python/plotly/plotly/validators/treemap/marker/colorbar/tickformatstop/_value.py | Python | mit | 464 |
# Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from class_ext import *
Ensure sanity:
>>> x = X(42)
>>> x_function(x)
42
Demonstrate extraction in the presence of metaclass changes:
>>> class MetaX(X.__class__):
... def __new__(cls, *args):
... return super(MetaX, cls).__new__(cls, *args)
>>> class XPlusMetatype(X):
... __metaclass__ = MetaX
>>> x = XPlusMetatype(42)
>>> x_function(x)
42
'''
def run(args=None):
    """Run this module's doctests; return doctest's (failed, attempted)."""
    import doctest
    import sys
    if args is not None:
        sys.argv = args
    module = sys.modules.get(__name__)
    return doctest.testmod(module)
if __name__ == '__main__':
    # NOTE: Python 2 print statements -- this Boost.Python test predates
    # Python 3 and must be run with a Python 2 interpreter.
    print "running..."
    import sys
    status = run()[0]
    if (status == 0): print "Done."
    sys.exit(status)
| NixaSoftware/CVis | venv/bin/libs/python/test/class.py | Python | apache-2.0 | 881 |
"""
ASGI config for shopify_django_app project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project's settings module before building the app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shopify_django_app.settings')
# Module-level ASGI callable that servers (uvicorn, daphne, ...) import.
application = get_asgi_application()
| Shopify/shopify_django_app | shopify_django_app/asgi.py | Python | mit | 413 |
from __future__ import unicode_literals
import time
from datetime import datetime, timedelta
import json
import requests
import calendar
import difflib
import hashlib
from random import uniform
from operator import itemgetter, methodcaller
from itertools import izip
from pokemongo_bot import inventory
from pokemongo_bot.item_list import Item
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.inventory import Pokemons
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.event_handlers.telegram_handler import TelegramSnipe
from pokemongo_bot.cell_workers.pokemon_catch_worker import PokemonCatchWorker
from pokemongo_bot.cell_workers.utils import wait_time_sec, distance, convert
# Represents a URL source and its mappings
class SniperSource(object):
    """One remote URL feed of pokemon spawns plus its JSON field mappings."""

    def __init__(self, data):
        self.url = data.get('url', '')
        # Key under which the result list is nested when the feed is a dict.
        self.key = data.get('key', '')
        self.enabled = data.get('enabled', False)
        # strptime mask used to parse UTC expiration strings.
        self.time_mask = data.get('time_mask', '%Y-%m-%d %H:%M:%S')
        self.mappings = SniperSourceMapping(data.get('mappings', {}))
        self.timeout = data.get('timeout', 5)

    def __str__(self):
        return self.url

    def fetch_raw(self):
        """Fetch the source URL and normalize the JSON payload to a list."""
        some_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) Chrome/52.0.2743.116 Safari/537.36'
        response = requests.get(self.url, headers={'User-Agent': some_agent}, timeout=self.timeout)
        results = response.json()

        # If the results is a dict, retrieve the list from it by the given key. This will return a list afterall.
        if isinstance(results, dict):
            results = results.get(self.key, [])

        # If results is STILL a dict (eg. each pokemon is its own dict), need to build data from nested json (example whereispokemon.net)
        while isinstance(results,dict):
            tmpResults = []
            for key, value in results.iteritems():
                tmpResults.append(value)
            results = tmpResults
        return results

    def fetch(self):
        """Fetch, parse and normalize targets into a list of pokemon dicts."""
        pokemons = []

        try:
            results = self.fetch_raw()

            # Parse results
            for result in results:
                iv = result.get(self.mappings.iv.param)
                id = result.get(self.mappings.id.param)
                name = self._get_closest_name(self._fixname(result.get(self.mappings.name.param)))
                latitude = result.get(self.mappings.latitude.param)
                longitude = result.get(self.mappings.longitude.param)
                expiration = result.get(self.mappings.expiration.param)
                encounter = result.get(self.mappings.encounter.param)
                spawnpoint = result.get(self.mappings.spawnpoint.param)

                # If this is a composite param, split it ("coords": "-31.415553, -64.190480")
                if self.mappings.latitude.param == self.mappings.longitude.param:
                    position = result.get(self.mappings.latitude.param).replace(" ", "").split(",")
                    latitude = position[0]
                    longitude = position[1]

                # Some sources block access to all pokemon, need to skip those!
                try:
                    float(latitude)
                    float(longitude)
                except ValueError:
                    # Seems to be blacked out, do next.
                    continue

                # Format the time accordingly. Pokemon times are in milliseconds!
                if self.mappings.expiration.exists and expiration:
                    if self.mappings.expiration.format == SniperSourceMappingTimeFormat.SECONDS:
                        expiration = expiration * 1000
                    elif self.mappings.expiration.format == SniperSourceMappingTimeFormat.UTC:
                        # Convert the UTC timestamp string to local epoch ms.
                        utc_date = datetime.strptime(expiration.replace("T", " ")[:19], self.time_mask)
                        unix_timestamp = calendar.timegm(utc_date.timetuple())
                        local_date = datetime.fromtimestamp(unix_timestamp)
                        local_date = local_date.replace(microsecond=utc_date.microsecond)
                        expiration = time.mktime(local_date.timetuple()) * 1000
                else:
                    # No expiration given: assume the target lasts 3 minutes.
                    minutes_to_expire = 3
                    seconds_per_minute = 60
                    expiration = (time.time() + minutes_to_expire * seconds_per_minute) * 1000

                # If either name or ID are invalid, fix it using each other
                if not name or not id:
                    if not name and id:
                        name = Pokemons.name_for(id)
                    if not id and name:
                        id = Pokemons.id_for(name)

                # Some type castings were specified for a better readability
                pokemons.append({
                    'iv': float(iv or 0),
                    'pokemon_id': int(id or 0),
                    'pokemon_name': str(name or ''),
                    'latitude': float(latitude or .0),
                    'longitude': float(longitude or .0),
                    'expiration_timestamp_ms': long(expiration or 0),
                    'last_modified_timestamp_ms': long(expiration or 0),
                    'encounter_id': long(encounter or 0),
                    'spawn_point_id': str(spawnpoint or '')
                })
        except requests.exceptions.Timeout:
            raise Exception("Fetching has timed out")
        except requests.exceptions.ConnectionError:
            raise Exception("Source not available")
        except:
            raise

        return pokemons

    def validate(self):
        """Fetch once and verify that every configured param really exists.

        Raises LookupError listing all missing params, or ValueError when
        the source is disabled or unreachable.
        """
        try:
            if self.enabled:
                errors = []
                data = self.fetch_raw()

                # Check whether the params really exist if they have been specified like so
                if data:
                    if self.mappings.iv.exists and self.mappings.iv.param not in data[0]:
                        errors.append(self.mappings.iv.param)
                    if self.mappings.id.exists and self.mappings.id.param not in data[0]:
                        errors.append(self.mappings.id.param)
                    if self.mappings.name.exists and self.mappings.name.param not in data[0]:
                        errors.append(self.mappings.name.param)
                    if self.mappings.latitude.exists and self.mappings.latitude.param not in data[0]:
                        errors.append(self.mappings.latitude.param)
                    if self.mappings.longitude.exists and self.mappings.longitude.param not in data[0]:
                        errors.append(self.mappings.longitude.param)
                    if self.mappings.expiration.exists and self.mappings.expiration.param not in data[0]:
                        errors.append(self.mappings.expiration.param)
                    if self.mappings.encounter.exists and self.mappings.encounter.param not in data[0]:
                        errors.append(self.mappings.encounter.param)
                    if self.mappings.spawnpoint.exists and self.mappings.spawnpoint.param not in data[0]:
                        errors.append(self.mappings.spawnpoint.param)

                    # All wrong mappings were gathered at once for a better usability (instead of raising multiple exceptions)
                    if errors:
                        raise LookupError("The following params dont exist: {}".format(", ".join(errors)))
            else:
                raise ValueError("Source is not enabled")
        except requests.exceptions.Timeout:
            raise ValueError("Fetching has timed out")
        except requests.exceptions.ConnectionError:
            raise ValueError("Source not available")
        except:
            raise

    def _fixname(self,name):
        """Normalize source-specific spellings to canonical pokemon names."""
        if name:
            name = name.replace("mr-mime","mr. mime")
            name = name.replace("farfetchd","farfetch'd")
            name = name.replace("Nidoran\u2642","nidoran m")
            name = name.replace("Nidoran\u2640","nidoran f")
        return name

    def _get_closest_name(self, name):
        """Return the closest fuzzy match among known pokemon names."""
        if not name:
            return

        pokemon_names = [p.name for p in inventory.pokemons().STATIC_DATA]
        closest_names = difflib.get_close_matches(name, pokemon_names, 1)

        if closest_names:
            closest_name = closest_names[0]
            return closest_name

        return name
# Represents the JSON params mappings
class SniperSourceMapping(object):
    """Holds the per-field JSON mapping definitions of a sniper source."""

    # (attribute name, whether the field is mandatory in the feed)
    _SPEC = (
        ('iv', False),
        ('id', True),
        ('name', True),
        ('latitude', True),
        ('longitude', True),
        ('expiration', False),
        ('encounter', False),
        ('spawnpoint', False),
    )

    def __init__(self, mapping):
        for attr, required in self._SPEC:
            values = mapping.get(attr, {})
            setattr(self, attr, SniperSourceMappingValues(required, values))
# Represents the JSON params mappings values
class SniperSourceMappingValues(object):
    """One field's mapping: the JSON param name and its time format."""

    def __init__(self, required, values):
        # Whether the mapped field must be present in the source payload.
        self.required = required
        self.param = values.get('param', '')
        self.format = values.get('format', SniperSourceMappingTimeFormat.DEFAULT)
        # A mapping "exists" once the user configured a param name for it.
        self.exists = values != {} and values.get('param') != None

        # Validate formats
        # NOTE(review): vars() also yields dunder entries (e.g. __module__),
        # so a format string equal to the module name would slip through --
        # confirm whether stricter validation is wanted.
        if self.format not in vars(SniperSourceMappingTimeFormat).values():
            raise ValueError('Unrecognized format: {}'.format(self.format))
# Represents the JSON time param formatting type
class SniperSourceMappingTimeFormat(object):
    # How the expiration timestamp is encoded in the source feed.
    NONE = ''
    UTC = 'utc'  # timestamp string parsed with the source's time_mask
    SECONDS = 'seconds'  # unix epoch seconds
    MILLISECONDS = 'milliseconds'  # unix epoch milliseconds
    DEFAULT = NONE
# Represents the information ordering types
class SniperOrderMode(object):
    # Keys by which candidate targets can be ordered before sniping.
    IV = 'iv'
    VIP = 'vip'
    MISSING = 'missing'
    PRIORITY = 'priority'
    EXPIRATION = 'expiration_timestamp_ms'
    # NOTE(review): DEFAULT is a mutable class-level list shared by every
    # user of this class -- mutating it would affect all of them.
    DEFAULT = [MISSING, VIP, PRIORITY]
# Represents the snipping type
class SniperMode(object):
    # Where snipe targets come from: URL feeds, social, or telegram commands.
    URL = 'url'
    SOCIAL = 'social'
    TELEGRAM = 'telegram'
    DEFAULT = SOCIAL
# Teleports the player to a target gotten from either social or a single/multiple URL sources
class Sniper(BaseTask):
    SUPPORTED_TASK_API_VERSION = 1

    # Throttles (in seconds) for map-cell checks and data requests.
    MIN_SECONDS_ALLOWED_FOR_CELL_CHECK = 60
    MIN_SECONDS_ALLOWED_FOR_REQUESTING_DATA = 10
    # Sniping wastes a lot of balls; refuse to snipe below this stock.
    MIN_BALLS_FOR_CATCHING = 10
    MAX_CACHE_LIST_SIZE = 300
    def __init__(self, bot, config):
        """Delegate to BaseTask, which stores bot/config and calls initialize()."""
        super(Sniper, self).__init__(bot, config)
    def initialize(self):
        """Read the task config, validate mode/ordering and probe URL sources."""
        self.disabled = False
        self.last_cell_check_time = time.time()
        self.last_data_request_time = time.time()
        self.inventory = inventory.items()
        self.pokedex = inventory.pokedex()
        self.debug = self.config.get('debug', False)
        # Minimum IV that makes a target snipe-worthy even off the catch list.
        self.special_iv = self.config.get('special_iv', 0)
        self.bullets = self.config.get('bullets', 1)
        self.homing_shots = self.config.get('homing_shots', True)
        self.mode = self.config.get('mode', SniperMode.DEFAULT)
        self.order = self.config.get('order', SniperOrderMode.DEFAULT)
        self.cooldown_enabled = self.config.get('cooldown_enabled', False)
        self.loiter_after_snipe = self.config.get('loiter_after_snipe', False)
        self.catch_list = self.config.get('catch', {})
        self.altitude = uniform(self.bot.config.alt_min, self.bot.config.alt_max)
        self.sources = [SniperSource(data) for data in self.config.get('sources', [])]
        self.no_snipe_until = None
        self.teleport_back_to_last_location = self.config.get('teleport_back_to_last_location', False)

        # The snipe cache is shared bot-wide so repeated task instances do
        # not re-snipe the same targets.
        if not hasattr(self.bot,"sniper_cache"):
            self.bot.sniper_cache = []

        # Dont bother validating config if task is not even enabled
        if self.enabled:
            # Validate ordering
            for ordering in self.order:
                if ordering not in vars(SniperOrderMode).values():
                    raise ValueError("Unrecognized ordering: '{}'".format(ordering))

            # Validate mode and sources
            if self.mode not in vars(SniperMode).values():
                raise ValueError("Unrecognized mode: '{}'".format(self.mode))
            else:
                # Selected mode is valid. Validate sources if mode is URL
                if self.mode == SniperMode.URL or self.mode == SniperMode.SOCIAL:
                    self._log("NOTE: Sniper only works if your bot location is in same city as your source")
                    self._log("Validating sources: {}...".format(", ".join([source.url for source in self.sources])))

                    # Create a copy of the list so we can iterate and remove elements at the same time
                    for source in list(self.sources):
                        try:
                            source.validate()
                            self._log("Source '{}' is good!".format(source.url))
                        # TODO: On ValueError, remember source and validate later (pending validation)
                        except (LookupError, ValueError) as exception:
                            self._error("Source '{}' contains errors. Details: {}. Removing from sources list...".format(source.url, exception))
                            self.sources.remove(source)

                    # Notify user if all sources are invalid and cant proceed
                    if not self.sources :
                        self._error("There is no source available. Disabling Sniper...")
                        self.disabled = True

        # Re-enable snipping if source is from telegram
        if self.mode == SniperMode.TELEGRAM:
            self.disabled = False
    def is_snipeable(self, pokemon):
        """Return True when *pokemon* is worth teleporting to.

        Checks, in order: expiry (skipped in telegram mode), remaining
        ball stock, then catch-list / IV / VIP / missing-from-pokedex rules.
        """
        pokeballs_count = self.inventory.get(Item.ITEM_POKE_BALL.value).count
        greatballs_count = self.inventory.get(Item.ITEM_GREAT_BALL.value).count
        ultraballs_count = self.inventory.get(Item.ITEM_ULTRA_BALL.value).count
        all_balls_count = pokeballs_count + greatballs_count + ultraballs_count

        #Skip if expired (cast milliseconds to seconds for comparision), snipe check if source is from telegram
        if self.mode != SniperMode.TELEGRAM:
            if (pokemon.get('expiration_timestamp_ms', 0) or pokemon.get('last_modified_timestamp_ms', 0)) / 1000 < time.time():
                self._trace('{} is expired! Skipping...'.format(pokemon.get('pokemon_name')))
                return False

        # Skip if not enought balls. Sniping wastes a lot of balls. Theres no point to let user decide this amount
        if all_balls_count < self.MIN_BALLS_FOR_CATCHING:
            self._trace('Not enought balls left! Skipping...')
            return False

        # Skip if not in catch list, not a VIP and/or IV sucks (if any), ignore telegram mode
        if pokemon.get('pokemon_name', '') in self.catch_list or self.mode == SniperMode.TELEGRAM:
            self._trace('{} is catchable!'.format(pokemon.get('pokemon_name')))
        else:
            # Not catchable. Having a good IV should suppress the not in catch/vip list (most important)
            if pokemon.get('iv', 0) and pokemon.get('iv', 0) >= self.special_iv:
                self._trace('{} is not catchable, but has a decent IV ({})!'.format(pokemon.get('pokemon_name'), pokemon.get('iv', 0)))
            else:
                # Not catchable and IV is not good enough (if any). Check VIP list
                if pokemon.get('vip', False):
                    self._trace('{} is not catchable and bad IV (if any), however its a VIP!'.format(pokemon.get('pokemon_name')))
                else:
                    if pokemon.get('missing', False):
                        self._trace('{} is not catchable, not VIP and bad IV (if any), however its a missing one.'.format(pokemon.get('pokemon_name')))
                    else:
                        self._trace('{} is not catchable, nor a VIP or a missing one and bad IV (if any). Skipping...'.format(pokemon.get('pokemon_name')))
                        return False

        return True
# Snipe a target. This function admits that if a target really exists, it will be 'caught'.
def snipe(self, pokemon):
    """Teleport to *pokemon*, verify it exists, catch it, and teleport back.

    Returns True when the target was (very likely) caught, False
    otherwise. Side effects: locks/unlocks heartbeat calls
    (self.bot.hb_locked), moves the bot position, sleeps proportionally
    to the teleport distance, and caches the target's unique id so it is
    never attempted twice.
    """
    success = False
    # Apply snipping business rules and snipe if its good
    if not self.is_snipeable(pokemon) and not self.mode == SniperMode.TELEGRAM:
        self._trace('{} is not snipeable! Skipping...'.format(pokemon['pokemon_name']))
    else:
        # Have we already tried this pokemon?
        if not hasattr(self.bot,'sniper_unique_pokemon'):
            self.bot.sniper_unique_pokemon = []
        # Check if already in list of pokemon we've tried
        uniqueid = self._build_unique_id(pokemon)
        if self._is_cached(uniqueid):
            # Do nothing. Either we already got this, or it doesn't really exist
            self._trace('{} was already handled! Skipping...'.format(pokemon['pokemon_name']))
        else:
            # Backup position before anything
            last_position = self.bot.position[0:2]
            teleport_position = [pokemon['latitude'], pokemon['longitude']]
            #teleport_distance = self._get_distance(last_position, teleport_position)
            # Distance in km between the current position and the target
            teleport_distance = convert(distance(last_position[0],last_position[1],float(pokemon['latitude']),float(pokemon['longitude'])),"m","km")
            #sleep_time = self._get_sleep_sec(teleport_distance)
            # Cool-down pause proportional to the teleport distance
            sleep_time = wait_time_sec(teleport_distance)
            if sleep_time > 900:
                # More than 15 minutes of cool-down is not worth it
                success = False
                exists = False
                self._log('Sniping distance is more than supported distance, abort sniping')
            else:
                self._log('Base on distance, pausing for {0:.2f} Mins'.format(sleep_time/60))
                # Teleport, so that we can see nearby stuff
                self.bot.hb_locked = True
                time.sleep(sleep_time)
                self._teleport_to(pokemon)
                # If social is enabled and if no verification is needed, trust it. Otherwise, update IDs!
                verify = not pokemon.get('encounter_id') or not pokemon.get('spawn_point_id')
                exists = not verify or self.mode == SniperMode.SOCIAL
                success = exists
                # Always verify if it's from telegram
                if TelegramSnipe.ENABLED == True:
                    verify = True
                # If information verification have to be done, do so
                if verify:
                    seconds_since_last_check = time.time() - self.last_cell_check_time
                    # Wait a maximum of MIN_SECONDS_ALLOWED_FOR_CELL_CHECK seconds before requesting nearby cells
                    self._log('Pausing for {} secs before checking for Pokemons'.format(self.MIN_SECONDS_ALLOWED_FOR_CELL_CHECK))
                    # Poll nearby cells every 5 secs: first look for wild
                    # pokemons, then for catchable ones.
                    nearby_pokemons = []
                    nearby_stuff = []
                    num = 0
                    for num in range(0,self.MIN_SECONDS_ALLOWED_FOR_CELL_CHECK):
                        if num%5 == 0:
                            nearby_stuff = self.bot.get_meta_cell()
                            self.last_cell_check_time = time.time()
                            # Retrieve nearby pokemons for validation
                            nearby_pokemons.extend(nearby_stuff.get('wild_pokemons', []))
                        if nearby_pokemons:
                            break
                        time.sleep(1)
                        num += 1
                    num = 0
                    for num in range(0,self.MIN_SECONDS_ALLOWED_FOR_CELL_CHECK):
                        if num%5 == 0:
                            nearby_stuff = self.bot.get_meta_cell()
                            self.last_cell_check_time = time.time()
                            # Retrieve nearby pokemons for validation
                            nearby_pokemons.extend(nearby_stuff.get('catchable_pokemons', []))
                        if nearby_pokemons:
                            break
                        time.sleep(1)
                        num += 1
                    self._trace('Pokemon Nearby: {}'.format(nearby_pokemons))
                    # Make sure the target really/still exists (nearby_pokemon key names are game-bound!)
                    for nearby_pokemon in nearby_pokemons:
                        nearby_pokemon_id = nearby_pokemon.get('pokemon_data', {}).get('pokemon_id') or nearby_pokemon.get('pokemon_id')
                        # If we found the target, it exists and will very likely be encountered/caught (success)
                        if nearby_pokemon_id == pokemon.get('pokemon_id', 0):
                            exists = True
                            success = True
                            # Also, if the IDs arent valid, override them (nearby_pokemon key names are game-bound!) with game values
                            if not pokemon.get('encounter_id') or not pokemon.get('spawn_point_id'):
                                pokemon['encounter_id'] = nearby_pokemon['encounter_id']
                                pokemon['spawn_point_id'] = nearby_pokemon['spawn_point_id']
                            break
                # If target exists, catch it, otherwise ignore
                if exists:
                    self._log('Yay! There really is a wild {} nearby!'.format(pokemon.get('pokemon_name')))
                    self._catch(pokemon)
                    if self.teleport_back_to_last_location:
                        self._log('You have set to return to previous location, pause for {} sec before returning'.format(sleep_time))
                        time.sleep(sleep_time)
                        self._teleport_back(last_position)
                    #self._teleport_back_and_catch(last_position, pokemon)
                else:
                    self._error('Damn! Its not here. Reasons: too far, caught, expired or fake data. Skipping...')
                    if self.teleport_back_to_last_location:
                        self._log('You have set to return to previous location, pause for {} sec before returning'.format(sleep_time))
                        time.sleep(sleep_time)
                        self._teleport_back(last_position)
                    else:
                        self._log('Bot will now continue from new position')
                        #self._teleport_back(last_position)
            #Set always to false to re-enable sniper to check for telegram data
            TelegramSnipe.ENABLED = False
            # Save target and unlock heartbeat calls
            self._cache(uniqueid)
            self.bot.hb_locked = False
    return success
def work(self):
    """Run one sniper pass: honor cooldowns/softbans, fetch targets,
    snipe up to self.bullets of them, then optionally loiter/cool down.

    Always returns WorkerResult.SUCCESS.
    """
    # Check whether we are still cooling down from a previous snipe
    if self.no_snipe_until is not None and self.no_snipe_until > time.time():
        # No sniping now, cooling down
        return WorkerResult.SUCCESS
    else:
        # Resume sniping.
        # BUG FIX: this used to assign self.no_hunt_until, which this task
        # never reads -- the cooldown flag checked above is no_snipe_until.
        self.no_snipe_until = None
    if self.bot.softban:
        # Warn only once per softban period (flag lives on the bot so all
        # sniper instances share it)
        if not getattr(self.bot, "sniper_softban_global_warning", False):
            self.logger.info("Possible softban! Not sniping any targets.")
        self.bot.sniper_softban_global_warning = True
        return WorkerResult.SUCCESS
    else:
        # BUG FIX: the reset used to target self.bot.softban_global_warning,
        # leaving sniper_softban_global_warning stuck at True forever.
        self.bot.sniper_softban_global_warning = False
    sniped = False
    # Do nothing if this task was invalidated
    if self.disabled:
        self._error("Sniper was disabled for some reason. Scroll up to find out.")
    elif self.bot.catch_disabled:
        # All catching is disabled: warn once and wait for re-enable
        if not getattr(self.bot, "sniper_disabled_global_warning", False):
            self._log("All catching tasks are currently disabled until {}. Sniper will resume when catching tasks are re-enabled".format(self.bot.catch_resume_at.strftime("%H:%M:%S")))
        self.bot.sniper_disabled_global_warning = True
        return WorkerResult.SUCCESS
    else:
        self.bot.sniper_disabled_global_warning = False
        # Retrieve the targets from the configured source
        targets = []
        if self.mode == SniperMode.SOCIAL:
            targets = self._get_pokemons_from_social()
        elif self.mode == SniperMode.URL:
            targets = self._get_pokemons_from_url()
        elif self.mode == SniperMode.TELEGRAM and TelegramSnipe.ENABLED:
            targets = self._get_pokemons_from_telegram()
        if targets:
            # Order the targets (descending)
            targets = sorted(targets, key=itemgetter(*self.order), reverse=True)
            # List the Pokemons found
            self._trace('Sniping the {} best Pokemons found, ordered by {}'.format(self.bullets, self.order))
            self._trace('+----+------+----------------+-------+----------+---------+---------+----------+')
            self._trace('| # | Id | Name | IV | Verified | VIP | Missing | Priority |')
            self._trace('+----+------+----------------+-------+----------+---------+---------+----------+')
            row_format = "|{:>3} |{:>5} | {:<15}|{:>6} | {:<9}| {:<8}| {:<8}|{:>9} |"
            for index, target in enumerate(targets):
                self._trace(row_format.format(*[index+1, target.get('pokemon_id'), target.get('pokemon_name'), target.get('iv'), str(target.get('verified')), str(target.get('vip')), str(target.get('missing')), target.get('priority')]))
            self._trace('+----+------+----------------+-------+----------+---------+---------+----------+')
            shots = 0
            # For as long as there are targets available, try to snipe untill we run out of bullets
            for index, target in enumerate(targets):
                sniped = True
                if shots < self.bullets:
                    success = self.snipe(target)
                    shots += 1
                    # Homing shots are supposed to hit the target (capture). Rollback
                    if self.homing_shots and not success:
                        shots -= 1
                    # Wait a bit if were going to snipe again (bullets and targets left).
                    # BUG FIX: 'index < len(targets)' was always true; compare
                    # against the last index so we do not sleep after the final target.
                    if shots < self.bullets and index + 1 < len(targets):
                        self._trace('Waiting a few seconds to teleport again to another target...')
                        time.sleep(3)
        # Always set telegram back to false
        TelegramSnipe.ENABLED = False
    if sniped:
        if self.loiter_after_snipe:
            loiter = int(uniform(20, 40))
            self._log("Loitering for {} seconds after sniping to allow Niantic flags to drop off...".format(loiter))
            time.sleep(loiter)
        if self.cooldown_enabled:
            wait = uniform(60, 360)
            self.no_snipe_until = time.time() + wait
            self._log("Snipe on cooldown until {}.".format((datetime.now() + timedelta(seconds=wait)).strftime("%H:%M:%S")))
    return WorkerResult.SUCCESS
def _parse_pokemons(self, pokemon_dictionary_list):
    """Normalize raw target dicts and keep only the snipeable ones.

    Fills in the attributes the sniper relies on (iv, pokemon_name, vip,
    missing, priority) and filters the list through is_snipeable().
    """
    snipeable_targets = []
    for target in pokemon_dictionary_list:
        # Normalize the attributes this task relies on
        target['iv'] = target.get('iv', 100)
        target['pokemon_name'] = target.get('pokemon_name', Pokemons.name_for(target.get('pokemon_id')))
        target['vip'] = target.get('pokemon_name') in self.bot.config.vips
        target['missing'] = not self.pokedex.captured(target.get('pokemon_id'))
        target['priority'] = self.catch_list.get(target.get('pokemon_name'), 0)
        # Keep only valid targets
        if self.is_snipeable(target):
            snipeable_targets.append(target)
    return snipeable_targets
def _get_pokemons_from_telegram(self):
    """Build a single-target list from the pending Telegram snipe request.

    Returns a (possibly empty) list, consistent with the other
    _get_pokemons_from_* sources. FIX: this used to return {} when
    disabled, which only worked because an empty dict is also falsy.
    """
    if not TelegramSnipe.ENABLED:
        return []
    pokemon = {
        'iv': int(0),
        'pokemon_id': int(TelegramSnipe.ID),
        'pokemon_name': str(TelegramSnipe.POKEMON_NAME),
        'latitude': float(TelegramSnipe.LATITUDE),
        'longitude': float(TelegramSnipe.LONGITUDE)
    }
    self._log('Telegram snipe request: {}'.format(pokemon.get('pokemon_name')))
    return self._parse_pokemons([pokemon])
def _get_pokemons_from_social(self):
    """Return snipeable targets published on the social (MQTT) channel."""
    mqtt_targets = getattr(self.bot, 'mqtt_pokemon_list', None)
    if not mqtt_targets:
        return []
    self._trace('Social has returned {} pokemon(s)'.format(len(mqtt_targets)))
    return self._parse_pokemons(mqtt_targets)
def _get_pokemons_from_url(self):
    """Fetch and merge targets from all enabled URL sources.

    Throttled to at most one fetch per MIN_SECONDS_ALLOWED_FOR_REQUESTING_DATA
    seconds so we do not hammer the third-party servers. Results from all
    sources are de-duplicated by approximate location (see _hash).
    """
    results_hash_map = {}
    seconds_since_last_valid_request = time.time() - self.last_data_request_time
    # If something is requesting this info too fast, skip it (we might crash their servers)
    if (seconds_since_last_valid_request > self.MIN_SECONDS_ALLOWED_FOR_REQUESTING_DATA):
        self.last_data_request_time = time.time()
        self._trace("Fetching pokemons from the sources...")
        for source in self.sources:
            try:
                if source.enabled:
                    source_pokemons = source.fetch()
                    self._trace("Source '{}' returned {} results".format(source.url, len(source_pokemons)))
                    # Merge lists, making sure to exclude repeated data. Use location as the hash key
                    for source_pokemon in source_pokemons:
                        hash_key = self._hash(source_pokemon)
                        # Add if new. FIX: dict.has_key() was removed in
                        # Python 3; the 'in' operator works on both versions.
                        if hash_key not in results_hash_map:
                            results_hash_map[hash_key] = source_pokemon
                else:
                    self._trace("Source '{}' is disabled".format(source.url))
            except Exception as exception:
                self._error("Could not fetch data from '{}'. Details: {}. Skipping...".format(source.url, exception))
        self._trace("After merging, we've got {} results".format(len(results_hash_map.values())))
    else:
        self._trace("Not ready yet to retrieve data...")
    # list() keeps Python 2 semantics while staying safe on Python 3 views
    return self._parse_pokemons(list(results_hash_map.values()))
def _hash(self, pokemon):
# Use approximate location instead, because some IDs might be wrong. The first 4 decimal places is enough for this
return "{0:.4f};{1:.4f}".format(pokemon.get('latitude'), pokemon.get('longitude'))
def _equals(self, pokemon_1, pokemon_2):
    """Two targets are considered equal when they share a location hash."""
    hash_1 = self._hash(pokemon_1)
    hash_2 = self._hash(pokemon_2)
    return hash_1 == hash_2
def _is_cached(self, uniqueid):
if uniqueid in self.bot.sniper_cache:
return True
return False
def _cache(self, uniqueid):
    """Remember *uniqueid* in the bot-wide cache, evicting the oldest entry
    when the cache is full. No-op when the id is already cached."""
    if self._is_cached(uniqueid):
        return
    # Free a slot first when the cache has reached its size limit
    if len(self.bot.sniper_cache) >= self.MAX_CACHE_LIST_SIZE:
        self.bot.sniper_cache.pop(0)
    self.bot.sniper_cache.append(uniqueid)
def _build_unique_id(self, pokemon):
# Build unique id for this pokemon from id, latitude, longitude and expiration
uniqueid = str(pokemon.get('pokemon_id','')) + str(pokemon.get('latitude','')) + str(pokemon.get('longitude','')) + str(pokemon.get('expiration',''))
md5str = hashlib.md5()
md5str.update(uniqueid)
uniqueid = str(md5str.hexdigest())
return uniqueid
def _log(self, message):
    """Emit a plain informational message through the bot event system."""
    self.emit_event('sniper_log', formatted='{message}', data={'message': message})
def _error(self, message):
    """Emit an error message through the bot event system."""
    self.emit_event('sniper_error', formatted='{message}', data={'message': message})
def _trace(self, message):
    """Log *message* only when debug mode is enabled (self.debug)."""
    if self.debug:
        self._log(message)
def _teleport(self, latitude, longitude, altitude):
    """Set the bot's position directly, then pause briefly to settle."""
    self.bot.api.set_position(latitude, longitude, altitude, True)
    time.sleep(3)
def _teleport_to(self, pokemon):
    """Announce and perform a teleport to the target's coordinates."""
    self.emit_event(
        'sniper_teleporting',
        formatted = 'Teleporting to meet {name} ({latitude}; {longitude})...',
        data = { 'name': pokemon['pokemon_name'], 'latitude': pokemon['latitude'], 'longitude': pokemon['longitude'] }
    )
    self._teleport(pokemon['latitude'], pokemon['longitude'], self.altitude)
def _teleport_back(self, position_array):
    """Announce and perform a teleport back to the saved (lat, lng) pair."""
    self.emit_event(
        'sniper_teleporting',
        formatted = 'Teleporting back to the old position ({latitude}; {longitude})...',
        data = { 'latitude': position_array[0], 'longitude': position_array[1] }
    )
    self._teleport(position_array[0], position_array[1], self.altitude)
def _catch(self, pokemon):
    """Encounter and catch *pokemon* via PokemonCatchWorker."""
    catch_worker = PokemonCatchWorker(pokemon, self.bot)
    api_encounter_response = catch_worker.create_encounter_api_call()
    catch_worker.work(api_encounter_response)
def _teleport_back_and_catch(self, position_array, pokemon):
    """Start the encounter first, teleport back, then finish the catch.

    The encounter API call is issued while still at the target's position;
    the catch itself is completed after returning to *position_array*.
    """
    catch_worker = PokemonCatchWorker(pokemon, self.bot)
    api_encounter_response = catch_worker.create_encounter_api_call()
    self._teleport_back(position_array)
    catch_worker.work(api_encounter_response)
| goedzo/PokemonGo-Bot | pokemongo_bot/cell_workers/sniper.py | Python | mit | 34,433 |
#!/usr/bin/python2.7
#coding:utf-8
def main():
    """Demonstrate the different ways imported symbols become visible.

    Relies on m1, fun3, f4 and b1 being imported at module level by the
    __main__ block below.
    """
    # print dir(m1)
    for key in dir(m1):
        print key, getattr(m1, key)
    # print dir(m2)  # error: the module object m2 itself was never imported into this namespace
    f1 = m1.fun1
    f2 = m1.fun2
    print f1(8)
    print f2(8)
    print fun3(10)
    print f4(10)
    b1.b1_fun()
    # b2.b2_fun()  # not defined: b2 is not exported by the package's __init__.py
if __name__ == "__main__" :
    import sys
    print sys.path
    sys.path.append("mod")  # add the "mod" directory to the module search path
    import m1
    from m2 import fun3, fun4 as f4
    from pack import *
    main()
| qrsforever/workspace | python/learn/base/module/l1/main.py | Python | mit | 613 |
import base64
import hashlib
import hmac
import uuid
import json
import mimetypes
from django.conf import settings
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import permission_classes
from rest_framework.permissions import IsAuthenticated
AWS_MAX_SIZE = 15000000000
import boto
from boto.s3.connection import Key, S3Connection
@csrf_exempt
def success_redirect_endpoint(request):
    """ This is where the uploader will send a POST request after the
    file has been stored in S3.

    Responds with the file's public link, preferring the CloudFront
    domain when AWS_CLOUDFRONT_DOMAIN is configured.
    """
    if request.method == "POST":
        bucket_name = request.POST.get('bucket')
        key_name = request.POST.get('key')
        cloud_front = getattr(settings, 'AWS_CLOUDFRONT_DOMAIN', None)
        # Fall back to the plain S3 URL when no CloudFront domain is set
        temp_link = "https://%s.s3.amazonaws.com/%s" % (bucket_name, key_name)
        if cloud_front:
            temp_link = "https://%s/%s" % (cloud_front, key_name)
        content = {
            "tempLink": temp_link
        }
        return make_response(200, json.dumps(content))
    else:
        # Only POST is accepted here
        return make_response(405)
def handle_POST(request):
    """ Handle S3 uploader POST requests here. For files <=5MiB this is a simple
    request to sign the policy document. For files >5MiB this is a request
    to sign the headers to start a multipart encoded request.
    """
    if request.POST.get('success', None):
        return make_response(200)
    else:
        request_payload = json.loads(request.body)
        headers = request_payload.get('headers', None)
        if headers:
            # The presence of the 'headers' property in the request payload
            # means this is a request to sign a REST/multipart request
            # and NOT a policy document
            response_data = sign_headers(headers)
        else:
            # Reject policy documents that were tampered with client-side
            if not is_valid_policy(request_payload):
                return make_response(400, {'invalid': True})
            response_data = sign_policy_document(request_payload)
        response_payload = json.dumps(response_data)
        return make_response(200, response_payload)
def handle_DELETE(request):
    """ Handle file deletion requests. For this, we use the Amazon Python SDK,
    boto.

    Deletes the key named by the POST data's bucket/key pair. Returns 200
    on success, 500 on any failure.
    """
    S3 = None
    try:
        boto.set_stream_logger('boto')
        S3 = S3Connection(aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                          aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY)
    except ImportError:
        # FIX: was the Python-2-only "except ImportError, e" syntax
        print("Could not import boto, the Amazon SDK for Python.")
        print("Deleting files will not work.")
        print("Install boto with")
        print("$ pip install boto")
    # FIX: previously tested "if boto:", which is always truthy because the
    # module import happens at the top of the file -- a failed connection
    # above would then crash on the unbound S3 name below.
    if S3:
        bucket_name = request.POST.get('bucket')
        key_name = request.POST.get('key')
        try:
            aws_bucket = S3.get_bucket(bucket_name, validate=False)
            aws_key = Key(aws_bucket, key_name)
            aws_key.delete()
            return make_response(200)
        except Exception as err:
            print(err)
            return make_response(500)
    else:
        return make_response(500)
def make_response(status=200, content=None):
    """ Construct an HTTP response. Fine Uploader expects 'application/json'.
    """
    return HttpResponse(content=content,
                        content_type="application/json",
                        status=status)
def is_valid_policy(policy_document):
    """ Verify the policy document has not been tampered with client-side
    before sending it off.

    Extracts the bucket name and the content-length-range ceiling from the
    policy's conditions and compares them to the server-side values.
    Conditions are either 3-element lists (['content-length-range', min, max])
    or single-key dicts (e.g. {'bucket': ...}).
    """
    # bucket = settings.AWS_STORAGE_BUCKET_NAME
    # parsed_max_size = settings.AWS_MAX_SIZE
    bucket = ''
    parsed_max_size = 0
    for condition in policy_document['conditions']:
        if isinstance(condition, list) and condition[0] == 'content-length-range':
            parsed_max_size = condition[2]
        else:
            if condition.get('bucket', None):
                bucket = condition['bucket']
    return bucket == settings.AWS_STORAGE_BUCKET_NAME and int(parsed_max_size) == AWS_MAX_SIZE
def sign_policy_document(policy_document):
    """ Sign and return the policy document for a simple upload.
    http://aws.amazon.com/articles/1434/#signyours3postform

    NOTE(review): b64encode and hmac.new are fed str objects here, which
    only works on Python 2 -- confirm byte handling before porting to 3.
    """
    policy = base64.b64encode(json.dumps(policy_document))
    signature = base64.b64encode(hmac.new(settings.AWS_SECRET_ACCESS_KEY, policy, hashlib.sha1).digest())
    return {
        'policy': policy,
        'signature': signature
    }
def sign_headers(headers):
    """ Sign and return the headers for a chunked upload.

    NOTE(review): hmac.new is fed str objects here, which only works on
    Python 2 -- confirm byte handling before porting to 3.
    """
    return {
        'signature': base64.b64encode(hmac.new(settings.AWS_SECRET_ACCESS_KEY, headers, hashlib.sha1).digest())
    }
@permission_classes((IsAuthenticated,))
def sign_s3_upload(request):
    """Return a presigned S3 PUT URL (sigv4) plus the file's public URL.

    The object key is placed under folderName and prefixed with a UUID so
    concurrent uploads of identically-named files never collide.
    """
    object_name = request.GET['objectName']
    folder_name = request.GET["folderName"]
    object_name = str(uuid.uuid4()) + "-" + object_name
    key_name = folder_name + "/" + object_name
    # Fall back to guessing the content type from the file extension
    content_type = request.GET.get("contentType", mimetypes.guess_type(object_name)[0])
    bucket_name = settings.AWS_STORAGE_BUCKET_NAME
    import boto3
    from botocore.client import Config
    # Get the service client with sigv4 configured
    s3 = boto3.client('s3', aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
                      aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY, config=Config(signature_version='s3v4'))
    # Generate the URL to get 'key-name' from 'bucket-name'
    signed_url = s3.generate_presigned_url(
        ClientMethod='put_object',
        Params={
            'Bucket': bucket_name,
            'Key': key_name,
            'ACL': 'public-read',
            'ContentType': content_type
        }
    )
    # Prefer the CloudFront domain for the public link when configured
    cloud_front = getattr(settings, 'AWS_CLOUDFRONT_DOMAIN', None)
    cloud_front_url = "https://%s.s3.amazonaws.com/%s" % (bucket_name, key_name)
    if cloud_front:
        cloud_front_url = "https://%s/%s" % (cloud_front, key_name)
    response = {
        'signedUrl': signed_url,
        'cloudFrontURL': cloud_front_url
    }
    return HttpResponse(json.dumps(response))
| anush0247/django-fine-uploader-s3 | django_fine_uploader_s3/views.py | Python | mit | 6,074 |
from __future__ import division
"""
Contains
========
* Connector
"""
class Connector:

    """
    The primary medium for data transfer between digital objects. A
    connector tracks which elements read it as an input and which single
    element drives it as an output, and propagates state changes to all
    of its listeners.

    Methods
    =======

    * tap
    * untap
    * isInputof
    * isOutputof
    * trigger
    """

    def __init__(self, state=None):
        # All taps onto this connection, grouped by direction.
        self.connections = {"output": [], "input": []}
        # Current logic state of the connection.
        self.state = state
        self.oldstate = None

    def tap(self, element, mode):
        """Register element as an input/output tap of this connector."""
        if mode == "output":
            # Only one device may drive this connector at a time.
            self.connections["output"] = []
        taps = self.connections[mode]
        if element not in taps:
            taps.append(element)

    def untap(self, element, mode):
        """Remove a previously registered tap; raise if it was never tapped."""
        taps = self.connections[mode]
        if element not in taps:
            raise Exception(
                "ERROR:Connector is not the %s of the passed element" %
                mode)
        taps.remove(element)

    def isInputof(self, element):
        """True when this connector feeds *element*."""
        return element in self.connections["input"]

    def isOutputof(self, element):
        """True when *element* drives this connector."""
        return element in self.connections["output"]

    def trigger(self):
        """Propagate a state change to every element fed by this connector."""
        for listener in self.connections["input"]:
            listener.trigger()

    def __call__(self):
        return self.state

    def __bool__(self):
        # A connector is truthy exactly when its state is logic 1.
        return self.state == 1

    # Keep Python 2.x compatibility.
    __nonzero__ = __bool__

    def __int__(self):
        return int(self.state == 1)

    def __float__(self):
        return float(self.state)

    def __repr__(self):
        return str(self.state)

    def __str__(self):
        return "Connector; State: " + str(self.state)

    def __add__(self, other):
        return self.state + other.state

    def __sub__(self, other):
        return self.state - other.state

    def __mul__(self, other):
        return self.state * other.state

    def __truediv__(self, other):
        return self.state / other.state
| coder006/BinPy | BinPy/Gates/connector.py | Python | bsd-3-clause | 2,623 |
'''
@date Aug 28, 2010
@author: Matthew A. Todd
This file is part of Test Parser
by Matthew A. Todd
Test Parser is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Test Parser is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Test Parser. If not, see <http://www.gnu.org/licenses/>.
''' | matcatc/Test_Parser | src/TestParser/View/Tkinter/__init__.py | Python | gpl-3.0 | 714 |
#! /usr/bin/env python
from __future__ import division
# System imports
import sys
import unittest
# Import NumPy
import numpy as np
major, minor = [int(d) for d in np.__version__.split(".")[:2]]
if major == 0:
BadListError = TypeError
else:
BadListError = ValueError
import SuperTensor
######################################################################
class SuperTensorTestCase(unittest.TestCase):
def __init__(self, methodName="runTests"):
unittest.TestCase.__init__(self, methodName)
self.typeStr = "double"
self.typeCode = "d"
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNorm(self):
"Test norm function"
print >> sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2))
# Note: cludge to get an answer of the same type as supertensor.
# Answer is simply sqrt(sum(supertensor*supertensor)/16)
answer = np.array([np.sqrt(np.sum(supertensor.astype('d') * supertensor) / 16.)], dtype=self.typeCode)[0]
self.assertAlmostEqual(norm(supertensor), answer, 6)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormBadList(self):
"Test norm function with bad list"
print >> sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = [[[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]], [[[0, "one"], [2, 3]], [[3, "two"], [1, 0]]]]
self.assertRaises(BadListError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongDim(self):
"Test norm function with wrong dimensions"
print >> sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormWrongSize(self):
"Test norm function with wrong size"
print >> sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
supertensor = np.arange(3 * 2 * 2, dtype=self.typeCode).reshape((3, 2, 2))
self.assertRaises(TypeError, norm, supertensor)
# Test (type IN_ARRAY3[ANY][ANY][ANY]) typemap
def testNormNonContainer(self):
"Test norm function with non-container"
print >> sys.stderr, self.typeStr, "... ",
norm = SuperTensor.__dict__[self.typeStr + "Norm"]
self.assertRaises(TypeError, norm, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMax(self):
"Test max function"
print >> sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
supertensor = [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]
self.assertEquals(max(supertensor), 8)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
"Test max function with bad list"
print >> sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
supertensor = [[[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]], [[[1, "two"], [3, 4]], [[5, "six"], [7, 8]]]]
self.assertRaises(BadListError, max, supertensor)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxNonContainer(self):
"Test max function with non-container"
print >> sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, None)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxWrongDim(self):
"Test max function with wrong dimensions"
print >> sys.stderr, self.typeStr, "... ",
max = SuperTensor.__dict__[self.typeStr + "Max"]
self.assertRaises(TypeError, max, [0, -1, 2, -3])
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMin(self):
"Test min function"
print >> sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
supertensor = [[[[9, 8], [7, 6]], [[5, 4], [3, 2]]], [[[9, 8], [7, 6]], [[5, 4], [3, 2]]]]
self.assertEquals(min(supertensor), 2)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
"Test min function with bad list"
print >> sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
supertensor = [[[["nine", 8], [7, 6]], [["five", 4], [3, 2]]], [[["nine", 8], [7, 6]], [["five", 4], [3, 2]]]]
self.assertRaises(BadListError, min, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinNonContainer(self):
"Test min function with non-container"
print >> sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, True)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinWrongDim(self):
"Test min function with wrong dimensions"
print >> sys.stderr, self.typeStr, "... ",
min = SuperTensor.__dict__[self.typeStr + "Min"]
self.assertRaises(TypeError, min, [[1, 3], [5, 7]])
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScale(self):
"Test scale function"
print >> sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.arange(3 * 3 * 3 * 3, dtype=self.typeCode).reshape((3, 3, 3, 3))
answer = supertensor.copy() * 4
scale(supertensor, 4)
self.assertEquals((supertensor == answer).all(), True)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongType(self):
"Test scale function with wrong type"
print >> sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[[1, 0, 1], [0, 1, 0], [1, 0, 1]],
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], 'c')
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongDim(self):
"Test scale function with wrong dimensions"
print >> sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[1, 0, 1], [0, 1, 0], [1, 0, 1],
[0, 1, 0], [1, 0, 1], [0, 1, 0]], self.typeCode)
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongSize(self):
"Test scale function with wrong size"
print >> sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
supertensor = np.array([[[1, 0], [0, 1], [1, 0]],
[[0, 1], [1, 0], [0, 1]],
[[1, 0], [0, 1], [1, 0]]], self.typeCode)
self.assertRaises(TypeError, scale, supertensor)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleNonArray(self):
"Test scale function with non-array"
print >> sys.stderr, self.typeStr, "... ",
scale = SuperTensor.__dict__[self.typeStr + "Scale"]
self.assertRaises(TypeError, scale, True)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloor(self):
"Test floor function"
print >> sys.stderr, self.typeStr, "... ",
supertensor = np.arange(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2))
answer = supertensor.copy()
answer[answer < 4] = 4
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
floor(supertensor, 4)
np.testing.assert_array_equal(supertensor, answer)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongType(self):
"Test floor function with wrong type"
print >> sys.stderr, self.typeStr, "... ",
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
supertensor = np.ones(2 * 2 * 2 * 2, dtype='c').reshape((2, 2, 2, 2))
self.assertRaises(TypeError, floor, supertensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorWrongDim(self):
"Test floor function with wrong type"
print >> sys.stderr, self.typeStr, "... ",
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
self.assertRaises(TypeError, floor, supertensor)
# Test (type* INPLACE_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testFloorNonArray(self):
"Test floor function with non-array"
print >> sys.stderr, self.typeStr, "... ",
floor = SuperTensor.__dict__[self.typeStr + "Floor"]
self.assertRaises(TypeError, floor, object)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
    def testCeil(self):
        "Test ceil function"
        print >> sys.stderr, self.typeStr, "... ",
        supertensor = np.arange(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2))
        # Ceil clamps every element above the threshold down to it (in place).
        answer = supertensor.copy()
        answer[answer > 5] = 5
        ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
        ceil(supertensor, 5)
        np.testing.assert_array_equal(supertensor, answer)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
    def testCeilWrongType(self):
        "Test ceil function with wrong type"
        print >> sys.stderr, self.typeStr, "... ",
        ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
        # dtype 'c' never matches self.typeCode, so the wrapper must raise.
        supertensor = np.ones(2 * 2 * 2 * 2, 'c').reshape((2, 2, 2, 2))
        self.assertRaises(TypeError, ceil, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
    def testCeilWrongDim(self):
        "Test ceil function with wrong dimensions"
        # A 3-D array fed to the 4-D typemap must be rejected.
        print >> sys.stderr, self.typeStr, "... ",
        ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
        supertensor = np.arange(2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2))
        self.assertRaises(TypeError, ceil, supertensor)
# Test (int DIM1, int DIM2, int DIM3, type* INPLACE_ARRAY3) typemap
    def testCeilNonArray(self):
        "Test ceil function with non-array"
        print >> sys.stderr, self.typeStr, "... ",
        ceil = SuperTensor.__dict__[self.typeStr + "Ceil"]
        # A nested list (not an ndarray) must be rejected by this typemap.
        supertensor = np.arange(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2)).tolist()
        self.assertRaises(TypeError, ceil, supertensor)
# Test (type ARGOUT_ARRAY3[ANY][ANY][ANY]) typemap
def testLUSplit(self):
"Test luSplit function"
print >> sys.stderr, self.typeStr, "... ",
luSplit = SuperTensor.__dict__[self.typeStr + "LUSplit"]
supertensor = np.ones(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2))
answer_upper = [[[[0, 0], [0, 1]], [[0, 1], [1, 1]]], [[[0, 1], [1, 1]], [[1, 1], [1, 1]]]]
answer_lower = [[[[1, 1], [1, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]]
lower, upper = luSplit(supertensor)
self.assertEquals((lower == answer_lower).all(), True)
self.assertEquals((upper == answer_upper).all(), True)
######################################################################
class scharTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for signed char (typecode 'b')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "b"
        self.typeStr = "schar"
######################################################################
class ucharTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for unsigned char (typecode 'B')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "B"
        self.typeStr = "uchar"
######################################################################
class shortTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for short (typecode 'h')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "h"
        self.typeStr = "short"
######################################################################
class ushortTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for unsigned short (typecode 'H')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "H"
        self.typeStr = "ushort"
######################################################################
class intTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for int (typecode 'i')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "i"
        self.typeStr = "int"
######################################################################
class uintTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for unsigned int (typecode 'I')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "I"
        self.typeStr = "uint"
######################################################################
class longTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for long (typecode 'l')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "l"
        self.typeStr = "long"
######################################################################
class ulongTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for unsigned long (typecode 'L')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "L"
        self.typeStr = "ulong"
######################################################################
class longLongTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for long long (typecode 'q')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "q"
        self.typeStr = "longLong"
######################################################################
class ulongLongTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for unsigned long long (typecode 'Q')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "Q"
        self.typeStr = "ulongLong"
######################################################################
class floatTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for float (typecode 'f')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "f"
        self.typeStr = "float"
######################################################################
class doubleTestCase(SuperTensorTestCase):
    """SuperTensor suite instantiated for double (typecode 'd')."""
    def __init__(self, methodName="runTest"):
        SuperTensorTestCase.__init__(self, methodName)
        self.typeCode = "d"
        self.typeStr = "double"
######################################################################
if __name__ == "__main__":
    # Build the test suite: one TestCase subclass per C numeric type.
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(scharTestCase))
    suite.addTest(unittest.makeSuite(ucharTestCase))
    suite.addTest(unittest.makeSuite(shortTestCase))
    suite.addTest(unittest.makeSuite(ushortTestCase))
    suite.addTest(unittest.makeSuite(intTestCase))
    suite.addTest(unittest.makeSuite(uintTestCase))
    suite.addTest(unittest.makeSuite(longTestCase))
    suite.addTest(unittest.makeSuite(ulongTestCase))
    suite.addTest(unittest.makeSuite(longLongTestCase))
    suite.addTest(unittest.makeSuite(ulongLongTestCase))
    suite.addTest(unittest.makeSuite(floatTestCase))
    suite.addTest(unittest.makeSuite(doubleTestCase))
    # Execute the test suite
    print "Testing 4D Functions of Module SuperTensor"
    print "NumPy version", np.__version__
    print
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit status 1 on any error/failure so CI can detect a broken run.
    sys.exit(bool(result.errors + result.failures))
| DailyActie/Surrogate-Model | 01-codes/numpy-master/tools/swig/test/testSuperTensor.py | Python | mit | 16,499 |
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Samples embedded in the Usage Guide (docs/usage.rst)
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.logging.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
"""
import os
import time
from google.cloud.logging import Client
def snippet(func):
    """Decorator: tag *func* as a snippet example so _find_examples() sees it."""
    setattr(func, "_snippet", True)
    return func
def _millis():
    """Current wall-clock time expressed in milliseconds (float)."""
    return 1000 * time.time()
def do_something_with(item):  # pylint: disable=unused-argument
    """Placeholder consumer used by the example loops; intentionally a no-op."""
    return None
# pylint: enable=reimported,unused-variable,unused-argument
@snippet
def client_list_entries(client, to_delete):  # pylint: disable=unused-argument
    """List entries via client.

    Shows list_entries() with default arguments, with a filter string,
    with a sort order, and with a GKE audit-log filter.  Each loop breaks
    after the first entry so the example stays fast.  The [START]/[END]
    markers delimit regions embedded in the published documentation.
    """
    # [START client_list_entries_default]
    for entry in client.list_entries():  # API call(s)
        do_something_with(entry)
        # [END client_list_entries_default]
        break
    # [START client_list_entries_filter]
    filter_str = "logName:log_name AND textPayload:simple"
    for entry in client.list_entries(filter_=filter_str):  # API call(s)
        do_something_with(entry)
        # [END client_list_entries_filter]
        break
    # [START client_list_entries_order_by]
    from google.cloud.logging import DESCENDING
    for entry in client.list_entries(order_by=DESCENDING):  # API call(s)
        do_something_with(entry)
        # [END client_list_entries_order_by]
        break
    # [START logging_list_gke_audit_logs]
    import google.cloud.logging
    from datetime import datetime, timedelta, timezone
    import os
    # pull your project id from an environment variable
    project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
    # construct a date object representing yesterday
    yesterday = datetime.now(timezone.utc) - timedelta(days=1)
    # Cloud Logging expects a timestamp in RFC3339 UTC "Zulu" format
    # https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry
    time_format = "%Y-%m-%dT%H:%M:%S.%f%z"
    # build a filter that returns GKE Admin Activity audit Logs from
    # the past 24 hours
    # https://cloud.google.com/kubernetes-engine/docs/how-to/audit-logging
    filter_str = (
        f'logName="projects/{project_id}/logs/cloudaudit.googleapis.com%2Factivity"'
        f' AND resource.type="k8s_cluster"'
        f' AND timestamp>="{yesterday.strftime(time_format)}"'
    )
    # query and print all matching logs
    client = google.cloud.logging.Client()
    for entry in client.list_entries(filter_=filter_str):
        print(entry)
        # [END logging_list_gke_audit_logs]
        break  # we don't really need to print them all
@snippet
def client_setup(client2, to_delete):
    """Client setup.

    Demonstrates constructing a Client (gRPC by default, HTTP with
    _use_grpc=False).  Both clients are appended to *to_delete* so the
    harness can clean up.
    """
    # [START usage_client_setup]
    import google.cloud.logging
    # if project not given, it will be inferred from the environment
    client = google.cloud.logging.Client(project="my-project")
    # [END usage_client_setup]
    to_delete.append(client)
    # [START usage_http_client_setup]
    http_client = google.cloud.logging.Client(_use_grpc=False)
    # [END usage_http_client_setup]
    to_delete.append(http_client)
@snippet
def logger_usage(client_true, to_delete):
    """Logger usage.

    Walks through Logger creation, custom labels/resources, the various
    log_* write methods, batching, listing, and deletion.  *client_true*
    is the real client; a throwaway client is built first purely for the
    documentation snippet.
    """
    import google.cloud.logging
    # [START logger_create]
    client = google.cloud.logging.Client(project="my-project")
    logger = client.logger(name="log_id")
    # logger will bind to logName "projects/my_project/logs/log_id"
    # [END logger_create]
    client = client_true
    log_id = "logger_usage_%d" % (_millis())
    # [START logger_custom_labels]
    custom_labels = {"my-key": "my-value"}
    label_logger = client.logger(log_id, labels=custom_labels)
    # [END logger_custom_labels]
    to_delete.append(label_logger)
    # [START logger_custom_resource]
    from google.cloud.logging_v2.resource import Resource
    resource = Resource(type="global", labels={})
    global_logger = client.logger(log_id, resource=resource)
    # [END logger_custom_resource]
    to_delete.append(global_logger)
    logger = client_true.logger(log_id)
    to_delete.append(logger)
    # [START logger_log_basic]
    logger.log("A simple entry")  # API call
    # [END logger_log_basic]
    # [START logger_log_fields]
    logger.log(
        "an entry with fields set",
        severity="ERROR",
        insert_id="0123",
        labels={"my-label": "my-value"},
    )  # API call
    # [END logger_log_fields]
    # [START logger_log_text]
    logger.log_text("A simple entry")  # API call
    # [END logger_log_text]
    # [START logger_log_struct]
    logger.log_struct(
        {"message": "My second entry", "weather": "partly cloudy"}
    )  # API call
    # [END logger_log_struct]
    # [START logger_log_resource_text]
    from google.cloud.logging import Resource
    res = Resource(
        type="generic_node",
        labels={
            "location": "us-central1-a",
            "namespace": "default",
            "node_id": "10.10.10.1",
        },
    )
    logger.log_struct(
        {"message": "My first entry", "weather": "partly cloudy"}, resource=res
    )
    # [END logger_log_resource_text]
    # [START logger_log_batch]
    batch = logger.batch()
    batch.log("first log")
    batch.log("second log")
    batch.commit()
    # [END logger_log_batch]
    # [START logger_log_batch_context]
    with logger.batch() as batch:
        batch.log("first log")
        # do work
        batch.log("last log")
    # [END logger_log_batch_context]
    # [START logger_list_entries]
    from google.cloud.logging import DESCENDING
    for entry in logger.list_entries(order_by=DESCENDING):  # API call(s)
        do_something_with(entry)
    # [END logger_list_entries]
    def _logger_delete():
        # [START logger_delete]
        logger.delete()  # API call
        # [END logger_delete]
    _backoff_not_found(_logger_delete)
    to_delete.remove(logger)
@snippet
def metric_crud(client, to_delete):
    """Metric CRUD.

    Create / reload / update / delete a logs-based metric, asserting the
    round-tripped filter and description at each step.
    """
    metric_name = "robots-%d" % (_millis(),)
    description = "Robots all up in your server"
    # NOTE(review): "filter" shadows the builtin; kept because it appears
    # verbatim inside the published [START metric_create] snippet.
    filter = "logName:apache-access AND textPayload:robot"
    updated_filter = "textPayload:robot"
    updated_description = "Danger, Will Robinson!"
    # [START client_list_metrics]
    for metric in client.list_metrics():  # API call(s)
        do_something_with(metric)
    # [END client_list_metrics]
    # [START metric_create]
    metric = client.metric(metric_name, filter_=filter, description=description)
    assert not metric.exists()  # API call
    metric.create()  # API call
    assert metric.exists()  # API call
    # [END metric_create]
    to_delete.append(metric)
    # [START metric_reload]
    existing_metric = client.metric(metric_name)
    existing_metric.reload()  # API call
    # [END metric_reload]
    assert existing_metric.filter_ == filter
    assert existing_metric.description == description
    # [START metric_update]
    existing_metric.filter_ = updated_filter
    existing_metric.description = updated_description
    existing_metric.update()  # API call
    # [END metric_update]
    existing_metric.reload()
    assert existing_metric.filter_ == updated_filter
    assert existing_metric.description == updated_description
    def _metric_delete():
        # [START metric_delete]
        metric.delete()
        # [END metric_delete]
    _backoff_not_found(_metric_delete)
    to_delete.remove(metric)
def _sink_storage_setup(client):
    """Create a GCS bucket writable by the Cloud Logging group and return it.

    The incoming *client* parameter is immediately shadowed by a storage
    client; only the bucket is returned for use by sink_storage().
    """
    from google.cloud import storage
    bucket_name = "sink-storage-%d" % (_millis(),)
    client = storage.Client()
    bucket = client.bucket(bucket_name)
    bucket.create()
    # [START sink_bucket_permissions]
    bucket.acl.reload()  # API call
    logs_group = bucket.acl.group("cloud-logs@google.com")
    logs_group.grant_owner()
    bucket.acl.add_entity(logs_group)
    bucket.acl.save()  # API call
    # [END sink_bucket_permissions]
    return bucket
@snippet
def sink_storage(client, to_delete):
    """Sink log entries to storage.

    Creates a log sink whose destination is the GCS bucket prepared by
    _sink_storage_setup(); the sink is queued for deletion before the
    bucket so teardown succeeds.
    """
    bucket = _sink_storage_setup(client)
    to_delete.append(bucket)
    sink_name = "robots-storage-%d" % (_millis(),)
    filter = "textPayload:robot"
    # [START sink_storage_create]
    destination = "storage.googleapis.com/%s" % (bucket.name,)
    sink = client.sink(sink_name, filter_=filter, destination=destination)
    assert not sink.exists()  # API call
    sink.create()  # API call
    assert sink.exists()  # API call
    # [END sink_storage_create]
    to_delete.insert(0, sink)  # delete sink before bucket
def _sink_bigquery_setup(client):
    """Create a BigQuery dataset writable by the Cloud Logging group.

    The incoming *client* is shadowed by a BigQuery client; the created
    dataset is returned for use by sink_bigquery().
    """
    from google.cloud import bigquery
    dataset_name = "sink_bigquery_%d" % (_millis(),)
    client = bigquery.Client()
    dataset = client.create_dataset(dataset_name)
    # [START sink_dataset_permissions]
    from google.cloud.bigquery.dataset import AccessEntry
    entry_list = dataset.access_entries
    entry_list.append(AccessEntry("WRITER", "groupByEmail", "cloud-logs@google.com"))
    dataset.access_entries = entry_list
    client.update_dataset(dataset, ["access_entries"])  # API call
    # [END sink_dataset_permissions]
    return dataset
@snippet
def sink_bigquery(client, to_delete):
    """Sink log entries to bigquery.

    Creates a log sink whose destination is the dataset prepared by
    _sink_bigquery_setup().
    """
    dataset = _sink_bigquery_setup(client)
    sink_name = "robots-bigquery-%d" % (_millis(),)
    filter_str = "textPayload:robot"
    # [START sink_bigquery_create]
    destination = "bigquery.googleapis.com%s" % (dataset.path,)
    sink = client.sink(sink_name, filter_=filter_str, destination=destination)
    assert not sink.exists()  # API call
    sink.create()  # API call
    assert sink.exists()  # API call
    # [END sink_bigquery_create]
    to_delete.insert(0, sink)  # delete sink before dataset
def _sink_pubsub_setup(client):
    """Create a Pub/Sub topic owned by the Cloud Logging group.

    Returns (topic, deleter): the deleter object exposes a .delete()
    method so the teardown loop in main() can remove the topic.
    """
    from google.cloud import pubsub
    client = pubsub.PublisherClient()
    project_id = os.environ["GOOGLE_CLOUD_PROJECT"]
    topic_id = "sink-pubsub-%d" % (_millis(),)
    # [START sink_topic_permissions]
    topic_path = client.topic_path(project_id, topic_id)
    topic = client.create_topic(request={"name": topic_path})
    policy = client.get_iam_policy(request={"resource": topic_path})  # API call
    policy.bindings.add(role="roles/owner", members=["group:cloud-logs@google.com"])
    client.set_iam_policy(
        request={"resource": topic_path, "policy": policy}
    )  # API call
    # [END sink_topic_permissions]
    # create callback wrapper to delete topic when done
    class TopicDeleter:
        def delete(self):
            client.delete_topic(request={"topic": topic_path})
    return topic, TopicDeleter()
@snippet
def sink_pubsub(client, to_delete):
    """Sink log entries to pubsub.

    Exercises the full sink lifecycle against a Pub/Sub destination:
    create, list, reload, update, and delete.
    """
    topic, topic_deleter = _sink_pubsub_setup(client)
    to_delete.append(topic_deleter)
    sink_name = "robots-pubsub-%d" % (_millis(),)
    filter_str = "logName:apache-access AND textPayload:robot"
    updated_filter = "textPayload:robot"
    # [START sink_pubsub_create]
    destination = "pubsub.googleapis.com/%s" % (topic.name,)
    sink = client.sink(sink_name, filter_=filter_str, destination=destination)
    assert not sink.exists()  # API call
    sink.create()  # API call
    assert sink.exists()  # API call
    # [END sink_pubsub_create]
    to_delete.append(sink)
    # keep a handle: the loop below rebinds the name "sink"
    created_sink = sink
    # [START client_list_sinks]
    for sink in client.list_sinks():  # API call(s)
        do_something_with(sink)
    # [END client_list_sinks]
    # [START sink_reload]
    existing_sink = client.sink(sink_name)
    existing_sink.reload()
    # [END sink_reload]
    assert existing_sink.filter_ == filter_str
    assert existing_sink.destination == destination
    # [START sink_update]
    existing_sink.filter_ = updated_filter
    existing_sink.update()
    # [END sink_update]
    existing_sink.reload()
    assert existing_sink.filter_ == updated_filter
    sink = created_sink
    # [START sink_delete]
    sink.delete()
    # [END sink_delete]
@snippet
def logging_handler(client):
    """Attach Cloud Logging handlers to the stdlib logging module."""
    # [START create_default_handler]
    import logging
    handler = client.get_default_handler()
    cloud_logger = logging.getLogger("cloudLogger")
    cloud_logger.setLevel(logging.INFO)
    cloud_logger.addHandler(handler)
    cloud_logger.error("bad news")
    # [END create_default_handler]
    # [START create_cloud_handler]
    from google.cloud.logging.handlers import CloudLoggingHandler
    from google.cloud.logging_v2.handlers import setup_logging
    handler = CloudLoggingHandler(client)
    setup_logging(handler)
    # [END create_cloud_handler]
    # [START create_named_handler]
    handler = CloudLoggingHandler(client, name="mycustomlog")
    # [END create_named_handler]
@snippet
def logging_json(client):
    """Emit structured (JSON) payloads through the stdlib logging module."""
    # [START logging_json_dumps]
    import logging
    import json
    data_dict = {"hello": "world"}
    logging.info(json.dumps(data_dict))
    # [END logging_json_dumps]
    # [START logging_extra_json_fields]
    import logging
    data_dict = {"hello": "world"}
    logging.info("message field", extra={"json_fields": data_dict})
    # [END logging_extra_json_fields]
@snippet
def using_extras(client):
    """Attach labels, http_request and trace metadata via logging 'extra'."""
    import logging
    # [START logging_extras]
    my_labels = {"foo": "bar"}
    my_http = {"requestUrl": "localhost"}
    my_trace = "01234"
    logging.info(
        "hello", extra={"labels": my_labels, "http_request": my_http, "trace": my_trace}
    )
    # [END logging_extras]
@snippet
def setup_logging(client):
    """Route stdlib logging to Cloud Logging, with and without exclusions."""
    import logging
    # [START setup_logging]
    client.setup_logging(log_level=logging.INFO)
    # [END setup_logging]
    # [START setup_logging_excludes]
    client.setup_logging(log_level=logging.INFO, excluded_loggers=("werkzeug",))
    # [END setup_logging_excludes]
def _line_no(func):
    """Sort key: the first source line number of *func*."""
    code = func.__code__
    return code.co_firstlineno
def _find_examples():
    """Yield every @snippet-decorated function, in source-line order."""
    marked = (obj for obj in globals().values() if getattr(obj, "_snippet", False))
    for func in sorted(marked, key=_line_no):
        yield func
def _name_and_doc(func):
    """Return the (name, docstring) pair of *func* for display."""
    name = func.__name__
    doc = func.__doc__
    return name, doc
def _backoff_not_found(deleter):
    """Call *deleter*, retrying with exponential backoff on NotFound.

    Makes at most five attempts, sleeping 1, 2, 4, 8 and 16 seconds after
    the successive NotFound errors; gives up silently after that.
    """
    from google.cloud.exceptions import NotFound
    for pause in (1, 2, 4, 8, 16):
        try:
            deleter()
        except NotFound:
            time.sleep(pause)
        else:
            return
def main():
    """Run every registered snippet against a live client.

    Failed assertions and unexpected exceptions are printed but do not
    stop the run; objects collected in to_delete are cleaned up after
    each example with NotFound backoff.
    """
    client = Client()
    for example in _find_examples():
        to_delete = []
        print("%-25s: %s" % _name_and_doc(example))
        try:
            example(client, to_delete)
        except AssertionError as failure:
            print("   FAIL: %s" % (failure,))
        except Exception as error:  # pylint: disable=broad-except
            print("  ERROR: %r" % (error,))
        for item in to_delete:
            _backoff_not_found(item.delete)
# Run the usage-guide snippets when executed directly (requires GCP credentials).
if __name__ == "__main__":
    main()
| googleapis/python-logging | samples/snippets/usage_guide.py | Python | apache-2.0 | 15,618 |
#!"C:\Users\hog\Documents\Visual Studio 2010\Projects\ArdupilotMega\ArdupilotMega\bin\Debug\ipy.exe"
from os.path import join
def configuration(parent_package='', top_path=None):
    """numpy.distutils configuration for the scipy.odr package.

    Builds the Fortran 'odrpack' library -- using an optimized BLAS when
    one is found, otherwise the bundled d_lpkbls.f fallback -- plus the
    __odrpack C extension that wraps it.
    """
    import warnings
    from numpy.distutils.misc_util import Configuration
    from numpy.distutils.system_info import get_info, BlasNotFoundError
    config = Configuration('odr', parent_package, top_path)
    libodr_files = ['d_odr.f',
                    'd_mprec.f',
                    'dlunoc.f']
    blas_info = get_info('blas_opt')
    if blas_info:
        libodr_files.append('d_lpk.f')
    else:
        # No optimized BLAS available: warn and use the bundled routines.
        warnings.warn(BlasNotFoundError.__doc__)
        libodr_files.append('d_lpkbls.f')
    libodr = [join('odrpack', x) for x in libodr_files]
    config.add_library('odrpack', sources=libodr)
    sources = ['__odrpack.c']
    # pop() these keys so the remaining blas_info can be splatted below.
    libraries = ['odrpack'] + blas_info.pop('libraries', [])
    include_dirs = ['.'] + blas_info.pop('include_dirs', [])
    config.add_extension('__odrpack',
                         sources=sources,
                         libraries=libraries,
                         include_dirs=include_dirs,
                         depends=['odrpack.h'],
                         **blas_info
                         )
    config.add_data_dir('tests')
    return config
# Standalone build entry point: run "python setup.py ..." from this directory.
if __name__ == '__main__':
    from numpy.distutils.core import setup
    setup(**configuration(top_path='').todict())
| gigglesninja/senior-design | MissionPlanner/Lib/site-packages/scipy/odr/setup.py | Python | gpl-2.0 | 1,297 |
# -*- coding: utf-8 -*-
import XiteWin
# Launch the Scintilla test GUI with an empty argument string.
if __name__ == "__main__":
    XiteWin.main("")
| sdottaka/mruby-bin-scite-mruby | tools/scintilla/test/xite.py | Python | mit | 86 |
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Fujicoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test coinbase transactions return the correct categories.
Tests listtransactions, listsinceblock, and gettransaction.
"""
from test_framework.test_framework import FujicoinTestFramework
from test_framework.util import (
assert_array_result
)
class CoinbaseCategoryTest(FujicoinTestFramework):
    """Verify the wallet category reported for a coinbase transaction as it
    moves through the immature -> generate -> orphan life cycle."""

    def set_test_params(self):
        self.num_nodes = 1

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def assert_category(self, category, address, txid, skip):
        """Assert all three listing RPCs report *category* for *address*."""
        node = self.nodes[0]
        sources = (
            node.listtransactions(skip=skip),
            node.listsinceblock()["transactions"],
            node.gettransaction(txid)["details"],
        )
        for entries in sources:
            assert_array_result(entries, {"address": address}, {"category": category})

    def run_test(self):
        node = self.nodes[0]
        # Mine one block paying a fresh wallet address.
        address = node.getnewaddress()
        node.generatetoaddress(1, address)
        block_hash = node.getbestblockhash()
        txid = node.getblock(block_hash)["tx"][0]
        # With a single confirmation the coinbase is immature.
        self.assert_category("immature", address, txid, 0)
        # Still immature after mining another 99 blocks on top.
        node.generate(99)
        self.assert_category("immature", address, txid, 99)
        # One more block matures it: category becomes "generate".
        node.generate(1)
        self.assert_category("generate", address, txid, 100)
        # Invalidating the funding block orphans the coinbase.
        node.invalidateblock(block_hash)
        self.assert_category("orphan", address, txid, 100)
# Standard functional-test entry point.
if __name__ == '__main__':
    CoinbaseCategoryTest().main()
| fujicoin/fujicoin | test/functional/wallet_coinbase_category.py | Python | mit | 2,302 |
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
NOTE: Installed SCons is not importable like usual Python packages. It is
executed explicitly with command line scripts. This allows multiple
SCons versions to coexist within single Python installation, which
is critical for enterprise build cases. Explicit invokation is
necessary to avoid confusion over which version of SCons is active.
By default SCons is installed into versioned directory, e.g.
site-packages/scons-2.1.0.alpha.20101125 and much of the stuff
below is dedicated to make it happen on various platforms.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import stat
import sys
# Placeholder substituted with the real version string at packaging time.
Version = "__VERSION__"
# Manual pages shipped alongside the scripts.
man_pages = [
    'scons.1',
    'sconsign.1',
    'scons-time.1',
]
# Exit with error if trying to install with Python >= 3.0
# (this vintage of SCons only supports Python 2).
if sys.version_info >= (3,0,0):
    msg = "scons: *** SCons does not run under Python version %s.\n\
Python 3 and above are not yet supported.\n"
    sys.stderr.write(msg % (sys.version.split()[0]))
    sys.exit(1)
# change to setup.py directory if it was executed from other dir,
# so relative paths used below resolve against the source tree.
(head, tail) = os.path.split(sys.argv[0])
if head:
    os.chdir(head)
    sys.argv[0] = tail
# flag if setup.py is run on win32 or _for_ win32 platform,
# (when building windows installer on linux, for example)
is_win32 = 0
if not sys.platform == 'win32':
    # IndexError means no sub-command was given on the command line.
    try:
        if sys.argv[1] == 'bdist_wininst':
            is_win32 = 1
    except IndexError:
        pass
else:
    is_win32 = 1
import distutils
import distutils.core
import distutils.command.install
import distutils.command.install_data
import distutils.command.install_lib
import distutils.command.install_scripts
import distutils.command.build_scripts
# Short aliases for the distutils command classes subclassed below.
_install = distutils.command.install.install
_install_data = distutils.command.install_data.install_data
_install_lib = distutils.command.install_lib.install_lib
_install_scripts = distutils.command.install_scripts.install_scripts
_build_scripts = distutils.command.build_scripts.build_scripts
class _options(object):
    """Bare attribute namespace for parsed installation options."""
    pass
# Module-level singletons: parsed options and human-readable install messages.
Options = _options()
Installed = []
def set_explicitly(name, args):
    """
    Return 1 if the installation directory for *name* was set explicitly
    by the user on the command line, else 0. This is complicated by the
    fact that "install --install-lib=/foo" gets turned into "install_lib
    --install-dir=/foo" internally.
    """
    if args[0] == "install_" + name:
        flag = "--install-dir="
    else:
        # The command is something else (usually "install")
        flag = "--install-%s=" % name
    return int(any(arg.startswith(flag) for arg in args[1:]))
class install(_install):
    """distutils 'install' command extended with SCons-specific options
    (script naming, .bat installation, man pages, library layout)."""
    user_options = _install.user_options + [
        ('no-scons-script', None,
         "don't install 'scons', only install 'scons-%s'" % Version),
        ('no-version-script', None,
         "don't install 'scons-%s', only install 'scons'" % Version),
        ('install-bat', None,
         "install 'scons.bat' script"),
        ('no-install-bat', None,
         "do not install 'scons.bat' script"),
        ('install-man', None,
         "install SCons man pages"),
        ('no-install-man', None,
         "do not install SCons man pages"),
        ('standard-lib', None,
         "install SCons library in standard Python location"),
        ('standalone-lib', None,
         "install SCons library in separate standalone directory"),
        ('version-lib', None,
         "install SCons library in version-numbered directory"),
    ]
    boolean_options = _install.boolean_options + [
        'no-scons-script',
        'no-version-script',
        'install-bat',
        'no-install-bat',
        'install-man',
        'no-install-man',
        'standard-lib',
        'standalone-lib',
        'version-lib'
    ]
    # Link-based installs are only offered on platforms that support them.
    if hasattr(os, 'link'):
        user_options.append(
            ('hardlink-scons', None,
             "hard link 'scons' to the version-numbered script, don't make a separate 'scons' copy"),
        )
        # NOTE(review): 'hardlink-script' does not match the option name
        # 'hardlink-scons' declared above -- verify against upstream intent.
        boolean_options.append('hardlink-script')
    if hasattr(os, 'symlink'):
        user_options.append(
            ('symlink-scons', None,
             "make 'scons' a symbolic link to the version-numbered script, don't make a separate 'scons' copy"),
        )
        # NOTE(review): same mismatch as above ('symlink-script' vs 'symlink-scons').
        boolean_options.append('symlink-script')
    def initialize_options(self):
        """Set SCons defaults; .bat only on win32, man pages only elsewhere."""
        _install.initialize_options(self)
        self.no_scons_script = 0
        self.no_version_script = 0
        self.install_bat = 0
        self.no_install_bat = not is_win32
        self.install_man = 0
        self.no_install_man = is_win32
        self.standard_lib = 0
        self.standalone_lib = 0
        self.version_lib = 0
        self.hardlink_scons = 0
        self.symlink_scons = 0
        # Don't warn about having to put the library directory in the
        # search path.
        self.warn_dir = 0
    def finalize_options(self):
        """Copy the resolved option values onto the module-level Options."""
        _install.finalize_options(self)
        if self.install_bat:
            Options.install_bat = 1
        else:
            Options.install_bat = not self.no_install_bat
        if self.install_man:
            Options.install_man = 1
        else:
            Options.install_man = not self.no_install_man
        Options.standard_lib = self.standard_lib
        Options.standalone_lib = self.standalone_lib
        Options.version_lib = self.version_lib
        Options.install_scons_script = not self.no_scons_script
        Options.install_version_script = not self.no_version_script
        Options.hardlink_scons = self.hardlink_scons
        Options.symlink_scons = self.symlink_scons
def get_scons_prefix(libdir, is_win32):
    """
    Return the right prefix for SCons library installation: walk up from
    the library install directory (.../site-packages, typically) until a
    path component beginning with "python"/"Python" is found.

    On Win32 the result is that Python directory itself (SCons lives
    inside it); on other systems it is the directory one level above.
    Falls back to *libdir* unchanged when no such component exists.
    """
    drive, path = os.path.splitdrive(libdir)
    while path and path != os.sep:
        path, leaf = os.path.split(path)
        if leaf.lower().startswith("python"):
            if is_win32:
                return os.path.join(drive + path, leaf)
            return os.path.join(drive + path)
    return libdir
def force_to_usr_local(self):
    """
    Decide whether the install directories must be forced under
    /usr/local: Mac OS X Tiger/Leopard default to putting libraries and
    scripts under /Library or /System/Library.
    """
    if sys.platform[:6] != 'darwin':
        return False
    in_library = self.install_dir[:9] == '/Library/'
    in_system_library = self.install_dir[:16] == '/System/Library/'
    return in_library or in_system_library
class install_lib(_install_lib):
    """install_lib variant that picks the SCons library directory
    (standard, standalone, or version-numbered) unless the user chose one."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        if force_to_usr_local(self):
            self.install_dir = '/usr/local/lib'
        args = self.distribution.script_args
        if not set_explicitly("lib", args):
            # They didn't explicitly specify the installation
            # directory for libraries...
            is_win32 = sys.platform == "win32" or args[0] == 'bdist_wininst'
            prefix = get_scons_prefix(self.install_dir, is_win32)
            if Options.standalone_lib:
                # ...but they asked for a standalone directory.
                self.install_dir = os.path.join(prefix, "scons")
            elif Options.version_lib or not Options.standard_lib:
                # ...they asked for a version-specific directory,
                # or they get it by default.
                self.install_dir = os.path.join(prefix, "scons-%s" % Version)
        msg = "Installed SCons library modules into %s" % self.install_dir
        Installed.append(msg)
class install_scripts(_install_scripts):
    def finalize_options(self):
        # Force /usr/local/bin on Mac OS X (see force_to_usr_local) and
        # record a human-readable message for the install summary.
        _install_scripts.finalize_options(self)
        if force_to_usr_local(self):
            self.install_dir = '/usr/local/bin'
        self.build_dir = os.path.join('build', 'scripts')
        msg = "Installed SCons scripts into %s" % self.install_dir
        Installed.append(msg)
    def do_nothing(self, *args, **kw):
        # No-op strategy used when a script variant should not be installed.
        pass
    def hardlink_scons(self, src, dst, ver):
        # Replace dst with a hard link to the version-numbered script.
        try: os.unlink(dst)
        except OSError: pass
        os.link(ver, dst)
def symlink_scons(self, src, dst, ver):
try: os.unlink(dst)
except OSError: pass
os.symlink(os.path.split(ver)[1], dst)
def copy_scons(self, src, dst, *args):
try: os.unlink(dst)
except OSError: pass
self.copy_file(src, dst)
self.outfiles.append(dst)
def run(self):
# --- distutils copy/paste ---
if not self.skip_build:
self.run_command('build_scripts')
# --- /distutils copy/paste ---
# Custom SCons installation stuff.
if Options.hardlink_scons:
create_basename_script = self.hardlink_scons
elif Options.symlink_scons:
create_basename_script = self.symlink_scons
elif Options.install_scons_script:
create_basename_script = self.copy_scons
else:
create_basename_script = self.do_nothing
if Options.install_version_script:
create_version_script = self.copy_scons
else:
create_version_script = self.do_nothing
inputs = self.get_inputs()
bat_scripts = [x for x in inputs if x[-4:] == '.bat']
non_bat_scripts = [x for x in inputs if x[-4:] != '.bat']
self.outfiles = []
self.mkpath(self.install_dir)
for src in non_bat_scripts:
base = os.path.basename(src)
scons = os.path.join(self.install_dir, base)
scons_ver = scons + '-' + Version
if is_win32:
scons += '.py'
scons_ver += '.py'
create_version_script(src, scons_ver)
create_basename_script(src, scons, scons_ver)
if Options.install_bat:
if is_win32:
bat_install_dir = get_scons_prefix(self.install_dir, is_win32)
else:
bat_install_dir = self.install_dir
for src in bat_scripts:
scons_bat = os.path.join(bat_install_dir, 'scons.bat')
scons_version_bat = os.path.join(bat_install_dir,
'scons-' + Version + '.bat')
self.copy_scons(src, scons_bat)
self.copy_scons(src, scons_version_bat)
# --- distutils copy/paste ---
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the scripts we just installed.
for file in self.get_outputs():
if self.dry_run:
# log.info("changing mode of %s", file)
pass
else:
# Use symbolic versions of permissions so this script doesn't fail to parse under python3.x
exec_and_read_permission = stat.S_IXOTH | stat.S_IXUSR | stat.S_IXGRP | stat.S_IROTH | stat.S_IRUSR | stat.S_IRGRP
mode_mask = 4095 # Octal 07777 used because python3 has different octal syntax than python 2
mode = ((os.stat(file)[stat.ST_MODE]) | exec_and_read_permission) & mode_mask
# log.info("changing mode of %s to %o", file, mode)
os.chmod(file, mode)
# --- /distutils copy/paste ---
class build_scripts(_build_scripts):
    """build_scripts variant that always builds into build/scripts."""
    def finalize_options(self):
        _build_scripts.finalize_options(self)
        # Override the distutils default so scripts land in a fixed place.
        target = os.path.join('build', 'scripts')
        self.build_dir = target
class install_data(_install_data):
    """distutils install_data command specialized for SCons man pages."""
    def initialize_options(self):
        _install_data.initialize_options(self)
    def finalize_options(self):
        _install_data.finalize_options(self)
        if force_to_usr_local(self):
            self.install_dir = '/usr/local'
        if Options.install_man:
            if is_win32:
                # On Windows there is no man hierarchy; use a Doc folder.
                dir = 'Doc'
            else:
                dir = os.path.join('man', 'man1')
            self.data_files = [(dir, man_pages)]
            man_dir = os.path.join(self.install_dir, dir)
            # Record the destination for the summary printed after setup().
            msg = "Installed SCons man pages into %s" % man_dir
            Installed.append(msg)
        else:
            self.data_files = []
# Package metadata passed to distutils.core.setup() below.
description = "Open Source next-generation build tool."
long_description = """Open Source next-generation build tool.
Improved, cross-platform substitute for the classic Make
utility. In short, SCons is an easier, more reliable
and faster way to build software."""
scripts = [
    'script/scons',
    'script/sconsign',
    'script/scons-time',
    # We include scons.bat in the list of scripts, even on UNIX systems,
    # because we provide an option to allow it be installed explicitly,
    # for example if you're installing from UNIX on a share that's
    # accessible to Windows and you want the scons.bat.
    'script/scons.bat',
]
arguments = {
    'name' : "scons",
    'version' : Version,
    'description' : description,
    'long_description' : long_description,
    'author' : 'Steven Knight',
    'author_email' : 'knight@baldmt.com',
    'url' : "http://www.scons.org/",
    'packages' : ["SCons",
                  "SCons.compat",
                  "SCons.Node",
                  "SCons.Options",
                  "SCons.Platform",
                  "SCons.Scanner",
                  "SCons.Script",
                  "SCons.Tool",
                  "SCons.Tool.MSCommon",
                  "SCons.Tool.packaging",
                  "SCons.Variables",
                  ],
    'package_dir' : {'' : 'engine'},
    'data_files' : [('man/man1', man_pages)],
    'scripts' : scripts,
    # Wire in the customized command classes defined above.
    'cmdclass' : {'install' : install,
                  'install_lib' : install_lib,
                  'install_data' : install_data,
                  'install_scripts' : install_scripts,
                  'build_scripts' : build_scripts}
}
distutils.core.setup(**arguments)
# Print a summary of everything the custom commands installed.
if Installed:
    for i in Installed:
        print(i)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| Distrotech/scons | src/setup.py | Python | mit | 16,346 |
# Copyright 2012, SIL International
# All rights reserved.
#
# This library is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should also have received a copy of the GNU Lesser General Public
# License along with this library in the file named "LICENSE".
# If not, write to the Free Software Foundation, 51 Franklin Street,
# suite 500, Boston, MA 02110-1335, USA or visit their web page on the
# internet at http://www.fsf.org/licenses/lgpl.html.
import os, subprocess, re, sys
from tempfile import mktemp
from shutil import copyfile
from qtpy import QtCore, QtGui, QtWidgets
from xml.etree import cElementTree as XmlTree
mainapp = None
pendingErrors = []
class DataObj(object) :
    """Base class for data objects that may expose an attribute model."""
    def attribModel(self) :
        # Subclasses override this to supply a model; the base has none.
        return None
class ModelSuper(object) :
    """Marker base class for model objects; adds no behavior of its own."""
def configval(config, section, option) :
    """Return the config option's value, or None when it is absent."""
    if not config.has_option(section, option) :
        return None
    return config.get(section, option)
def configvalString(config, section, option) :
    """Return the config option's value, or '' when it is absent."""
    if not config.has_option(section, option) :
        return ''
    return config.get(section, option)
def configintval(config, section, option) :
    """Read a config option as an integer.

    Digit strings convert to int, 'true' (any case) yields 1, and
    anything else - including a missing option or empty value - yields 0.
    """
    if not config.has_option(section, option) :
        return 0
    txt = config.get(section, option)
    if not txt :
        return 0
    if txt.isdigit() :
        return int(txt)
    return 1 if txt.lower() == 'true' else 0
def copyobj(src, dest) :
    """Shallow-copy every non-callable, non-dunder attribute of src onto dest."""
    for name in dir(src) :
        if name.startswith('__') :
            continue
        value = getattr(src, name)
        if not callable(value) :
            setattr(dest, name, value)
# Absolute path to the Graphite compiler executable, or None if not found.
grcompiler = None
def findgrcompiler() :
    """Locate the Graphite compiler and store its path in ``grcompiler``.

    On Windows: try the installer's registry entry first (32- and 64-bit
    hives), then a bundled copy next to this module, then PATH.
    Elsewhere: try a bundled copy, then PATH.  Prints the outcome.
    """
    global grcompiler
    if sys.platform == 'win32' :
        try :
            from winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE
            node = "Microsoft\\Windows\\CurrentVersion\\Uninstall\\Graphite Compiler_is1"
            if sys.maxsize > 1 << 32 :
                # 64-bit Python: the 32-bit installer registers under Wow6432Node.
                r = OpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\\Wow6432Node\\" + node)
            else:
                r = OpenKey(HKEY_LOCAL_MACHINE, "SOFTWARE\\" + node)
            p = QueryValue(r, "InstallLocation")
            grcompiler = os.path.join(p, "GrCompiler.exe")
        except WindowsError :
            # No registry entry - fall back to a bundled copy, then PATH.
            exe = os.path.join(os.path.dirname(__file__), 'grcompiler', 'GrCompiler.exe')
            if os.path.exists(exe) :
                grcompiler = exe
            else :
                for p in os.environ['PATH'].split(';') :
                    a = os.path.join(p, 'grcompiler.exe')
                    if os.path.exists(a) :
                        grcompiler = a
                        break
    else :
        # Non-Windows: bundled copy first, then search PATH.
        exe = os.path.join(os.path.dirname(__file__), 'grcompiler', 'grcompiler')
        if os.path.exists(exe) :
            grcompiler = exe
        else :
            for p in os.environ['PATH'].split(':') :
                a = os.path.join(p, "grcompiler")
                if os.path.exists(a) :
                    grcompiler = a
                    break
    if grcompiler is None:
        print("...not found")
    else:
        print("...found in " + grcompiler)
# Return 0 if successful.
def buildGraphite(config, app, font, fontFileName, lowLevelErrFile = None, gdlErrFileName = None) :
    """Build the Graphite tables of fontFileName from GDL sources.

    Optionally regenerates the GDL from the font's attachment-point data
    first ('usemakegdl'), then invokes the Graphite compiler.  Returns the
    compiler's exit status (0 on success) or True when no GDL file exists.
    On failure the original font file is restored.
    """
    global grcompiler
    #print("buildGraphite")
    # Prevent the error-reporting mechanism from interpreting this file as legitimate output
    # in case the entire call fails. (This assumes that the full path to the file is provided,
    # which the caller currently does.)
    try:
        os.remove(gdlErrFileName)
    except:
        pass
    if configintval(config, 'build', 'usemakegdl') :
        gdlFileName = configval(config, 'build', 'makegdlfile') # auto-generated GDL
        if config.has_option('main', 'ap') and not configval(config, 'build', 'apronly'): # AP XML file
            # Generate the AP GDL file.
            apFilename = config.get('main', 'ap')
            font.saveAP(apFilename, gdlFileName)
            if app : app.updateFileEdit(apFilename)
        cmd = configval(config, 'build', 'makegdlcmd')
        if cmd and cmd.strip() :
            # Call the make command to perform makegdl.
            makecmd = expandMakeCmd(config, cmd)
            print(makecmd)
            subprocess.call(makecmd, shell = True)
        else :
            # Use the default makegdl process.
            font.createClasses()
            font.calculatePointClasses()
            font.ligClasses()
            attPassNum = int(config.get('build', 'attpass'))
            f = open(gdlFileName, "w")
            font.outGDL(f)
            if attPassNum > 0 : font.outPosRules(f, attPassNum)
            if configval(config, 'build', 'gdlfile') :
                # Chain in the hand-written GDL file.
                f.write('\n\n#include "%s"\n' % (os.path.abspath(config.get('build', 'gdlfile'))))
            f.close()
            if app : app.updateFileEdit(gdlFileName)
    else :
        gdlFileName = configval(config, 'build', 'gdlfile')
        if not gdlFileName or not os.path.exists(gdlFileName) :
            # Nothing to compile: report via the error file and bail out.
            f = open('gdlerr.txt' ,'w')
            if not gdlFileName :
                f.write("No GDL File specified. Build failed")
            else :
                f.write("No such GDL file: \"%s\". Build failed" % gdlFileName)
            f.close()
            return True
    cwd = os.getcwd()
    sourcePath = os.path.dirname(os.path.abspath(gdlFileName))
    pathToCwd = pathFromTo(sourcePath, cwd) # prepend this to existing paths
    #tweakWarning = generateTweakerGDL(config, app)
    #if tweakWarning != "" :
    #    app.tab_errors.addWarning(tweakWarning)
    #    app.tab_errors.setBringToFront(True)
    # Compile into a temp copy so a failed build can be rolled back.
    tempFontFileIn = mktemp()
    if config.has_option('build', 'usettftable') : # unimplemented
        subprocess.call(("ttftable", "-delete", "graphite", fontFileName , tempFontFileIn))
    else :
        copyfile(fontFileName, tempFontFileIn)
    parms = {}
    if lowLevelErrFile :
        # Redirect the compiler's stdout/stderr into the caller's log file.
        parms['stderr'] = subprocess.STDOUT
        parms['stdout'] = lowLevelErrFile
    if config.has_option('build', 'grcexecutable') and configval(config, 'build', 'grcexecutable') != "":
        # Call the compiler they specified:
        grcExec = configval(config, 'build', 'grcexecutable')
    else:
        grcExec = grcompiler
    # Build the -w warning-suppression flags for the compiler.
    if config.has_option('build', 'ignorewarnings') :
        warningList = configval(config, 'build', 'ignorewarnings')
        warningList = warningList.replace(' ', '')
        if warningList == 'none' :
            warningList = ['-wall']
        elif warningList == '' :
            warningList = ['-w510', '-w3521'] # warnings to ignore by default
        else :
            warningList = warningList.replace(',', ' -w')
            warningList = "-w" + warningList
            warningList = warningList.split(' ')
    else:
        warningList = ['-w510', '-w3521'] # warnings to ignore by default
    res = 1
    if grcExec is not None:
        try:
            # Change the current working directory to the one where the GDL file is located.
            # This is done to account for a bug in the compiler that interprets #include statements
            # as relative to the CWD rather than the main source file (happens only on Windows).
            # -- NO LONGER NEEDED; the compiler has been fixed.
            #gdlFileBase = os.path.basename(gdlFileName)
            #tempFontFileIn = os.path.abspath(tempFontFileIn)
            #fontFileName_src = pathToCwd + os.path.relpath(fontFileName)
            #os.chdir(sourcePath)
            print("Compiling...")
            argList = [grcExec]
            argList.extend(warningList)
            if gdlErrFileName is not None and gdlErrFileName != "":
                argList.extend(["-e", gdlErrFileName])
            argList.extend(["-D", "-q", gdlFileName, tempFontFileIn, fontFileName])
            res = subprocess.call(argList, **parms)
        except:
            print("error in running compiler")
    else:
        print("grcompiler is missing")
    #os.chdir(cwd) # return to where we were - NOT NEEDED
    #print("compilation result =", res)
    if res:
        # failure in compilation - restore the previous version of the font
        copyfile(tempFontFileIn, fontFileName)
    os.remove(tempFontFileIn)
    return res
# Placeholder letters recognized by expandMakeCmd, mapped to the
# (section, option) config entry whose value replaces "%<letter>".
replacements = {
    'a' : ['main', 'ap'],
    'f' : ['main', 'font'],
    'g' : ['build', 'makegdlfile'],
    'i' : ['build', 'gdlfile'],
    'p' : ['build', 'attpass']
}
def expandMakeCmd(config, txt) :
    """Expand %a/%f/%g/%i/%p placeholders in a make-command template.

    Each placeholder letter maps (via the module-level ``replacements``
    table) to a config (section, option) pair whose value is substituted
    into *txt*.  File values are converted to absolute paths, except %i
    (the main GDL include file), which is passed through verbatim because
    the gdlpp preprocessor resolves relative #include paths itself.
    %p (the attachment pass number) is not a filename and is substituted
    verbatim as well.

    BUG FIX: the missing-value check now runs before the %p/%i branches.
    Previously ``cval == None`` was only tested last, so a missing
    'attpass' or 'gdlfile' option passed None to str.replace and raised
    TypeError instead of producing the "[missing filename]" placeholder.
    """
    ###return re.sub(r'%([afgip])', lambda m: os.path.abspath(configval(config, *replacements[m.group(1)])), txt)
    for key, val in replacements.items() :
        cval = configval(config, val[0], val[1])
        if cval is None :
            # Substitute a visible placeholder rather than crashing.
            txt = txt.replace('%' + key, "[missing filename]")
        elif key == 'p' :
            # Pass number, not a filename - use as-is.
            txt = txt.replace('%p', cval)
        elif key == 'i' :
            # Don't convert main GDL include file to absolute path - this can create problems on Windows.
            # (It seems to confuse the gdlpp module that handles the #include statements.)
            txt = txt.replace('%i', cval)
        else :
            txt = txt.replace('%' + key, os.path.abspath(cval))
    return txt
def reportError(text) :
    """Show an error in the app's error tab, or queue it until one exists."""
    global mainapp, pendingErrors
    if mainapp :
        mainapp.tab_errors.addItem(text)
    else :
        # No UI registered yet - hold the message for registerErrorLog().
        pendingErrors.append(text)
def registerErrorLog(app) :
    """Register the main app as the error sink and flush queued messages."""
    global mainapp, pendingErrors
    mainapp = app
    for message in pendingErrors :
        mainapp.tab_errors.addItem(message)
    pendingErrors = []
def ETcanon(et, curr = 0, indent = 2) :
    """Recursively set text/tail whitespace so the XML tree pretty-prints.

    ``curr`` is the current indentation column and ``indent`` the step per
    nesting level.  Returns the element for call chaining.
    """
    count = len(et)
    if count :
        pad = ' ' * (curr + indent)
        et.text = "\n" + pad
        last = count - 1
        for i, child in enumerate(et) :
            ETcanon(child, curr + indent, indent)
            # The last child's tail dedents back to the parent's level.
            child.tail = "\n" + (pad if i < last else ' ' * curr)
    return et
def ETinsert(elem, child) :
    """Insert child into elem, keeping children ordered by tag name."""
    for position, existing in enumerate(elem) :
        if existing.tag > child.tag :
            elem.insert(position, child)
            return
    # No larger tag found: child belongs at the end.
    elem.append(child)
# Return the file p with a path relative to the given base.
def relpath(p, base) :
    """Return p expressed relative to the directory containing base."""
    return os.path.relpath(p, os.path.dirname(base) or '.')
def as_entities(text) :
    """Escape every non-ASCII character as a \\uXXXX sequence.

    Falsy input (None or '') yields the empty string.
    """
    if not text :
        return ""
    return re.sub(u'([^\u0000-\u007f])',
                  lambda m: "\\u%04X" % ord(m.group(1)),
                  text)
def generateTweakerGDL(config, app) :
    """Write the positioning-tweak GDL file from the tweak XML data.

    Returns "" on success (or when tweaking is not configured), or a
    warning string the caller should surface.  On success all pending
    tweaks are accepted, since they are now baked into the rules.
    """
    if not config.has_option('build', 'tweakxmlfile') :
        # Tweaking not configured - nothing to do.
        return ""
    tweakxmlfile = config.get('build', 'tweakxmlfile')
    if not config.has_option('build', 'tweakgdlfile') or config.get('build', 'tweakgdlfile') == "":
        return "Warning: no GDL tweak file specified; tweaks ignored."
    tweakgdlfile = config.get('build', 'tweakgdlfile')
    if config.has_option('build', 'tweakconstraint') :
        tweakConstraint = config.get('build', 'tweakconstraint')
    else:
        tweakConstraint = ""
    gdlfile = config.get('build', 'gdlfile')
    fontname = config.get('main', 'font')
    tweakData = app.tab_tweak.parseFile(tweakxmlfile)
    passindex = configval(config, 'build', 'tweakpass')
    f = open(tweakgdlfile, 'w')
    f.write("/*\n Tweaker GDL file for font " + fontname + " to include in " + gdlfile + "\n*/\n\n")
    if passindex :
        # Open the positioning table and the tweak pass.
        f.write("table(positioning)\n\n")
        if tweakConstraint != "" :
            # Output pass constraint
            if tweakConstraint[0:2] != "if" :
                f.write("if " + "( " + tweakConstraint + " )")
            else :
                f.write(tweakConstraint)
            f.write("\n\n")
        f.write("pass(" + passindex + ")\n\n")
    for (groupLabel, tweaks) in tweakData.items() :
        f.write("\n//--- " + groupLabel + " ---\n\n")
        for tweak in tweaks :
            f.write("// " + tweak.name + "\n")
            # Don't output the feature tests for now. If we reinstate this code, we need to get the
            # the GDL feature name out of the GDX file.
            # if (tweak.feats and tweak.feats != "") or (tweak.lang and tweak.lang != "") :
            #     f.write("if (")
            #     andText = ""
            #     if (tweak.lang and tweak.lang != "") :
            #         f.write("lang"," == ", '"',tweak.lang,'"')
            #         andText = " && "
            #     for (fid, value) in tweak.feats.items() :
            #         f.write(andText + fid + " == " + str(value))
            #         andText = " && "
            #     f.write(")\n")
            # i counts the glyphs actually emitted (non-ignored).
            i = 0
            for twglyph in tweak.glyphs :
                if twglyph.status != "ignore" :
                    if i > 0 :
                        # Separator between glyph items in the rule.
                        if len(tweak.glyphs) > 2 :
                            f.write("\n ")
                        else :
                            f.write(" ")
                    if twglyph.gclass and twglyph.gclass != "" :
                        f.write(twglyph.gclass)
                    else:
                        f.write(twglyph.name)
                    if twglyph.status == "optional" :
                        f.write("?")
                    # Committed + pending shifts are both included.
                    shiftx = twglyph.shiftx + twglyph.shiftx_pending
                    shifty = twglyph.shifty + twglyph.shifty_pending
                    if shiftx != 0 or shifty != 0 :
                        f.write(" { ")
                        if shiftx != 0 : f.write("shift.x = " + str(shiftx) + "m; ")
                        if shifty != 0 : f.write("shift.y = " + str(shifty) + "m; ")
                        f.write("}")
                    i += 1
            if i > 0 : f.write(" ;")
            # if tweak.feats and tweak.feats != "" :
            #     f.write("\nendif;")
            f.write("\n\n")
    if passindex :
        # Close the pass, optional constraint, and the positioning table.
        f.write("\nendpass; // " + passindex)
        if tweakConstraint != "" :
            f.write("\n\nendif; // pass constraint")
        f.write("\n\nendtable; // positioning\n\n")
    f.close()
    if app : app.updateFileEdit(tweakgdlfile)
    print("Tweak GDL generated - accepting pending tweaks.")
    # Accept all pending shifts, since they are now part of the Graphite rules.
    app.tab_tweak.acceptPending(tweakxmlfile)
    return "" # success
def popUpError(msg) :
    """Display msg in a modal Graide message box."""
    box = QtWidgets.QMessageBox()
    box.setWindowTitle("Graide")
    box.setText(msg)
    box.exec_()
def pathFromTo(path1, path2):
    """Return a relative path string leading from path1 to path2.

    Both paths are made absolute first; the result uses '/' separators
    and ends with a trailing '/' for each descended component.
    """
    parts1 = splitWholePath(os.path.abspath(path1))
    parts2 = splitWholePath(os.path.abspath(path2))
    #print(parts1)
    #print(parts2)
    # Length of the shared leading component sequence.
    shared = 0
    limit = min(len(parts1), len(parts2))
    while shared < limit and parts1[shared] == parts2[shared]:
        shared += 1
    ups = "../" * (len(parts1) - shared)
    downs = "".join(segment + "/" for segment in parts2[shared:])
    return ups + downs
def splitWholePath(path):
    """Split path into all of its components, root (or '') first."""
    parts = []
    head, tail = os.path.split(path)
    while tail:
        parts.append(tail)
        head, tail = os.path.split(head)
    # The remaining head is the root ('/', a drive, or '' for relative paths).
    parts.append(head)
    parts.reverse()
    return parts
| silnrsi/graide | lib/graide/utils.py | Python | lgpl-2.1 | 15,696 |
#!/usr/bin/env python
from Globber import match
import unittest
class TestGlobber(unittest.TestCase):
    """Exercises Globber.match over '*', '?', and character-set patterns."""

    def _expect(self, cases):
        # cases: iterable of (pattern, text, expected-bool) triples.
        for pattern, text, expected in cases:
            if expected:
                self.assertTrue(match(pattern, text))
            else:
                self.assertFalse(match(pattern, text))

    def test_star(self):
        self._expect([
            ('*', '', True),
            ('*2*', '123', True),
            ('*3', '123', True),
            ('*', 'IOUJHG234*#$^%', True),
            ('*2', '123', False),
            ('*b', 'IOUJHG234*#$^%', False),
        ])

    def test_question_mark(self):
        self._expect([
            ('?', 'a', True),
            ('?b', 'ab', True),
            ('?', 'ab', False),
            ('g?', 'ab', False),
        ])

    def test_set(self):
        self._expect([
            ('[ch]', 'c', True),
            ('[ch]', 'h', True),
            ('[ch]', 'g', False),
            ('[^ch]', 'c', False),
        ])

    def test_examples(self):
        self._expect([
            ('bo*awesome', 'bo is awesome', True),
            ('*.java', 'server.java', True),
            ('*.java', 'server2.java', True),
            ('*.[ch]', 'abc.c', True),
            ('*.[ch]', 'abc.h', True),
            ('*.?', 'abcd.d', True),
            ('*.[ch]', 'abc.g', False),
        ])
if __name__ == '__main__':
unittest.main()
| blopker/Globber | GlobberTest.py | Python | mit | 1,281 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
import logging
import argparse
import tgcli
logging.basicConfig(stream=sys.stdout, format='%(asctime)s [%(levelname)s] %(message)s', level=logging.INFO)
def export_avatar_peer(tc, peertype, pid, filename):
    """Download one peer's avatar via telegram-cli and move it to filename.

    Skips the download when filename already exists.  ``peertype`` selects
    the cli command (cmd_load_user_photo / cmd_load_chat_photo / ...).
    """
    peername = '%s#id%d' % (peertype, pid)
    if filename and os.path.isfile(filename):
        logging.info('Avatar exists: ' + peername)
        return
    res = getattr(tc, 'cmd_load_%s_photo' % peertype)(peername)
    if 'result' in res and res['result'] != 'FAIL':
        # The cli downloads to a temp path; move it to the target name.
        os.rename(res['result'], filename)
        logging.info('Exported avatar for %s' % peername)
    else:
        logging.warning('Failed to export avatar for %s: %s' % (peername, res))
def export_avatar_group(tc, grouptype, pid, path):
    """Export the avatar of every member of a chat or channel into path.

    Channel member lists are paged 100 at a time; plain chats return all
    members from cmd_chat_info in one call.  Files are named <peer_id>.jpg.
    """
    peername = '%s#id%d' % (grouptype, pid)
    members = {}
    logging.info('Fetching info for %s' % peername)
    if grouptype == 'channel':
        # First page, then keep paging by offset until an empty page.
        items = tc.cmd_channel_get_members(peername, 100)
        for item in items:
            members[item['peer_id']] = item
        dcount = 100
        while items:
            items = tc.cmd_channel_get_members(peername, 100, dcount)
            for item in items:
                members[item['peer_id']] = item
            dcount += 100
    else:
        obj = tc.cmd_chat_info(peername)
        for item in obj['members']:
            members[item['peer_id']] = item
    for key in members:
        export_avatar_peer(tc, 'user', key, os.path.join(path, '%d.jpg' % key))
def main(argv):
    """Parse command-line arguments and export the requested avatar(s)."""
    parser = argparse.ArgumentParser(description="Export Telegram messages.")
    parser.add_argument("-o", "--output", help="output path", default="export")
    parser.add_argument("-g", "--group", help="export every user's avatar in a group or channel", action='store_true')
    parser.add_argument("-t", "--type", help="peer type, can be 'user', 'chat', 'channel'", default="user")
    parser.add_argument("-i", "--id", help="peer id", type=int)
    parser.add_argument("-e", "--tgbin", help="Telegram-cli binary path", default="bin/telegram-cli")
    args = parser.parse_args(argv)
    with tgcli.TelegramCliInterface(args.tgbin, run=False) as tc:
        # Populate the cli's dialog cache so peer lookups by id succeed.
        tc.cmd_dialog_list()
        if not os.path.isdir(args.output):
            os.mkdir(args.output)
        if args.group:
            export_avatar_group(tc, args.type, args.id, args.output)
        else:
            export_avatar_peer(tc, args.type, args.id, os.path.join(args.output, '%s%d.jpg' % (args.type, args.id)))
if __name__ == '__main__':
main(sys.argv[1:])
| gumblex/tg-export | avatar.py | Python | lgpl-3.0 | 2,554 |
from __future__ import unicode_literals
import cgi
import codecs
import logging
import re
import sys
from io import BytesIO
from threading import Lock
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.urls import set_script_prefix
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
_slashes_re = re.compile(br'/+')
class LimitedStream(object):
    '''
    Wraps another stream so that at most ``limit`` bytes can ever be read
    from it, buffering any bytes pulled ahead by readline().
    '''
    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit
        self.buffer = b''
        self.buf_size = buf_size

    def _read_limited(self, size=None):
        # Clamp the request to the remaining byte budget.
        budget = self.remaining
        request = budget if size is None else min(size, budget)
        if request == 0:
            return b''
        data = self.stream.read(request)
        self.remaining = budget - len(data)
        return data

    def read(self, size=None):
        buffered = self.buffer
        if size is None:
            # Drain the buffer plus everything left in the budget.
            data = buffered + self._read_limited()
            self.buffer = b''
        elif size < len(buffered):
            # Served entirely from the buffer.
            data, self.buffer = buffered[:size], buffered[size:]
        else:
            data = buffered + self._read_limited(size - len(buffered))
            self.buffer = b''
        return data

    def readline(self, size=None):
        # Accumulate into the buffer until it holds a newline or meets size.
        while b'\n' not in self.buffer and \
                (size is None or len(self.buffer) < size):
            chunk = self._read_limited(size - len(self.buffer) if size else None)
            if not chunk:
                break
            self.buffer += chunk
        sio = BytesIO(self.buffer)
        line = sio.readline(size) if size else sio.readline()
        # Whatever readline left behind stays buffered for the next call.
        self.buffer = sio.read()
        return line
class WSGIRequest(http.HttpRequest):
    """HttpRequest subclass built directly from a WSGI environ dict."""
    def __init__(self, environ):
        script_name = get_script_name(environ)
        path_info = get_path_info(environ)
        if not path_info:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            path_info = '/'
        self.environ = environ
        self.path_info = path_info
        # be careful to only replace the first slash in the path because of
        # http://test/something and http://test//something being different as
        # stated in http://www.ietf.org/rfc/rfc2396.txt
        self.path = '%s/%s' % (script_name.rstrip('/'),
                               path_info.replace('/', '', 1))
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        self.content_type, self.content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
        if 'charset' in self.content_params:
            # Only honor the declared charset if Python actually knows it.
            try:
                codecs.lookup(self.content_params['charset'])
            except LookupError:
                pass
            else:
                self.encoding = self.content_params['charset']
        self._post_parse_error = False
        try:
            content_length = int(environ.get('CONTENT_LENGTH'))
        except (ValueError, TypeError):
            # Missing or malformed CONTENT_LENGTH means an empty body.
            content_length = 0
        # Cap body reads at the declared length per the WSGI spec.
        self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
        self._read_started = False
        self.resolver_match = None

    def _get_scheme(self):
        return self.environ.get('wsgi.url_scheme')

    @cached_property
    def GET(self):
        # The WSGI spec says 'QUERY_STRING' may be absent.
        raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
        return http.QueryDict(raw_query_string, encoding=self._encoding)

    def _get_post(self):
        # Parse the body lazily, only on first access.
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    @cached_property
    def COOKIES(self):
        raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
        return http.parse_cookie(raw_cookie)

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    POST = property(_get_post, _set_post)
    FILES = property(_get_files)
class WSGIHandler(base.BaseHandler):
    """The WSGI application object: turns an environ into a Django response."""
    initLock = Lock()
    request_class = WSGIRequest

    def __call__(self, environ, start_response):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            # Double-checked locking so concurrent first requests only
            # initialize the middleware chain once.
            with self.initLock:
                # Check that middleware is still uninitialized.
                if self._request_middleware is None:
                    self.load_middleware()
        set_script_prefix(get_script_name(environ))
        signals.request_started.send(sender=self.__class__, environ=environ)
        try:
            request = self.request_class(environ)
        except UnicodeDecodeError:
            # Malformed bytes in the environ: respond 400, don't crash.
            logger.warning('Bad Request (UnicodeDecodeError)',
                exc_info=sys.exc_info(),
                extra={
                    'status_code': 400,
                }
            )
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)
        response._handler_class = self.__class__
        status = '%s %s' % (response.status_code, response.reason_phrase)
        # Header names/values must be native strings per PEP 3333.
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
        start_response(force_str(status), response_headers)
        # Let the server stream file responses efficiently when it can.
        if getattr(response, 'file_to_stream', None) is not None and environ.get('wsgi.file_wrapper'):
            response = environ['wsgi.file_wrapper'](response.file_to_stream)
        return response
def get_path_info(environ):
    """
    Returns the HTTP request's PATH_INFO as a unicode string.
    """
    raw = get_bytes_from_wsgi(environ, 'PATH_INFO', '/')
    return raw.decode(UTF_8)
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
    set (to anything).
    """
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_text(settings.FORCE_SCRIPT_NAME)
    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
    if not script_url:
        script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')
    if script_url:
        if b'//' in script_url:
            # mod_wsgi squashes multiple successive slashes in PATH_INFO,
            # do the same with script_url before manipulating paths (#17133).
            script_url = _slashes_re.sub(b'/', script_url)
        path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
        # SCRIPT_NAME is the full URL with the PATH_INFO suffix stripped.
        script_name = script_url[:-len(path_info)] if path_info else script_url
    else:
        script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')
    return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as bytes.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = environ.get(str(key), str(default))
    if six.PY3:
        # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
        # decoded with ISO-8859-1. This is wrong for Django websites where
        # UTF-8 is the default. Re-encode to recover the original bytestring.
        return value.encode(ISO_8859_1)
    return value
def get_str_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as str.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    raw = get_bytes_from_wsgi(environ, key, default)
    if six.PY3:
        return raw.decode(UTF_8, errors='replace')
    return raw
| vincepandolfo/django | django/core/handlers/wsgi.py | Python | bsd-3-clause | 9,300 |
#!/usr/bin/env python3
# coding: utf-8
# iOS specific imports
import keychain
import stringutils as su
import jumon
keychain_service = 'jumon'
keychain_account = 'secret'
# Clears saved secret
def clearSecret():
    """Delete the stored secret from the iOS keychain."""
    keychain.delete_password(keychain_service, keychain_account)
    print('Done.')
def updateSecret(secret):
    """Store (or overwrite) the secret in the iOS keychain."""
    keychain.set_password(keychain_service, keychain_account, secret)
    print('Done.')
def getSecret():
    """Return the stored secret; None if nothing has been saved yet."""
    return keychain.get_password(keychain_service, keychain_account)
def newJumon(salt, fmt_string=jumon.default_fmt_string):
    """Build a Jumon from salt and the keychain-stored secret.

    NOTE: the fmt_string default is captured once at import time.
    """
    secret = getSecret()
    return jumon.Jumon(salt, secret, fmt_string)
| daniel-jozsef/Pythonista | jumon_iOS.py | Python | gpl-3.0 | 638 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import azure.cli.command_modules.extension._help # pylint: disable=unused-import
def load_params(_):
    # Imported lazily (for its side effects) so the CLI only pays the cost
    # when this module's parameters are actually requested.
    import azure.cli.command_modules.extension._params  # pylint: disable=redefined-outer-name, unused-variable
def load_commands():
    # Imported lazily (for its side effects) so command registration only
    # happens when the command table is built.
    import azure.cli.command_modules.extension.commands  # pylint: disable=redefined-outer-name, unused-variable
| QingChenmsft/azure-cli | src/command_modules/azure-cli-extension/azure/cli/command_modules/extension/__init__.py | Python | mit | 698 |
# -*- coding: utf-8 -*-
""" RESTful Search Methods
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{gluon}} <http://web2py.com>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import re
import string
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.serializers import json as jsons
from gluon.storage import Storage
from gluon.html import BUTTON
from s3crud import S3CRUD
from s3navigation import s3_search_tabs
from s3utils import S3DateTime, s3_get_foreign_key, s3_unicode
from s3validators import *
from s3widgets import S3OrganisationHierarchyWidget, s3_grouped_checkboxes_widget
from s3export import S3Exporter
from s3resource import S3FieldSelector, S3Resource
__all__ = ["S3SearchWidget",
"S3SearchSimpleWidget",
"S3SearchMinMaxWidget",
"S3SearchOptionsWidget",
"S3SearchLocationWidget",
"S3SearchSkillsWidget",
"S3SearchOrgHierarchyWidget",
"S3Search",
"S3LocationSearch",
"S3OrganisationSearch",
"S3PersonSearch",
"S3HRSearch",
"S3PentitySearch",
]
MAX_RESULTS = 1000
MAX_SEARCH_RESULTS = 200
# =============================================================================
class S3SearchWidget(object):
    """
        Search Widget for interactive search (base class)
    """

    def __init__(self, field=None, name=None, **attr):
        """
            Configures the search option

            @param field: name(s) of the fields to search in
            @param name: the HTML element name of the widget

            @keyword label: a label for the search widget
            @keyword comment: a comment for the search widget

            @raise SyntaxError: if no search field was specified
        """
        self.other = None
        self.field = field

        if not self.field:
            raise SyntaxError("No search field specified.")

        self.attr = Storage(attr)
        if name is not None:
            self.attr["_name"] = name

        self.master_query = None
        self.search_field = None

    # -------------------------------------------------------------------------
    def widget(self, resource, vars):
        """
            Returns the widget (to be implemented in subclass)

            @param resource: the resource to search in
            @param vars: the URL GET variables as dict
        """
        # Fix: this method previously referenced an undefined name "attr"
        # before raising, so calling it on the base class raised a NameError
        # instead of the intended NotImplementedError
        raise NotImplementedError

    # -------------------------------------------------------------------------
    @staticmethod
    def query(resource, value):
        """
            Returns a sub-query for this search option (to be implemented
            in subclass)

            @param resource: the resource to search in
            @param value: the value returned from the widget
        """
        raise NotImplementedError

    # -------------------------------------------------------------------------
    def build_master_query(self, resource):
        """
            Get the master query for the specified field(s): for every
            table involved, all accessible (and undeleted) records.

            Sets self.master_query (tablename -> query) and
            self.search_field (tablename -> list of Fields).

            @param resource: the resource to search in
        """
        db = current.db
        table = resource.table
        components = resource.components
        accessible_query = resource.accessible_query

        master_query = Storage()
        search_field = Storage()

        fields = self.field
        if fields and not isinstance(fields, (list, tuple)):
            fields = [fields]

        # Find the tables, joins and fields to search in
        for f in fields:
            ktable = None
            rtable = None
            component = None
            reference = None
            multiple = False

            if f.find(".") != -1: # Component
                cname, f = f.split(".", 1)
                if cname not in components:
                    continue
                else:
                    component = components[cname]
                    ktable = component.table
                    ktablename = component.tablename
                    pkey = component.pkey
                    fkey = component.fkey
                    # Do not add queries for empty tables
                    if not db(ktable.id > 0).select(ktable.id,
                                                    limitby=(0, 1)).first():
                        continue
            else: # this resource
                ktable = table
                ktablename = table._tablename

            if f.find("$") != -1: # Referenced object
                rkey, f = f.split("$", 1)
                if not rkey in ktable.fields:
                    continue
                rtable = ktable
                rtablename = ktablename
                ktablename, key, multiple = s3_get_foreign_key(ktable[rkey])
                if not ktablename:
                    continue
                else:
                    ktable = db[ktablename]

                # NOTE(review): "reference" is initialised to None and never
                # set truthy, so the "if reference:" join branches below are
                # currently dead code - TODO confirm whether the rtable joins
                # need to be re-enabled here
                # Do not add queries for empty tables
                if not db(ktable.id > 0).select(ktable.id,
                                                limitby=(0, 1)).first():
                    continue

            # Master queries
            # @todo: update this for new QueryBuilder (S3ResourceFilter)
            if ktable and ktablename not in master_query:
                query = (accessible_query("read", ktable))
                if "deleted" in ktable.fields:
                    query = (query & (ktable.deleted == "False"))
                join = None
                if reference:
                    if ktablename != rtablename:
                        q = (accessible_query("read", rtable))
                        if "deleted" in rtable.fields:
                            q = (q & (rtable.deleted == "False"))
                    else:
                        q = None
                    if multiple:
                        j = (rtable[rkey].contains(ktable.id))
                    else:
                        j = (rtable[rkey] == ktable.id)
                    if q is not None:
                        join = q & j
                    else:
                        join = j
                j = None
                if component:
                    if reference:
                        q = (accessible_query("read", table))
                        if "deleted" in table.fields:
                            q = (q & (table.deleted == "False"))
                        j = (q & (table[pkey] == rtable[fkey]))
                    else:
                        j = (table[pkey] == ktable[fkey])
                if j is not None and join is not None:
                    join = (join & j)
                elif j:
                    join = j
                if join is not None:
                    query = (query & join)

                master_query[ktable._tablename] = query

            # Search fields
            if ktable and f in ktable.fields:
                if ktable._tablename not in search_field:
                    search_field[ktablename] = [ktable[f]]
                else:
                    search_field[ktablename].append(ktable[f])

        self.master_query = master_query
        self.search_field = search_field
# =============================================================================
class S3SearchSimpleWidget(S3SearchWidget):
    """
        Simple full-text search widget
    """

    def widget(self,
               resource,
               vars=None,
               name=None,
               value=None,
               autocomplete=None):
        """
            Returns the widget

            @param resource: the resource to search in
            @param vars: the URL GET variables as dict
            @param name: name for the INPUT (overrides the configured name)
            @param value: the default value for the INPUT
            @param autocomplete: the autocomplete attribute for the INPUT
        """
        attr = self.attr

        # SearchAutocomplete must set name depending on the field
        if name:
            attr.update(_name=name)

        if "_size" not in attr:
            attr.update(_size="40")
        if "_name" not in attr:
            attr.update(_name="%s_search_simple" % resource.name)
        if "_id" not in attr:
            attr.update(_id="%s_search_simple" % resource.name)
        if autocomplete:
            attr.update(_autocomplete=autocomplete)
        attr.update(_type="text")

        self.name = attr._name

        # Search Autocomplete - Display current value
        attr["_value"] = vars.get(self.name, value) if vars else value

        return INPUT(**attr)

    # -------------------------------------------------------------------------
    def query(self, resource, value):
        """
            Returns a sub-query for this search option

            @param resource: the resource to search in
            @param value: the value returned from the widget
        """
        # Fix: accept unicode search strings as well (web2py request vars
        # can be unicode under Python 2) - previously only plain str values
        # produced a query, unicode input was silently ignored
        if value and isinstance(value, basestring):
            final_query = None

            # All whitespace-separated terms must match (AND)
            for term in value.split():
                field_queries = None

                # Each term may match in any of the search fields (OR)
                for field in self.field:
                    s = S3FieldSelector(field).lower()
                    field_query = s.like("%%%s%%" % term.lower())
                    if field_queries:
                        field_queries = field_query | field_queries
                    else:
                        field_queries = field_query

                if final_query:
                    final_query = field_queries & final_query
                else:
                    final_query = field_queries

            return final_query
        else:
            return None
# =============================================================================
class S3SearchMinMaxWidget(S3SearchWidget):
    """
        Min/Max search widget for numeric fields
    """

    def widget(self, resource, vars):
        """
            Returns the widget

            @param resource: the resource to search in
            @param vars: the URL GET variables as dict
        """
        T = current.T
        settings = current.deployment_settings

        self.names = []
        attr = self.attr
        # "method" selects which inputs to render: "min", "max" or "range"
        self.method = attr.get("method", "range")
        select_min = self.method in ("min", "range")
        select_max = self.method in ("max", "range")

        self.widmin = Storage()
        self.widmax = Storage()

        if not self.search_field:
            self.build_master_query(resource)

        search_field = self.search_field.values()
        if not search_field:
            return SPAN(T("no options available"),
                        _class="no-options-available")

        # Use the first search field to determine the input type/validator
        search_field = search_field[0][0]

        ftype = str(search_field.type)
        input_min = input_max = None
        if ftype == "integer":
            requires = IS_EMPTY_OR(IS_INT_IN_RANGE())
        elif ftype == "date":
            attr.update(_class="date")
            requires = IS_EMPTY_OR(IS_DATE(format=settings.get_L10n_date_format()))
        elif ftype == "time":
            attr.update(_class="time")
            requires = IS_EMPTY_OR(IS_TIME())
        elif ftype == "datetime":
            attr.update(_class="datetime")
            requires = IS_EMPTY_OR(IS_DATETIME(format=settings.get_L10n_datetime_format()))
        else:
            raise SyntaxError("Unsupported search field type")

        attr.update(_type="text")

        trl = TR(_class="sublabels")
        tri = TR()

        # dictionaries for storing details of the input elements
        # NOTE(review): both dicts reference the same "attr" object;
        # widget_input() copies it before per-input mutation
        name = attr["_name"]

        self.widmin = dict(name="%s_min" % name,
                           label=T("min"),
                           requires=requires,
                           attributes=attr)

        self.widmax = dict(name="%s_max" % name,
                           label=T("max"),
                           requires=requires,
                           attributes=attr)

        if select_min:
            min_label = self.widget_label(self.widmin)
            min_input = self.widget_input(self.widmin)

            self.names.append(self.widmin["name"])
            trl.append(min_label)
            tri.append(min_input)

        if select_max:
            max_label = self.widget_label(self.widmax)
            max_input = self.widget_input(self.widmax)

            self.names.append(self.widmax["name"])
            trl.append(max_label)
            tri.append(max_input)

        w = TABLE(trl, tri, _class="s3searchminmaxwidget")

        return w

    # -------------------------------------------------------------------------
    @staticmethod
    def widget_label(widget):
        """
            @param widget: dict with the name, label, requires and
                           attributes for the input element
            @return: LABEL
        """
        return LABEL(widget["label"], _for="id-%s" % widget["name"])

    # -------------------------------------------------------------------------
    @staticmethod
    def widget_input(widget):
        """
            @param widget: dict with the name, label, requires and
                           attributes for the input element
            @return: INPUT
        """
        attr = widget["attributes"].copy()
        attr.update(_name=widget["name"],
                    _id="id-%s" % widget["name"])
        return INPUT(requires=widget["requires"], **attr)

    # -------------------------------------------------------------------------
    def validate(self, resource, value):
        """
            Validate the input values of the widget

            @param resource: the resource to search in
            @param value: dict of input-name -> value
            @return: dict of input-name -> error message, or None if valid
        """
        T = current.T
        errors = dict()

        select_min = self.method in ("min", "range")
        select_max = self.method in ("max", "range")

        if select_min and select_max:
            vmin = value.get(self.widmin["name"], None)
            vmax = value.get(self.widmax["name"], None)

            # NOTE(review): if the raw form values are still strings here,
            # this comparison is lexicographic, not numeric - TODO confirm
            # that values have been type-converted before validate()
            if vmax is not None and vmin is not None and vmin > vmax:
                errors[self.widmax["name"]] = \
                    T("Maximum must be greater than minimum")

        return errors or None

    # -------------------------------------------------------------------------
    def query(self, resource, value):
        """
            Returns a sub-query for this search option

            @param resource: the resource to search in
            @param value: the value returned from the widget
        """
        select_min = self.method in ("min", "range")
        select_max = self.method in ("max", "range")

        min_query = max_query = query = None

        if select_min:
            v = value.get(self.widmin["name"], None)
            if v is not None and str(v):
                min_query = S3FieldSelector(self.field) >= v

        if select_max:
            v = value.get(self.widmax["name"], None)
            if v is not None and str(v):
                max_query = S3FieldSelector(self.field) <= v

        # Combine min and max into a single sub-query (AND)
        if min_query is not None:
            query = min_query
            if max_query is not None:
                query = query & max_query
        else:
            query = max_query

        return query
# =============================================================================
class S3SearchOptionsWidget(S3SearchWidget):
    """
        Option select widget for option or boolean fields

        Displays a search widget which allows the user to search for records
        with fields matching a certain criteria.

        Field must be an integer or reference to work on all versions of
        gluon/sqlhtml.py

        @param represent: If the field is a reference, represent can pass a
                          formatting string with mapping fields to the
                          referenced record.
        @param cols: The number of columns which the options will be
                     displayed in
    """

    def __init__(self, field=None, name=None, options=None, null=False, **attr):
        """
            Configures the search option

            @param field: name(s) of the fields to search in
            @param name: used to build the HTML ID of the widget
            @param options: either a value:label dictionary to populate the
                            search widget or a callable to create this
            @param null: False if no null value to be included in the options,
                         otherwise a LazyT for the label

            @keyword label: a label for the search widget
            @keyword location_level: If-specified then generate a label when
                                     rendered based on the current hierarchy
            @keyword comment: a comment for the search widget
        """
        super(S3SearchOptionsWidget, self).__init__(field, name, **attr)
        self.options = options
        self.null = null

    # -------------------------------------------------------------------------
    def widget(self, resource, vars):
        """
            Returns the widget

            @param resource: the resource to search in
            @param vars: the URL GET variables as dict
        """
        T = current.T

        field_name = self.field
        attr = self.attr

        name = attr.pop("_name",
                        "%s_search_select_%s" % (resource.name,
                                                 field_name))
        self.name = name

        if "location_level" in attr:
            # This is searching a Location Hierarchy, so lookup the label now
            level = attr["location_level"]
            hierarchy = current.gis.get_location_hierarchy()
            if level in hierarchy:
                label = hierarchy[level]
            else:
                label = level
            attr["label"] = label

        # Populate the field value from the GET parameter
        if vars and name in vars:
            value = vars[name]
        else:
            value = None

        fs = S3FieldSelector(field_name)
        fl = fs.resolve(resource)
        field = fl.field

        # Check the field type (field is None for virtual fields)
        if field is not None:
            field_type = str(field.type)
        else:
            field_type = "virtual"

        if self.options is not None:
            # Custom dict of options {value: label} or callable
            if isinstance(self.options, dict):
                options = self.options
            elif callable(self.options):
                options = self.options()
            opt_values = options.keys()
        else:
            if field_type == "boolean":
                opt_values = (True, False)
            else:
                # Collect the distinct values present in the records
                multiple = field_type[:5] == "list:"
                groupby = field if field and not multiple else None
                virtual = field is None
                rows = resource.select(fields=[field_name],
                                       start=None,
                                       limit=None,
                                       orderby=field,
                                       groupby=groupby,
                                       virtual=virtual)
                opt_values = []
                if rows:
                    opt_extend = opt_values.extend
                    opt_append = opt_values.append
                    if multiple:
                        for row in rows:
                            values = row[field]
                            if values:
                                opt_extend([v for v in values
                                            if v is not None and
                                            v not in opt_values])
                    else:
                        for row in rows:
                            v = row[field]
                            if v is not None and v not in opt_values:
                                opt_append(v)

        if len(opt_values) < 1:
            msg = attr.get("_no_opts", T("No options available"))
            return SPAN(msg, _class="no-options-available")

        if self.options is None:
            opt_list = []

            # Always use the represent of the widget, if present
            represent = attr.represent
            # Fallback to the field's represent
            if not represent or field_type[:9] != "reference":
                # Fix: guard against virtual fields (field is None) -
                # previously this raised an AttributeError; now falls
                # through to the plain string representation below
                represent = field.represent if field is not None else None

            if callable(represent):
                # Execute, if callable
                # NOTE: func_code is Python-2-only introspection
                if "show_link" in represent.func_code.co_varnames:
                    opt_list = [(opt_value, represent(opt_value, show_link=False)) for opt_value
                                in opt_values]
                else:
                    opt_list = [(opt_value, represent(opt_value)) for opt_value
                                in opt_values]
            elif isinstance(represent, str) and field_type[:9] == "reference":
                # Feed the format string
                # Use the represent string to reduce db calls
                # Find the fields which are needed to represent:
                db = current.db
                ktable = db[field_type[10:]]
                fieldnames = ["id"]
                fieldnames += re.findall("%\(([a-zA-Z0-9_]*)\)s", represent)
                represent_fields = [ktable[fieldname] for fieldname in fieldnames]
                query = (ktable.id.belongs(opt_values)) & (ktable.deleted == False)
                represent_rows = db(query).select(*represent_fields).as_dict(key=represent_fields[0].name)
                opt_list = []
                for opt_value in opt_values:
                    opt_represent = represent % represent_rows[opt_value]
                    if opt_represent:
                        opt_list.append([opt_value, opt_represent])
            else:
                # Straight string representations of the values
                opt_list = [(opt_value, s3_unicode(opt_value))
                            for opt_value in opt_values if opt_value]

            options = dict(opt_list)

        # Dummy field
        dummy_field = Storage(name=name,
                              type=field_type,
                              requires=IS_IN_SET(options,
                                                 multiple=True))

        # For many-to-many fields the user can search for records containing
        # all the options or any of the options.
        if len(options) > 1 and field_type[:4] == "list":
            self.filter_type = vars.get("%s_filter" % name, "any")
            any_all = DIV(
                T("Filter type "),
                INPUT(_name="%s_filter" % name,
                      _id="%s_filter_all" % name,
                      _type="radio",
                      _value="all",
                      value=self.filter_type),
                LABEL(T("All"),
                      _for="%s_filter_all" % name),
                INPUT(_name="%s_filter" % name,
                      _id="%s_filter_any" % name,
                      _type="radio",
                      _value="any",
                      value=self.filter_type),
                LABEL(T("Any"),
                      _for="%s_filter_any" % name),
                _class="s3-checkboxes-widget-filter"
            )
        else:
            any_all = ""

        return TAG[""](any_all,
                       s3_grouped_checkboxes_widget(dummy_field,
                                                    value,
                                                    **attr))

    # -------------------------------------------------------------------------
    def query(self, resource, value):
        """
            Returns a sub-query for this search option

            @param resource: the resource to search in
            @param value: the value returned from the widget
        """
        field_name = self.field

        if value:
            if not isinstance(value, (list, tuple)):
                value = [value]

            fs = S3FieldSelector(field_name)
            fl = fs.resolve(resource)

            try:
                table_field = fl.field
            except:
                table_field = None

            # What do we do if we need to search within a virtual field
            # that is a list:* ?
            if table_field and str(table_field.type).startswith("list"):
                # Fix: self.filter_type is only set when the widget has
                # been rendered - default to "any" for URL-driven searches
                # (previously raised AttributeError)
                if getattr(self, "filter_type", "any") == "any":
                    query = S3FieldSelector(field_name).anyof(value)
                else:
                    query = S3FieldSelector(field_name).contains(value)
            elif "None" in value:
                # Needs special handling (doesn't show up in 'belongs')
                query = S3FieldSelector(field_name) == None
                opts = [v for v in value if v != "None"]
                if opts:
                    query = query | S3FieldSelector(field_name).belongs(opts)
            else:
                query = S3FieldSelector(field_name).belongs(value)

            return query
        else:
            return None
# =============================================================================
class S3SearchLocationWidget(S3SearchWidget):
    """
        Interactive location search widget
        - allows the user to select a BBOX & have only results from within
          that BBOX returned

        @ToDo: Have an option to use a Circular Radius
               http://openlayers.org/dev/examples/regular-polygons.html
        @ToDo: Have an option to use a full Polygon
               Hard to process this as a resource filter
    """

    def __init__(self,
                 field="location_id",
                 name=None, # Needs to be specified by caller
                 **attr):
        """
            Initialise parent class & make any necessary modifications
        """
        super(S3SearchLocationWidget, self).__init__(field, name, **attr)

    # -------------------------------------------------------------------------
    def widget(self, resource, vars):
        """
            Returns the widget

            @param resource: the resource to search in
            @param vars: the URL GET variables as dict
        """
        # No map widget for plain (popup) representations
        if current.auth.permission.format == "plain":
            return None

        # The widget is only usable with Shapely available
        try:
            from shapely.wkt import loads as wkt_loads
        except ImportError:
            from s3utils import s3_debug
            s3_debug("WARNING: %s: Shapely GIS library not installed" % __name__)
            return None

        T = current.T
        attr = self.attr

        # Default comment for the widget
        if "comment" not in attr:
            attr.update(comment=T("Draw a square to limit the results to just those within the square."))
        self.comment = attr.comment

        # Hidden INPUT which stores the selected polygon (WKT)
        polygon_input = INPUT(_id="gis_search_polygon_input",
                              _name=attr._name,
                              _class="hide")

        # Buttons to open the map and to clear the current selection
        # (the map popup itself is reused from dataTables)
        map_buttons = TAG[""](BUTTON(T("Open Map"),
                                     _id="gis_search_map-btn"),
                              BUTTON(T("Clear selection"),
                                     _id="gis_search_polygon_input_clear"))

        # Settings to be read by static/scripts/S3/s3.gis.js
        js_location_search = '''S3.gis.draw_polygon=true'''

        # The overall layout of the components
        return TAG[""](
            polygon_input,
            map_buttons,
            SCRIPT(js_location_search)
        )

    # -------------------------------------------------------------------------
    @staticmethod
    def query(resource, value):
        """
            Returns a sub-query for this search option

            @param resource: the resource to search in
            @param value: the value returned from the widget: WKT format
        """
        if not value:
            return None

        # @ToDo: if current.deployment_settings.get_gis_spatialdb():
        #        use a PostGIS-optimised st_intersects() filter instead
        from shapely.wkt import loads as wkt_loads
        try:
            shape = wkt_loads(value)
        except:
            from s3utils import s3_debug
            s3_debug("WARNING: S3Search: Invalid WKT")
            return None

        # Return all locations which have a part of themselves inside the
        # BBOX of the polygon. This requires the locations to have their
        # bounds set properly, which can be done globally using:
        # gis.update_location_tree()
        lon_min, lat_min, lon_max, lat_max = shape.bounds
        return (S3FieldSelector("location_id$lat_min") <= lat_max) & \
               (S3FieldSelector("location_id$lat_max") >= lat_min) & \
               (S3FieldSelector("location_id$lon_min") <= lon_max) & \
               (S3FieldSelector("location_id$lon_max") >= lon_min)
# =============================================================================
class S3SearchCredentialsWidget(S3SearchOptionsWidget):
    """
        Options Widget to search for HRMs with specified Credentials
    """

    def widget(self, resource, vars):
        # Always render the options of the hrm_credential resource
        credentials = S3Resource("hrm_credential")
        return S3SearchOptionsWidget.widget(self, credentials, vars)

    # -------------------------------------------------------------------------
    @staticmethod
    def query(resource, value):
        """
            Sub-query matching human resources holding any of the
            selected job roles (via hrm_credential)
        """
        if not value:
            return None
        s3db = current.s3db
        htable = s3db.hrm_human_resource
        ptable = s3db.pr_person
        ctable = s3db.hrm_credential
        return (htable.person_id == ptable.id) & \
               (htable.deleted != True) & \
               (ctable.person_id == ptable.id) & \
               (ctable.deleted != True) & \
               (ctable.job_role_id.belongs(value))
# =============================================================================
class S3SearchSkillsWidget(S3SearchOptionsWidget):
    """
        Options Widget to search for HRMs with specified Skills

        @ToDo: Provide a filter for confirmed/unconfirmed only
               (latter useful to see who needs confirming)

        @ToDo: Provide a filter for level of competency
               - meanwhile at least sort by level of competency
    """

    # -------------------------------------------------------------------------
    def widget(self, resource, vars):
        # Always render the options of the hrm_competency resource
        competencies = S3Resource("hrm_competency")
        return S3SearchOptionsWidget.widget(self, competencies, vars)

    # -------------------------------------------------------------------------
    @staticmethod
    def query(resource, value):
        """
            Sub-query matching human resources holding any of the
            selected skills (via hrm_competency)
        """
        if not value:
            return None
        s3db = current.s3db
        htable = s3db.hrm_human_resource
        ptable = s3db.pr_person
        ctable = s3db.hrm_competency
        return (htable.person_id == ptable.id) & \
               (htable.deleted != True) & \
               (ctable.person_id == ptable.id) & \
               (ctable.deleted != True) & \
               (ctable.skill_id.belongs(value))
# =============================================================================
class S3Search(S3CRUD):
"""
RESTful Search Method for S3Resources
"""
def __init__(self, simple=None, advanced=None, any=False, **args):
"""
Constructor
@param simple: the widgets for the simple search form as list
@param advanced: the widgets for the advanced search form as list
@param any: match "any of" (True) or "all of" (False) the options
in advanced search
"""
S3CRUD.__init__(self)
args = Storage(args)
if simple is None:
if "field" in args:
if "name" in args:
name = args.name
elif "_name" in args:
name = args._name
else:
name = "search_simple"
simple = S3SearchSimpleWidget(field=args.field,
name=name,
label=args.label,
comment=args.comment)
# Create a list of Simple search form widgets, by name,
# and throw an error if a duplicate is found
names = []
self.simple = []
if not isinstance(simple, (list, tuple)):
simple = [simple]
for widget in simple:
if widget is not None:
name = widget.attr._name
if name in names:
raise SyntaxError("Duplicate widget: %s") % name
# Widgets should be able to have default names
# elif not name:
# raise SyntaxError("Widget with no name")
else:
self.simple.append((name, widget))
names.append(name)
# Create a list of Advanced search form widgets, by name,
# and throw an error if a duplicate is found
names = []
self.advanced = []
append = self.advanced.append
if not isinstance(advanced, (list, tuple)):
advanced = [advanced]
for widget in advanced:
if widget is not None:
name = widget.attr._name
if name in names:
raise SyntaxError("Duplicate widget: %s" % name)
# Widgets should be able to have default names
# elif not name:
# raise SyntaxError("Widget with no name")
else:
append((name, widget))
names.append(name)
self.__any = any
if self.simple or self.advanced:
self.__interactive = True
else:
self.__interactive = False
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Entry point to apply search method to S3Requests
@param r: the S3Request
@param attr: request attributes
"""
format = r.representation
output = dict()
if r.component and self != self.resource.search:
output = self.resource.search(r, **attr)
# Autocomplete-Widget
elif "is_autocomplete" in attr:
output = self.search_autocomplete(r, **attr)
# Interactive or saved search
elif r.interactive and self.__interactive:
output = self.search_interactive(r, **attr)
# SSPag response => CRUD native
elif format == "aadata" and self.__interactive:
output = self.select(r, **attr)
# JSON search
elif format == "json":
output = self.search_json(r, **attr)
# Autocomplete-JSON search
elif format == "acjson":
output = self.search_json_autocomplete(r, **attr)
# Search form for popup on Map Layers
elif format == "plain":
output = self.search_interactive(r, **attr)
elif format == "email":
output = self.email(r, **attr)
# Not supported
else:
r.error(501, current.manager.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
@staticmethod
def _build_widget_query(resource, name, widget, form, query):
"""
@todo: docstring
"""
errors = None
if hasattr(widget, "names"):
value = Storage([(name, form.vars[name])
for name in widget.names
if name in form.vars])
elif name in form.vars:
value = form.vars[name]
else:
value = None
if hasattr(widget, "validate"):
errors = widget.validate(resource, value)
if not errors:
q = widget.query(resource, value)
if q is not None:
if query is None:
query = q
else:
query = query & q
return (query, errors)
# -------------------------------------------------------------------------
    def save_search_widget(self, r, query, **attr):
        """
            Add a widget to a Search form to allow saving this search to the
            user's profile, to which they can subscribe

            @param r: the S3Request
            @param query: the search query to save
            @return: TAG[""] with the save-button and its inline script
        """
        import urllib  # Python-2 stdlib; urlencode lives here in Python 2

        T = current.T

        person_id = current.auth.s3_logged_in_person()
        resource = self.resource
        # Options read by the client-side script via S3.search.saveOptions
        save_options = {
            "url": URL(c="pr", f="saved_search", vars={"format": "s3json"}),
            "url_detail": URL(c="pr", f="person", args=[person_id, "saved_search", "<id>", "update"]),
            "data": json.dumps({
                "$_pr_saved_search": [
                    {
                        "controller": r.controller,
                        "function": r.function,
                        "prefix": resource.prefix,
                        "resource_name": resource.name,
                        "url": r.url(
                            # Can't use the search method handler because then
                            # we can't get different formats
                            method = "search", # want to see search form
                            vars = query.serialize_url(resource),
                        ),
                        "filters": urllib.urlencode(query.serialize_url(resource)),
                    },
                ],
            }),
        }

        widget = TAG[""](BUTTON(T("Save this search"),
                                _id="save-search"),
                         SCRIPT('''
S3.search.saveOptions=%s
i18n.edit_saved_search="%s"
''' % (json.dumps(save_options),
       T("Edit saved search"))))

        return widget
# -------------------------------------------------------------------------
    def email(self, r, **kwargs):
        """
            Take a search request and render it through a template
            to format it for email notifications.

            @param r: S3Request object
            @return: the rendered message body, or "" if there are no
                     matching records to report
        """
        represent = current.manager.represent

        # Saved search is optional, but used to filter results and
        # put save search name into the output
        search_subscription = current.request.get_vars.get("search_subscription", None)
        if search_subscription:
            search = current.db(current.s3db.pr_saved_search.auth_token == search_subscription).select().first()
        else:
            search = None

        if search:
            controller = search.controller
            function = search.function
            # last_checked splits results into "new" vs "modified" below
            last_checked = search.last_checked

        list_fields = self._config("list_fields")

        # Create a list of the names, not labels, from list_fields
        field_names = []
        for f in list_fields:
            if f != "id":
                if isinstance(f, tuple):
                    field_names.append(f[1]) # (label, name)
                else:
                    field_names.append(f)

        # Get the field objects based on list_fields
        fields = self.resource.readable_fields(field_names)

        # We don't want to show the "id" field at all
        head_row = TR([TH(f.label) for f in fields if f.name != "id"])

        new_rows = []
        nappend = new_rows.append
        mod_rows = []
        mappend = mod_rows.append

        for row in self.resource.load():
            first_cell = True # link only the first cell of each row
            row_cells = []
            rappend = row_cells.append

            for f in fields:
                rep_value = represent(f, record=row)

                # Hyperlink the text in the first
                # cell to the record page
                if first_cell and search:
                    url = URL(c=controller, f=function, args=row["id"],
                              scheme=True)
                    rep_value = A(rep_value, _href=url)
                    first_cell = False

                rappend(TD(XML(rep_value)))

            if row_cells:
                tr = TR(*row_cells)

                if search and row.created_on >= last_checked:
                    # Records are either "new" or "modified"
                    nappend(tr)
                else:
                    mappend(tr)

        # Nothing to notify about
        if not new_rows and not mod_rows:
            return ""

        # Generate a table for the new records
        if new_rows:
            new_table = TABLE(THEAD(head_row),
                              TBODY(*new_rows))
        else:
            new_table = None

        # Generate a table for updated records
        if mod_rows:
            mod_table = TABLE(THEAD(head_row),
                              TBODY(*mod_rows))
        else:
            mod_table = None

        if search:
            search_name = search.name
        else:
            search_name = ""

        response = current.response
        crud_strings = response.s3.crud_strings[self.resource.tablename]
        if crud_strings:
            resource_name = crud_strings.title_list
        else:
            # Fall back to a prettified version of the resource name
            resource_name = string.capwords(self.resource.name, "_")

        # Render the records via a template
        message = response.render(
            "msg/notification_email.html",
            {
                "search_name": search_name,
                "new_table": new_table,
                "mod_table": mod_table,
                "system_name": current.deployment_settings.get_system_name(),
                "resource_name": resource_name,
            }
        )

        return message
# -------------------------------------------------------------------------
def search_interactive(self, r, **attr):
"""
Interactive search
@param r: the S3Request instance
@param attr: request parameters
"""
from s3.s3utils import S3DataTable
T = current.T
session = current.session
request = self.request
response = current.response
s3 = response.s3
resource = self.resource
settings = current.deployment_settings
db = current.db
s3db = current.s3db
table = self.table
tablename = self.tablename
representation = r.representation
# Initialize output
output = dict()
# Get table-specific parameters
config = self._config
sortby = config("sortby", [[1, "asc"]])
orderby = config("orderby", None)
list_fields = config("list_fields")
insertable = config("insertable", True)
# Initialize the form
form = DIV(_class="search_form form-container")
# Figure out which set of form values to use
# POST > GET > session > unfiltered
if r.http == "POST":
# POST
form_values = r.post_vars
else:
url_options = Storage([(k, v) for k, v in r.get_vars.iteritems() if v])
if url_options:
# GET
form_values = url_options
else:
session_options = session.s3.search_options
if session_options and tablename in session_options:
# session
if "clear_opts" in r.get_vars:
session_options = Storage()
else:
session_options = session_options[tablename]
else:
# unfiltered
session_options = Storage()
form_values = session_options
if "clear_opts" in r.get_vars:
del r.get_vars["clear_opts"]
if "clear_opts" in r.vars:
del r.vars["clear_opts"]
# Remove the existing session filter if this is a new
# search (@todo: do not store the filter in session)
if r.http == "GET" and r.representation != "aadata":
if "filter" in session.s3:
del session.s3["filter"]
# Build the search forms
simple_form, advanced_form = self.build_forms(r, form_values)
# Process the search forms
query, errors = self.process_forms(r,
simple_form,
advanced_form,
form_values)
search_url = None
search_url_vars = Storage()
save_search = ""
if not errors:
if hasattr(query, "serialize_url"):
search_url_vars = query.serialize_url(resource)
search_url = r.url(method = "",
vars = search_url_vars)
# Create a Save Search widget
save_search = self.save_search_widget(r, query, **attr)
resource.add_filter(query)
search_vars = dict(simple=False,
advanced=True,
criteria=form_values)
else:
search_vars = dict()
s3.formats.pdf = r.url(method="",
vars=search_url_vars,
representation="pdf")
s3.formats.xls = r.url(method="",
vars=search_url_vars,
representation="xls")
s3.formats.rss = r.url(method="",
vars=search_url_vars,
representation="rss")
if representation == "plain":
# Map popup filter
# Return just the advanced form, no results
form.append(advanced_form)
output["item"] = form
response.view = self._view(r, "plain.html")
return output
if s3.simple_search:
form.append(DIV(_id="search-mode", _mode="simple"))
else:
form.append(DIV(_id="search-mode", _mode="advanced"))
# Complete the output form-DIV()
if simple_form is not None:
# Insert the save button next to the submit button
simple_form[0][-1][1].insert(1, save_search)
form.append(simple_form)
if advanced_form is not None:
# Insert the save button next to the submit button
advanced_form[0][-1][1].insert(1, save_search)
form.append(advanced_form)
output["form"] = form
# List fields
if not list_fields:
fields = resource.readable_fields()
list_fields = [f.name for f in fields]
else:
fields = [table[f] for f in list_fields if f in table.fields]
if not fields:
fields = []
if fields[0].name != table.fields[0]:
fields.insert(0, table[table.fields[0]])
if list_fields[0] != table.fields[0]:
list_fields.insert(0, table.fields[0])
# Count rows
totalrows = resource.count()
displayrows = totalrows
# How many records per page?
if s3.dataTable_iDisplayLength:
display_length = s3.dataTable_iDisplayLength
else:
display_length = 25
# Server-side pagination?
if not s3.no_sspag:
dt_pagination = "true"
limit = 2 * display_length
# Build session filter for data tables
# @todo: do not use session to store filter
ids = resource.get_id()
if ids:
if not isinstance(ids, list):
ids = str(ids)
else:
ids = ",".join([str(i) for i in ids])
session.s3.filter = {"%s.id" % resource.name: ids}
else:
limit = None
dt_pagination = "false"
if not orderby:
orderby = fields[0]
# Truncate long texts
if r.interactive or representation == "aadata":
for f in table:
if str(f.type) == "text" and not f.represent:
f.represent = self.truncate
# Remove the dataTables search box to avoid confusion
s3.dataTable_NobFilter = True
# Get the data table
dt = resource.datatable(fields=list_fields,
start=None,
limit=limit,
#left=left,
#distinct=distinct,
orderby=orderby)
if dt is None:
datatable = self.crud_string(tablename, "msg_no_match")
s3.no_formats = True
else:
datatable = dt.html(totalrows, displayrows, "list",
dt_pagination=dt_pagination,
dt_displayLength=display_length,
dt_permalink=search_url)
# Add items to output
output["items"] = datatable
items = output["items"]
if isinstance(items, DIV):
filter = session.s3.filter
app = request.application
tabs = []
if "location_id" in table or \
"site_id" in table:
# Add a map for search results
# (this same map is also used by the Map Search Widget, if-present)
tabs.append((T("Map"), "map"))
app = request.application
# Build URL to load the features onto the map
if query:
vars = query.serialize_url(resource=resource)
else:
vars = None
url = URL(extension="geojson",
args=None,
vars=vars)
gis = current.gis
feature_resources = [{
"name" : T("Search Results"),
"id" : "search_results",
"url" : url,
"active" : False, # Gets activated when the Map is opened up
"marker" : gis.get_marker(request.controller, request.function)
}]
map_popup = gis.show_map(
feature_resources=feature_resources,
# Added by search widget onClick in s3.dataTables.js
#add_polygon = True,
#add_polygon_active = True,
catalogue_layers=True,
legend=True,
toolbar=True,
collapsed=True,
#search = True,
window=True,
window_hide=True
)
s3.dataTableMap = map_popup
if settings.has_module("msg") and \
("pe_id" in table or "person_id" in table) and \
current.auth.permission.has_permission("update", c="msg"):
# Provide the ability to Message person entities in search results
tabs.append((T("Message"), "compose"))
if tabs:
tabs.insert(0, ((T("List"), None)))
# @todo: this needs rework
# - s3FormatRequest must retain any URL filters
# - s3FormatRequest must remove the "search" method
# - other data formats could have other list_fields,
# hence applying the datatable sorting/filters is
# not transparent
#if not s3.datatable_ajax_source:
#s3.datatable_ajax_source = str(r.url(representation = "aaData"))
#s3.formats.pdf = r.url(method="")
#s3.formats.xls = r.url(method="")
#s3.formats.rss = r.url(method="")
#attr = S3DataTable.getConfigData()
#items = S3DataTable.htmlConfig(items,
#"list",
#sortby, # order by
##filter, # the filter string
#None, # the rfields
#**attr
#)
#items[0].insert(0, sep)
#items[0].insert(0, link)
else:
tabs = []
output["items"] = items
output["sortby"] = sortby
# Search Tabs
search_tabs = s3_search_tabs(r, tabs)
output["search_tabs"] = search_tabs
# Title and subtitle
output["title"] = self.crud_string(tablename, "title_search")
output["subtitle"] = self.crud_string(tablename, "msg_match")
# View
response.view = self._view(r, "search.html")
# RHeader gets added later in S3Method()
return output
# -------------------------------------------------------------------------
    def process_forms(self, r, simple_form, advanced_form, form_values):
        """
        Validate the form values against the forms. If valid, generate
        and return a query object. Otherwise return an empty query and
        the errors.
        If valid, save the values into the users' session.

        @param r: the S3Request
        @param simple_form: the simple search FORM (or None)
        @param advanced_form: the advanced search FORM (or None)
        @param form_values: the submitted form values (dict-like)

        NOTE(review): the early return below yields a 3-tuple
        (query, errors, simple) while the final return yields a 2-tuple
        (query, errors) - callers must cope with both arities; confirm
        whether this asymmetry is intended.
        """
        s3 = current.session.s3

        query = None
        errors = None

        # Create a container in the session to saves search options
        if "search_options" not in s3:
            s3.search_options = Storage()

        # Process the simple search form:
        simple = simple_form is not None
        if simple_form is not None:
            if simple_form.accepts(form_values,
                                   formname="search_simple"):
                # Chain every simple widget's sub-query into one query
                for name, widget in self.simple:
                    query, errors = self._build_widget_query(self.resource,
                                                             name,
                                                             widget,
                                                             simple_form,
                                                             query)
                    if errors:
                        simple_form.errors.update(errors)
                errors = simple_form.errors
                # Save the form values into the session
                # (only non-empty values are retained)
                s3.search_options[self.tablename] = \
                    Storage([(k, v) for k, v in form_values.iteritems() if v])
            elif simple_form.errors:
                errors = simple_form.errors
                # Validation failed => return immediately (3-tuple!)
                return query, errors, simple

        # Process the advanced search form:
        if advanced_form is not None:
            if advanced_form.accepts(form_values,
                                     formname="search_advanced"):
                # Advanced form was the one submitted
                simple = False
                resource = self.resource
                for name, widget in self.advanced:
                    query, errors = self._build_widget_query(resource,
                                                             name,
                                                             widget,
                                                             advanced_form,
                                                             query)
                    if errors:
                        advanced_form.errors.update(errors)
                errors = advanced_form.errors
                # Save the form values into the session
                s3.search_options[self.tablename] = \
                    Storage([(k, v) for k, v in form_values.iteritems() if v])
            elif advanced_form.errors:
                simple = False

        # Remember which form should be shown on re-render
        current.response.s3.simple_search = simple

        return (query, errors)
# -------------------------------------------------------------------------
def build_forms(self, r, form_values=None):
"""
Builds a form customised to the module/resource.
"""
simple = self.simple
advanced = self.advanced
T = current.T
tablename = self.tablename
representation = r.representation
simple_form = None
advanced_form = None
opts = Storage(r.get_vars)
opts["clear_opts"] = "1"
clear_opts = A(T("Reset all filters"),
_href=r.url(vars=opts),
_class="action-lnk")
# Simple search form
if simple:
# Switch-link
if advanced:
switch_link = A(T("Advanced Search"), _href="#",
_class="action-lnk advanced-lnk")
else:
switch_link = ""
simple_form = self._build_form(simple,
form_values=form_values,
clear=clear_opts,
switch=switch_link,
_class="simple-form")
# Advanced search form
if advanced:
if simple and not r.representation == "plain":
switch_link = A(T("Simple Search"), _href="#",
_class="action-lnk simple-lnk")
_class = "%s hide"
else:
switch_link = ""
_class = "%s"
advanced_form = self._build_form(advanced,
form_values=form_values,
clear=clear_opts,
switch=switch_link,
_class=_class % "advanced-form")
return (simple_form, advanced_form)
# -------------------------------------------------------------------------
def _build_form(self, widgets,
form_values=None,
clear="",
switch="",
**attr):
"""
Render a search form.
@param widgets: the widgets
@param form_values: the form values (as dict) to pass to
the widgets
@param clear: the clear-values action link
@param switch: the switch-forms action link
@param attr: HTML attributes for the form
@returns: a FORM instance
"""
T = current.T
request = self.request
resource = self.resource
trows = []
for name, widget in widgets:
_widget = widget.widget(resource, form_values)
if _widget is None:
# Skip this widget as we have nothing but the label
continue
label = widget.field
if isinstance(label, (list, tuple)) and len(label):
label = label[0]
comment = ""
if hasattr(widget, "attr"):
label = widget.attr.get("label", label)
comment = widget.attr.get("comment", comment)
tr = TR(TD("%s: " % label, _class="w2p_fl"), _widget)
if comment:
tr.append(DIV(DIV(_class="tooltip",
_title="%s|%s" % (label, comment))))
trows.append(tr)
trows.append(TR("", TD(INPUT(_type="submit", _value=T("Search")),
clear, switch)))
form = FORM(TABLE(trows), **attr)
return form
# -------------------------------------------------------------------------
    def search_json(self, r, **attr):
        """
        JSON search method for S3AutocompleteWidget

        Filters the resource by the requested field/filter/value and
        returns the matching records as JSON (id + field, plus
        instance_type for org_site).

        @param r: the S3Request
        @param attr: request attributes
        @raises: HTTP 400 on unsupported filter or missing options
        """
        output = None
        _vars = self.request.vars
        # JQueryUI Autocomplete uses "term" instead of "value"
        # (old JQuery Autocomplete uses "q" instead of "value")
        value = _vars.value or _vars.term or _vars.q or None
        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        # NOTE(review): raises AttributeError if no value was passed
        # (None.lower()) - confirm callers always send a value
        value = value.lower().strip()
        if _vars.field and _vars.filter and value:
            s3db = current.s3db
            resource = self.resource
            table = self.table
            limit = int(_vars.limit or 0)
            fieldname = str.lower(_vars.field)
            field = table[fieldname]
            # Default fields to return
            fields = [table.id, field]
            if self.tablename == "org_site":
                # Simpler to provide an exception case than write a whole new class
                table = s3db.org_site
                fields.append(table.instance_type)
            filter = _vars.filter
            if filter == "~":
                # Normal single-field Autocomplete (prefix match)
                query = (field.lower().like(value + "%"))
            elif filter == "=":
                if field.type.split(" ")[0] in \
                   ["reference", "id", "float", "integer"]:
                    # Numeric, e.g. Organizations' offices_by_org
                    query = (field == value)
                else:
                    # Text
                    query = (field.lower() == value)
            elif filter == "<":
                query = (field < value)
            elif filter == ">":
                query = (field > value)
            else:
                output = current.xml.json_message(
                            False,
                            400,
                            "Unsupported filter! Supported filters: ~, =, <, >")
                raise HTTP(400, body=output)
            # Exclude records which are already linked:
            #    ?link=<linktablename>.<leftkey>.<id>.<rkey>.<fkey>
            # e.g. ?link=project_organisation.organisation_id.5.project_id.id
            if "link" in _vars:
                try:
                    link, lkey, _id, rkey, fkey = _vars.link.split(".")
                    linktable = s3db[link]
                    fq = (linktable[rkey] == table[fkey]) & \
                         (linktable[lkey] == _id)
                    linked = current.db(fq).select(table._id)
                    exclude = (~(table._id.belongs([r[table._id.name]
                                                    for r in linked])))
                except Exception, e:
                    # Malformed "link" parameter => best-effort, skip it
                    pass # ignore
                else:
                    query &= exclude
            # Select only or exclude template records:
            # to only select templates:
            #           ?template=<fieldname>.<value>,
            #      e.g. ?template=template.true
            # to exclude templates:
            #           ?template=~<fieldname>.<value>
            #      e.g. ?template=~template.true
            if "template" in _vars:
                try:
                    flag, val = _vars.template.split(".", 1)
                    if flag[0] == "~":
                        exclude = True
                        flag = flag[1:]
                    else:
                        exclude = False
                    ffield = table[flag]
                except:
                    # Malformed "template" parameter => skip it
                    pass # ignore
                else:
                    if str(ffield.type) == "boolean":
                        if val.lower() == "true":
                            val = True
                        else:
                            val = False
                    if exclude:
                        templates = (ffield != val)
                    else:
                        templates = (ffield == val)
                    resource.add_filter(templates)
            resource.add_filter(query)
            if filter == "~":
                # Too many prefix matches => ask for more characters
                # instead of returning a huge result set
                if (not limit or limit > MAX_SEARCH_RESULTS) and \
                   resource.count() > MAX_SEARCH_RESULTS:
                    output = jsons([dict(id="",
                                         name="Search results are over %d. Please input more characters." \
                                             % MAX_SEARCH_RESULTS)])
            if output is None:
                output = S3Exporter().json(resource,
                                           start=0,
                                           limit=limit,
                                           fields=fields,
                                           orderby=field)
            current.response.headers["Content-Type"] = "application/json"
        else:
            output = current.xml.json_message(
                        False,
                        400,
                        "Missing options! Require: field, filter & value")
            raise HTTP(400, body=output)
        return output
# -------------------------------------------------------------------------
@staticmethod
def _check_search_autcomplete_search_simple_widget(widget):
"""
@todo: docstring
"""
if not isinstance(widget, S3SearchSimpleWidget):
raise SyntaxError("First simple widget for Search AutoComplete must be S3SearchSimpleWidget")
# -------------------------------------------------------------------------
    def search_autocomplete(self, r, **attr):
        """
        Interactive search

        Builds the markup (a DIV containing the simple and/or advanced
        search forms) for the S3SearchAutocompleteWidget.

        @param r: the S3Request instance
        @param attr: request parameters (fieldname, get_fieldname, value)
        """
        # Get environment
        T = current.T
        resource = self.resource
        vars = self.request.get_vars
        resource.clear_query()
        # Fieldname of the value for the autocomplete (default to id)
        get_fieldname = attr.get("get_fieldname")
        fieldname = attr.get("fieldname")
        value = attr.get("value")
        # Get representation
        representation = r.representation
        # Initialize output
        feature_queries = []
        bounds = None
        output = dict()
        simple = False
        # Get table-specific parameters
        sortby = self._config("sortby", [[1, "asc"]])
        orderby = self._config("orderby", None)
        list_fields = self._config("list_fields")
        insertable = self._config("insertable", True)
        # Initialize the form
        form_attr = dict(_class="search_form form-container",
                         _prefix=resource.prefix,
                         _resourcename=resource.name,
                         _fieldname=fieldname,
                         )
        if get_fieldname:
            form_attr["_get_fieldname"] = get_fieldname
            # Otherwise default get_fieldname is "id"
        form = DIV(**form_attr)
        # Append the simple search form
        if self.simple:
            simple = True
            if self.advanced:
                switch_link = A(T("Advanced Search"), _href="#",
                                _class="action-lnk advanced-lnk %s",
                                _fieldname=fieldname)
            else:
                switch_link = ""
            # Only display the S3SearchSimpleWidget (should be first)
            name, widget = self.simple[0]
            self._check_search_autcomplete_search_simple_widget(widget)
            name = "%s_search_simple_simple" % fieldname
            autocomplete_widget = widget.widget(resource,
                                                vars,
                                                name=name,
                                                value=value,
                                                autocomplete="off")
            simple_form = DIV(TABLE(autocomplete_widget,
                                    switch_link
                                    ),
                              _class="simple-form")
            form.append(simple_form)
        # Append the advanced search form
        if self.advanced:
            trows = []
            first_widget = True
            for name, widget in self.advanced:
                _widget = widget.widget(resource, vars)
                if _widget is None:
                    # Skip this widget as we have nothing but the label
                    continue
                label = widget.field
                if first_widget:
                    # The first advanced widget doubles as the
                    # autocomplete input of the advanced form
                    self._check_search_autcomplete_search_simple_widget(widget)
                    name = "%s_search_simple_advanced" % fieldname
                    autocomplete_widget = widget.widget(resource,
                                                        vars,
                                                        name=name,
                                                        value=value,
                                                        autocomplete="off")
                    first_widget = False
                else:
                    if isinstance(label, (list, tuple)) and len(label):
                        label = label[0]
                    if hasattr(widget, "attr"):
                        label = widget.attr.get("label", label)
                    tr = TR(TD("%s: " % label, _class="w2p_fl"), _widget)
                    trows.append(tr)
            if self.simple:
                switch_link = A(T("Simple Search"), _href="#",
                                _class="action-lnk simple-lnk",
                                _fieldname=fieldname)
            else:
                switch_link = ""
            if simple:
                _class = "hide"
            else:
                _class = None
            # NOTE(review): if every advanced widget renders None,
            # "autocomplete_widget" may be unbound (or left over from
            # the simple form) at this point - confirm that the first
            # advanced widget always renders
            advanced_form = DIV(autocomplete_widget,
                                TABLE(trows),
                                TABLE(TR(switch_link)),
                                _class="%s advanced-form" % _class,
                                #_resourcename = resource.name
                                )
            form.append(advanced_form)
        output.update(form=form)
        return output
# -------------------------------------------------------------------------
def search_json_autocomplete(self, r, **attr):
"""
@todo: docstring
"""
query = None
errors = True
request = self.request
resource = self.resource
response = current.response
response.headers["Content-Type"] = "application/json"
# Process the simple search form:
if self.simple and request.vars.simple_form:
for name, widget in self.simple:
# Pass request instead of form - it contains the vars
query, errors = self._build_widget_query(resource,
name,
widget,
request,
query)
if errors:
break
# Process the advanced search form:
elif self.advanced:
for name, widget in self.advanced:
# Pass request instead of form - it contains the vars
query, errors = self._build_widget_query(resource,
name,
widget,
request,
query)
if errors:
break
else:
errors = True
resource.add_filter(query)
try:
get_fieldname = request.vars.get("get_fieldname", "id")
field = resource.table[get_fieldname]
except:
errors = True
# How can this be done more elegantly?
resource_represent = {"human_resource": lambda id: \
response.s3.hrm_human_resource_represent(id,
show_link=True)
}
if get_fieldname == "id":
represent = resource_represent[resource.name]
else:
represent = field.represent
attributes = dict(orderby=field,
limitby=resource.limitby(start=0, limit=11),
distinct=True)
# Get the rows
rows = resource._load(field, **attributes)
if not errors:
output = [{"id" : row[get_fieldname],
"represent" : str(represent(row[get_fieldname]))
} for row in rows ]
else:
jsons("{}")
return jsons(output)
# -------------------------------------------------------------------------
@staticmethod
def save_search(r, **attr):
"""
Save a Search Filter in the user's profile
- db.pr_save_search
"""
search_vars = json.load(r.body)
s_vars = {}
for i in search_vars.iterkeys():
if str(i) == "criteria" :
s_dict = {}
c_dict = search_vars[i]
for j in c_dict.iterkeys():
key = str(j)
s_dict[key] = str(c_dict[j])
s_vars[str(i)] = s_dict
else:
key = str(i)
s_vars[key] = str(search_vars[i])
import cPickle
search_str = cPickle.dumps(s_vars)
table = current.s3db.pr_save_search
query = (table.user_id == current.auth.user_id) & \
(table.search_vars == search_str)
if len(current.db(query).select(table.id)) == 0:
new_search = {}
new_search["search_vars"] = search_str
_id = table.insert(**new_search)
msg = "success"
return msg
# =============================================================================
class S3LocationSearch(S3Search):
    """
    Search method with specifics for Location records (hierarchy search)
    """

    def search_json(self, r, **attr):
        """
        JSON search method for S3LocationAutocompleteWidget

        Supports prefix search ("~") with optional level/parent/children
        constraints (used by the LocationSelector), and exact search ("=").

        @param r: the S3Request
        @param attr: request attributes
        @raises: HTTP 400 on unsupported filter
        """
        output = None
        response = current.response
        resource = self.resource
        table = self.table

        # Query comes in pre-filtered to accessible & deletion_status
        # Respect response.s3.filter
        resource.add_filter(response.s3.filter)

        _vars = self.request.vars
        limit = int(_vars.limit or 0)

        # JQueryUI Autocomplete uses "term"
        # old JQuery Autocomplete uses "q"
        # what uses "value"?
        value = _vars.term or _vars.value or _vars.q or None

        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        if value:
            value = value.lower().strip()

        query = None
        fields = []
        field = table.id

        if _vars.field and _vars.filter and value:
            fieldname = str.lower(_vars.field)
            field = table[fieldname]

            # Which fields to return: the LocationSelector's "simple"
            # mode only needs the Lx ancestor names
            if _vars.simple:
                fields = [table.id,
                          table.name,
                          table.level,
                          table.path,
                          table.L0,
                          table.L1,
                          table.L2,
                          table.L3
                          ]
            else:
                # Default fields to return
                fields = [table.id,
                          table.name,
                          table.level,
                          table.parent,
                          table.path,
                          table.uuid,
                          table.lat,
                          table.lon,
                          table.addr_street,
                          table.addr_postcode
                          ]

            # Optional fields
            # level: single value, "|"-separated list, or "null"
            if "level" in _vars and _vars.level:
                if _vars.level == "null":
                    level = None
                elif "|" in _vars.level:
                    level = _vars.level.split("|")
                else:
                    level = str.upper(_vars.level)
            else:
                level = None

            if "parent" in _vars and _vars.parent:
                if _vars.parent == "null":
                    parent = None
                else:
                    parent = int(_vars.parent)
            else:
                parent = None

            if "children" in _vars and _vars.children:
                if _vars.children == "null":
                    children = None
                else:
                    children = int(_vars.children)
            else:
                children = None

            # Second field for OR-matching (e.g. addr_street)
            if "field2" in _vars and _vars.field2:
                fieldname = str.lower(_vars.field2)
                field2 = table[fieldname]
            else:
                field2 = None

            if "exclude_field" in _vars:
                exclude_field = str.lower(_vars.exclude_field)
                if "exclude_value" in _vars:
                    exclude_value = str.lower(_vars.exclude_value)
                else:
                    exclude_value = None
            else:
                exclude_field = None
                exclude_value = None

            filter = _vars.filter
            if filter == "~":
                if children:
                    # LocationSelector
                    # Search within the subtree of the given parent,
                    # filtered in Python rather than in the DB
                    children = current.gis.get_children(children, level=level)
                    children = children.find(lambda row: \
                                             row.name and value in str.lower(row.name))
                    output = children.json()
                    response.headers["Content-Type"] = "application/json"
                    return output

                if field2:
                    # LocationSelector for addr_street
                    query = ((field.lower().like(value + "%")) | \
                             (field2.lower().like(value + "%")))
                else:
                    # Normal single-field
                    query = (field.lower().like(value + "%"))

                resource.add_filter(query)

                if level:
                    # LocationSelector or Autocomplete
                    if isinstance(level, list):
                        query = (table.level.belongs(level))
                    elif str.upper(level) == "NULLNONE":
                        # NULLNONE => records without a level
                        level = None
                        query = (table.level == level)
                    else:
                        query = (table.level == level)
                else:
                    # Filter out poor-quality data, such as from Ushahidi
                    query = (table.level != "XX")

                if parent:
                    # LocationSelector
                    # (chain the level-query first, then restrict by parent)
                    resource.add_filter(query)
                    query = (table.parent == parent)

            elif filter == "=":
                if field.type.split(" ")[0] in \
                   ["reference", "id", "float", "integer"]:
                    # Numeric, e.g. Organizations' offices_by_org
                    query = (field == value)
                else:
                    # Text
                    if value == "nullnone":
                        # i.e. old Location Selector
                        query = (field == None)
                    else:
                        query = (field.lower() == value)

                if parent:
                    # i.e. gis_location hierarchical search
                    resource.add_filter(query)
                    query = (table.parent == parent)
                    fields = [table.id,
                              table.name,
                              table.level,
                              table.uuid,
                              table.parent,
                              table.lat,
                              table.lon,
                              table.addr_street,
                              table.addr_postcode
                              ]
            else:
                output = current.xml.json_message(False, 400,
                                "Unsupported filter! Supported filters: ~, =")
                raise HTTP(400, body=output)

        # No field/filter/value => return all fields of the table
        if not fields:
            append = fields.append
            for field in table.fields:
                append(table[field])

        resource.add_filter(query)

        # NOTE(review): when the big block above was skipped, "filter"
        # below resolves to the *builtin* filter() (compares unequal to
        # "~"), and "parent" is unbound (NameError in the elif) -
        # confirm this method is only ever called with field & filter.
        if filter == "~":
            if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
                output = jsons([dict(id="",
                                     name="Search results are over %d. Please input more characters." \
                                         % MAX_SEARCH_RESULTS)])
        elif not parent:
            if (not limit or limit > MAX_RESULTS) and resource.count() > MAX_RESULTS:
                output = jsons([])

        if output is None:
            output = S3Exporter().json(resource,
                                       start=0,
                                       limit=limit,
                                       fields=fields,
                                       orderby=field)

        response.headers["Content-Type"] = "application/json"
        return output
# =============================================================================
class S3OrganisationSearch(S3Search):
    """
    Search method with specifics for Organisation records
    - searches name & acronym for both this organisation & the parent of
      branches
    """

    def search_json(self, r, **attr):
        """
        JSON search method for S3OrganisationAutocompleteWidget

        Branches are returned as "Parent > Branch"; non-branches
        include their acronym as "Name (ACRONYM)".

        @param r: the S3Request
        @param attr: request attributes
        @raises: HTTP 400 on unsupported filter
        """
        response = current.response
        resource = self.resource
        table = self.table

        # Query comes in pre-filtered to accessible & deletion_status
        # Respect response.s3.filter
        resource.add_filter(response.s3.filter)

        _vars = self.request.vars # should be request.get_vars?

        # JQueryUI Autocomplete uses "term"
        # old JQuery Autocomplete uses "q"
        # what uses "value"?
        value = _vars.term or _vars.value or _vars.q or None

        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        # NOTE(review): raises AttributeError if no value was passed
        value = value.lower().strip()

        filter = _vars.filter
        if filter and value:
            if filter == "~":
                # Prefix-match on name/acronym of both the organisation
                # itself and (for branches) its parent organisation
                query = (S3FieldSelector("parent.name").lower().like(value + "%")) | \
                        (S3FieldSelector("parent.acronym").lower().like(value + "%")) | \
                        (S3FieldSelector("organisation.name").lower().like(value + "%")) | \
                        (S3FieldSelector("organisation.acronym").lower().like(value + "%"))
            else:
                output = current.xml.json_message(False, 400,
                                "Unsupported filter! Supported filters: ~")
                raise HTTP(400, body=output)

        # NOTE(review): "query" is unbound here if filter/value were
        # empty - confirm callers always send both
        resource.add_filter(query)

        limit = int(_vars.limit or MAX_SEARCH_RESULTS)
        if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
            output = jsons([dict(id="",
                                 name="Search results are over %d. Please input more characters." \
                                     % MAX_SEARCH_RESULTS)])
        else:
            btable = current.s3db.org_organisation_branch
            field = table.name
            field2 = table.acronym
            field3 = btable.organisation_id

            # Fields to return
            fields = [table.id, field, field2, field3]

            attributes = dict(orderby=field)
            limitby = resource.limitby(start=0, limit=limit)
            if limitby is not None:
                attributes["limitby"] = limitby

            rows = resource._load(*fields, **attributes)
            output = []
            append = output.append
            db = current.db
            for row in rows:
                name = row[table].name
                parent = None
                if "org_organisation_branch" in row:
                    # Branch => prefix with the parent organisation's name
                    # (one extra lookup per branch row)
                    query = (table.id == row[btable].organisation_id)
                    parent = db(query).select(table.name,
                                              limitby = (0, 1)).first()
                    if parent:
                        name = "%s > %s" % (parent.name, name)
                if not parent:
                    # Top-level organisation => append the acronym
                    acronym = row[table].acronym
                    if acronym:
                        name = "%s (%s)" % (name, acronym)
                record = dict(
                    id = row[table].id,
                    name = name,
                    )
                append(record)
            output = jsons(output)

        response.headers["Content-Type"] = "application/json"
        return output
# =============================================================================
class S3PersonSearch(S3Search):
    """
    Search method for Persons
    """

    def search_json(self, r, **attr):
        """
        JSON search method for S3PersonAutocompleteWidget
        - full name search

        @param r: the S3Request
        @param attr: request attributes
        @raises: HTTP 400 if no search value was provided
        """
        resource = self.resource
        response = current.response

        # Query comes in pre-filtered to accessible & deletion_status
        # Respect response.s3.filter
        resource.add_filter(response.s3.filter)

        params = self.request.vars # should be request.get_vars?

        # JQueryUI Autocomplete uses "term", the old JQuery
        # Autocomplete uses "q"; "value" is also accepted
        value = params.term or params.value or params.q or None
        if not value:
            output = current.xml.json_message(
                False,
                400,
                "No value provided!"
            )
            raise HTTP(400, body=output)

        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        value = value.lower()

        if " " in value:
            # Two words => first name + (middle or last) name
            value1, value2 = value.split(" ", 1)
            value2 = value2.strip()
            query = (S3FieldSelector("first_name").lower().like(value1 + "%")) & \
                    ((S3FieldSelector("middle_name").lower().like(value2 + "%")) | \
                     (S3FieldSelector("last_name").lower().like(value2 + "%")))
        else:
            # Single word => match any of the three name parts
            value = value.strip()
            query = ((S3FieldSelector("first_name").lower().like(value + "%")) | \
                     (S3FieldSelector("middle_name").lower().like(value + "%")) | \
                     (S3FieldSelector("last_name").lower().like(value + "%")))
        resource.add_filter(query)

        limit = int(params.limit or 0)
        if (not limit or limit > MAX_SEARCH_RESULTS) and \
           resource.count() > MAX_SEARCH_RESULTS:
            # Too many matches => ask for more characters
            output = jsons([dict(id="",
                                 name="Search results are over %d. Please input more characters." \
                                     % MAX_SEARCH_RESULTS)])
        else:
            rows = resource.select(fields=["id",
                                           "first_name",
                                           "middle_name",
                                           "last_name",
                                           ],
                                   start=0,
                                   limit=limit,
                                   orderby="pr_person.first_name")
            items = []
            if rows:
                items = [{
                            "id"     : row.id,
                            "first"  : row.first_name,
                            "middle" : row.middle_name or "",
                            "last"   : row.last_name or "",
                         } for row in rows]
            output = json.dumps(items)

        response.headers["Content-Type"] = "application/json"
        return output
# =============================================================================
class S3HRSearch(S3Search):
    """
    Search method for Human Resources
    """

    def search_json(self, r, **attr):
        """
        JSON search method for S3HumanResourceAutocompleteWidget
        - full name search
        - include Organisation & Job Role in the output

        @param r: the S3Request
        @param attr: request attributes
        @raises: HTTP 400 if no search value was provided
        """
        resource = self.resource
        response = current.response

        # Query comes in pre-filtered to accessible & deletion_status
        # Respect response.s3.filter
        resource.add_filter(response.s3.filter)

        _vars = self.request.vars # should be request.get_vars?

        # JQueryUI Autocomplete uses "term"
        # old JQuery Autocomplete uses "q"
        # what uses "value"?
        value = _vars.term or _vars.value or _vars.q or None
        if not value:
            output = current.xml.json_message(False, 400, "No value provided!")
            raise HTTP(400, body=output)

        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        value = value.lower()

        if " " in value:
            # Multiple words
            # - check for match of first word against first_name
            # - & second word against either middle_name or last_name
            value1, value2 = value.split(" ", 1)
            value2 = value2.strip()
            query = ((S3FieldSelector("person_id$first_name").lower().like(value1 + "%")) & \
                    ((S3FieldSelector("person_id$middle_name").lower().like(value2 + "%")) | \
                     (S3FieldSelector("person_id$last_name").lower().like(value2 + "%"))))
        else:
            # Single word - check for match against any of the 3 names
            value = value.strip()
            query = ((S3FieldSelector("person_id$first_name").lower().like(value + "%")) | \
                     (S3FieldSelector("person_id$middle_name").lower().like(value + "%")) | \
                     (S3FieldSelector("person_id$last_name").lower().like(value + "%")))
        resource.add_filter(query)

        limit = int(_vars.limit or 0)
        if (not limit or limit > MAX_SEARCH_RESULTS) and resource.count() > MAX_SEARCH_RESULTS:
            # Too many matches => ask for more characters
            output = jsons([dict(id="",
                                 name="Search results are over %d. Please input more characters." \
                                     % MAX_SEARCH_RESULTS)])
        else:
            fields = ["id",
                      "person_id$first_name",
                      "person_id$middle_name",
                      "person_id$last_name",
                      "job_title_id$name",
                      ]
            show_orgs = current.deployment_settings.get_hrm_show_organisation()
            if show_orgs:
                fields.append("organisation_id$name")
            rows = resource.select(fields=fields,
                                   start=0,
                                   limit=limit,
                                   orderby="pr_person.first_name")
            if rows:
                # NOTE(review): assumes the joined org/job-title rows
                # are always present in each Row - confirm the select
                # uses left joins that guarantee these keys
                items = [{
                            "id"     : row["hrm_human_resource"].id,
                            "first"  : row["pr_person"].first_name,
                            "middle" : row["pr_person"].middle_name or "",
                            "last"   : row["pr_person"].last_name or "",
                            "org"    : row["org_organisation"].name if show_orgs else "",
                            "job"    : row["hrm_job_title"].name or "",
                         } for row in rows ]
            else:
                items = []
            output = json.dumps(items)

        response.headers["Content-Type"] = "application/json"
        return output
# =============================================================================
class S3PentitySearch(S3Search):
    """
    Search method with specifics for Pentity records (full name search)
    """

    def search_json(self, r, **attr):
        """
        Legacy JSON search method (for autocomplete widgets)

        Searches persons by full name, then adds matching groups and
        organisations, and returns the combined list of person
        entities as [{"id": pe_id, "name": represented}, ...].

        @param r: the S3Request
        @param attr: request attributes
        @raises: HTTP 400 on unsupported filter
        """
        response = current.response
        resource = self.resource
        table = self.table
        s3db = current.s3db

        # Query comes in pre-filtered to accessible & deletion_status
        # Respect response.s3.filter
        resource.add_filter(response.s3.filter)

        _vars = self.request.vars # should be request.get_vars?

        # JQueryUI Autocomplete uses "term"
        # old JQuery Autocomplete uses "q"
        # what uses "value"?
        value = _vars.term or _vars.value or _vars.q or None

        # We want to do case-insensitive searches
        # (default anyway on MySQL/SQLite, but not PostgreSQL)
        # FIX: guard against a missing value (None.lower() would
        # raise AttributeError)
        if value:
            value = value.lower()

        filter = _vars.filter
        limit = int(_vars.limit or 0)

        # FIX: initialize, so that a request without filter/value
        # returns an empty list instead of raising NameError below
        items = []

        # Persons
        if filter and value:
            ptable = s3db.pr_person
            field = ptable.first_name
            field2 = ptable.middle_name
            field3 = ptable.last_name

            if filter == "~":
                # pr_person Autocomplete
                if " " in value:
                    value1, value2 = value.split(" ", 1)
                    value2 = value2.strip()
                    # FIX: parenthesize the OR-alternatives - "&" binds
                    # tighter than "|", so the original expression
                    # matched any last_name regardless of first_name
                    # (cf. the correct grouping in S3PersonSearch)
                    query = (field.lower().like(value1 + "%")) & \
                            ((field2.lower().like(value2 + "%")) | \
                             (field3.lower().like(value2 + "%")))
                else:
                    value = value.strip()
                    query = ((field.lower().like(value + "%")) | \
                             (field2.lower().like(value + "%")) | \
                             (field3.lower().like(value + "%")))
                resource.add_filter(query)
            else:
                output = current.xml.json_message(False, 400,
                                "Unsupported filter! Supported filters: ~")
                raise HTTP(400, body=output)

            resource.add_filter(ptable.pe_id == table.pe_id)
            output = S3Exporter().json(resource, start=0, limit=limit,
                                       fields=[table.pe_id], orderby=field)
            items = json.loads(output)

        # Add Groups
        if filter and value:
            gtable = s3db.pr_group
            field = gtable.name
            query = field.lower().like("%" + value + "%")
            resource.clear_query()
            resource.add_filter(query)
            resource.add_filter(gtable.pe_id == table.pe_id)
            output = S3Exporter().json(resource,
                                       start=0,
                                       limit=limit,
                                       fields=[table.pe_id],
                                       orderby=field)
            items += json.loads(output)

        # Add Organisations
        if filter and value:
            otable = s3db.org_organisation
            field = otable.name
            query = field.lower().like("%" + value + "%")
            resource.clear_query()
            resource.add_filter(query)
            resource.add_filter(otable.pe_id == table.pe_id)
            output = S3Exporter().json(resource,
                                       start=0,
                                       limit=limit,
                                       fields=[table.pe_id],
                                       orderby=field)
            items += json.loads(output)

        # Represent each pe_id as a human-readable name
        items = [ { "id" : item[u'pe_id'],
                    "name" : s3db.pr_pentity_represent(item[u'pe_id'],
                                                       show_label=False) }
                  for item in items ]
        output = json.dumps(items)
        response.headers["Content-Type"] = "application/json"
        return output
# =============================================================================
class S3SearchOrgHierarchyWidget(S3SearchOptionsWidget):
    # Options widget which renders an organisation-hierarchy selector
    # instead of a plain options list.

    def widget(self, resource, vars):
        """
        Render the organisation hierarchy widget for this field.

        @param resource: the resource to search in
        @param vars: the URL GET variables (unused here)
        """
        field_name = self.field

        # check the field type
        try:
            field = resource.table[field_name]
        except:
            # Field not in the table => treat as virtual field
            # NOTE(review): "field" stays unbound on this path, so the
            # return below would raise NameError for virtual fields;
            # "field_type" is computed but never used - confirm intent
            field_type = "virtual"
        else:
            field_type = str(field.type)

        return S3OrganisationHierarchyWidget()(field, {}, **self.attr)
# END =========================================================================
| vgupta6/Project-2 | modules/s3/s3search.py | Python | mit | 99,058 |
#
# The Python Imaging Library.
# $Id$
#
# Windows Cursor support for PIL
#
# notes:
# uses BmpImagePlugin.py to read the bitmap data.
#
# history:
# 96-05-27 fl Created
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.1"
import Image, BmpImagePlugin
#
# --------------------------------------------------------------------
def i16(c):
    # Decode an unsigned 16-bit little-endian integer from the first
    # two bytes of c (low byte first).
    low, high = ord(c[0]), ord(c[1])
    return low | (high << 8)
def i32(c):
    # Decode an unsigned 32-bit little-endian integer from the first
    # four bytes of c (least significant byte first).
    value = 0
    for shift, byte in zip((0, 8, 16, 24), c[:4]):
        value |= ord(byte) << shift
    return value
def _accept(prefix):
return prefix[:4] == "\0\0\2\0"
##
# Image plugin for Windows Cursor files.
class CurImageFile(BmpImagePlugin.BmpImageFile):
    """Image plugin for Windows Cursor (.cur) files.

    A CUR file is a 6-byte header, a directory of 16-byte entries (one
    per cursor image), and BMP-style bitmap data; the bitmap decoding
    is inherited from BmpImageFile.
    """

    format = "CUR"
    format_description = "Windows Cursor"

    def _open(self):
        # Remember where the CUR data starts (the stream may not be
        # positioned at 0 if the cursor is embedded in another file)
        offset = self.fp.tell()

        # check magic
        s = self.fp.read(6)
        if not _accept(s):
            raise SyntaxError, "not an CUR file"

        # pick the largest cursor in the file
        # (i16(s[4:]) is the number of directory entries; within each
        # 16-byte entry, byte 0 is the width and byte 1 the height)
        m = ""
        for i in range(i16(s[4:])):
            s = self.fp.read(16)
            if not m:
                m = s
            elif ord(s[0]) > ord(m[0]) and ord(s[1]) > ord(m[1]):
                m = s
            #print "width", ord(s[0])
            #print "height", ord(s[1])
            #print "colors", ord(s[2])
            #print "reserved", ord(s[3])
            #print "hotspot x", i16(s[4:])
            #print "hotspot y", i16(s[6:])
            #print "bytes", i32(s[8:])
            #print "offset", i32(s[12:])

        # load as bitmap
        # (the image-data offset in the entry is relative to the file,
        # so add the initial stream offset)
        self._bitmap(i32(m[12:]) + offset)

        # patch up the bitmap height
        # (CUR bitmaps store image + AND mask stacked vertically, so
        # the BMP header height is twice the visible height)
        self.size = self.size[0], self.size[1]/2
        d, e, o, a = self.tile[0]
        self.tile[0] = d, (0,0)+self.size, o, a

        return
#
# --------------------------------------------------------------------
# Register the plugin with PIL so Image.open() recognises .cur files.
Image.register_open("CUR", CurImageFile, _accept)
Image.register_extension("CUR", ".cur")
| zhaochl/python-utils | verify_code/Imaging-1.1.7/build/lib.linux-x86_64-2.7/CurImagePlugin.py | Python | apache-2.0 | 2,002 |
#!/usr/bin/python2.7
# encoding: utf-8
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test cases for end-to-end testing. Run with the server_tests script."""
import mock
import model
import photo
from google.appengine.api import images
from photo import MAX_IMAGE_DIMENSION, MAX_THUMBNAIL_DIMENSION, set_thumbnail
from server_tests_base import ServerTestsBase
class PhotoTests(ServerTestsBase):
    """Tests that verify photo upload and serving."""
    def submit_create(self, **kwargs):
        """Submit the person-entry creation form with test defaults.

        Extra form fields (e.g. photo=..., note_photo=...) can be passed
        through **kwargs.  Returns the resulting document.
        """
        doc = self.go('/haiti/create?role=provide')
        form = doc.cssselect_one('form')
        return self.s.submit(form,
                             given_name='_test_given_name',
                             family_name='_test_family_name',
                             author_name='_test_author_name',
                             text='_test_text',
                             **kwargs)
    def test_upload_photo(self):
        """Verifies a photo is uploaded and properly served on the server."""
        # Create a new person record with a profile photo.
        with open('tests/testdata/small_image.png') as photo:
            original_image = images.Image(photo.read())
            doc = self.submit_create(photo=photo)
        # Verify the image is uploaded and displayed on the view page.
        photo = doc.cssselect_one('img.photo')
        photo_anchor = doc.xpath_one('//a[img[@class="photo"]]')
        # Verify the image is served properly by checking the image metadata.
        doc = self.s.go(photo.get('src'))
        image = images.Image(doc.content_bytes)
        assert image.format == images.PNG
        assert image.width == original_image.width
        assert image.height == original_image.height
        # Follow the link on the image and verify the same image is served.
        doc = self.s.follow(photo_anchor)
        image = images.Image(doc.content_bytes)
        assert image.format == images.PNG
        assert image.width == original_image.width
        assert image.height == original_image.height
    def test_upload_photos_with_transformation(self):
        """Uploads both profile photo and note photo and verifies the images are
        properly transformed and served on the server i.e., jpg is converted to
        png and a large image is resized to match MAX_IMAGE_DIMENSION."""
        # Create a new person record with a profile photo and a note photo.
        with open('tests/testdata/small_image.png') as photo:
            with open('tests/testdata/large_image.png') as note_photo:
                original_image = images.Image(photo.read())
                doc = self.submit_create(photo=photo, note_photo=note_photo)
        # Verify the images are uploaded and displayed on the view page.
        photos = doc.cssselect('img.photo')
        assert len(photos) == 2
        # Verify the profile image is converted to png.
        doc = self.s.go(photos[0].get('src'))
        image = images.Image(doc.content_bytes)
        assert image.format == images.PNG
        assert image.width == original_image.width
        assert image.height == original_image.height
        # Verify the note image is resized to match MAX_IMAGE_DIMENSION.
        doc = self.s.go(photos[1].get('src'))
        image = images.Image(doc.content_bytes)
        assert image.format == images.PNG
        assert image.width == MAX_IMAGE_DIMENSION
        assert image.height == MAX_IMAGE_DIMENSION
    def test_upload_empty_photo(self):
        """Uploads an empty image and verifies no img tag in the view page."""
        # Create a new person record with a zero-byte profile photo.
        with open('tests/testdata/empty_image.png') as photo:
            doc = self.submit_create(photo=photo)
        # Verify there is no img tag in the view page.
        assert '_test_given_name' in doc.text
        assert not doc.cssselect('img.photo')
    def test_upload_broken_photo(self):
        """Uploads a broken image and verifies an error message is displayed."""
        # Create a new person record with a broken profile photo.
        with open('tests/testdata/broken_image.png') as photo:
            doc = self.submit_create(photo=photo)
        # Verify an error message is displayed.
        assert not doc.cssselect('img.photo')
        assert 'unrecognized format' in doc.text
    def test_set_thumbnail(self):
        """Tests that a thumbnail is generated."""
        with open('tests/testdata/small_image.png') as image_file:
            photo = model.Photo.create('haiti', image_data=image_file.read())
            photo.save()
        # Run the background task that prepares thumbnails.
        self.go('/haiti/tasks/thumbnail_preparer')
        doc = self.s.go('/haiti/photo?id=%s&thumb=true' %
                        photo.key().name().split(':')[1])
        image = images.Image(doc.content_bytes)
        assert image.format == images.PNG
        assert image.height == MAX_THUMBNAIL_DIMENSION
        assert image.width == MAX_THUMBNAIL_DIMENSION
    def test_skip_thumbnail_for_small_enough_images(self):
        """Tests that a thumbnail isn't generated for small enough images."""
        with open('tests/testdata/tiny_image.png') as image_file:
            photo = model.Photo.create('haiti', image_data=image_file.read())
            photo.save()
        self.go('/haiti/tasks/thumbnail_preparer')
        db_photo = model.Photo.get_by_key_name(photo.key().name())
        # tiny_image.png is 40x40, so it shouldn't bother generating a
        # thumbnail.
        assert not db_photo.thumbnail_data
        # Requesting thumb=true should then serve the original image.
        doc = self.s.go('/haiti/photo?id=%s&thumb=true' %
                        photo.key().name().split(':')[1])
        image = images.Image(doc.content_bytes)
        assert image.format == images.PNG
        assert image.height == 40
        assert image.width == 40
    def test_download_photo(self):
        """Verifies create_photo_from_url builds a Photo from a fetched URL."""
        photo_url = 'http://www.example.com/photo.jpg'
        photo_response = mock.MagicMock()
        photo_response.content = open('tests/testdata/tiny_image.png').read()
        handler = mock.MagicMock()
        handler.repo = 'test'
        handler.get_url.return_value = 'photo_url_value'
        # Mock out the HTTP fetch so no network access happens.
        with mock.patch('requests.get') as mock_requests_get:
            mock_requests_get.return_value = photo_response
            res = photo.create_photo_from_url(photo_url, handler)
        mock_requests_get.assert_called_once_with(photo_url)
        res_image = images.Image(res[0].image_data)
        assert res_image.height == 40
        assert res_image.width == 40
| gimite/personfinder | tests/server_test_cases/photo_tests.py | Python | apache-2.0 | 7,029 |
import pytest
from spacy.matcher import Matcher
from spacy.errors import MatchPatternError
from spacy.schemas import validate_token_pattern
# (pattern, num errors with validation, num errors identified with minimal
# checks)
TEST_PATTERNS = [
    # Each entry: (pattern, expected error count with full validation,
    # expected error count with the Matcher's minimal checks).
    # Bad patterns flagged in all cases
    ([{"XX": "foo"}], 1, 1),
    ([{"IS_ALPHA": {"==": True}}, {"LIKE_NUM": None}], 2, 1),
    ([{"IS_PUNCT": True, "OP": "$"}], 1, 1),
    ([{"_": "foo"}], 1, 1),
    ('[{"TEXT": "foo"}, {"LOWER": "bar"}]', 1, 1),
    ([1, 2, 3], 3, 1),
    # Bad patterns flagged outside of Matcher
    ([{"_": {"foo": "bar", "baz": {"IN": "foo"}}}], 2, 0),  # prev: (1, 0)
    # Bad patterns not flagged with minimal checks
    ([{"LENGTH": "2", "TEXT": 2}, {"LOWER": "test"}], 2, 0),
    ([{"LENGTH": {"IN": [1, 2, "3"]}}, {"POS": {"IN": "VERB"}}], 4, 0),  # prev: (2, 0)
    ([{"LENGTH": {"VALUE": 5}}], 2, 0),  # prev: (1, 0)
    ([{"TEXT": {"VALUE": "foo"}}], 2, 0),  # prev: (1, 0)
    ([{"IS_DIGIT": -1}], 1, 0),
    ([{"ORTH": -1}], 1, 0),
    # Good patterns
    ([{"TEXT": "foo"}, {"LOWER": "bar"}], 0, 0),
    ([{"LEMMA": {"IN": ["love", "like"]}}, {"POS": "DET", "OP": "?"}], 0, 0),
    ([{"LIKE_NUM": True, "LENGTH": {">=": 5}}], 0, 0),
    ([{"LENGTH": 2}], 0, 0),
    ([{"LOWER": {"REGEX": "^X", "NOT_IN": ["XXX", "XY"]}}], 0, 0),
    ([{"NORM": "a"}, {"POS": {"IN": ["NOUN"]}}], 0, 0),
    ([{"_": {"foo": {"NOT_IN": ["bar", "baz"]}, "a": 5, "b": {">": 10}}}], 0, 0),
    ([{"orth": "foo"}], 0, 0),  # prev: xfail
    ([{"IS_SENT_START": True}], 0, 0),
    ([{"SENT_START": True}], 0, 0),
]
@pytest.mark.parametrize(
    "pattern", [[{"XX": "y"}, {"LENGTH": "2"}, {"TEXT": {"IN": 5}}]]
)
def test_matcher_pattern_validation(en_vocab, pattern):
    """A validating Matcher must reject invalid token patterns on add()."""
    validating_matcher = Matcher(en_vocab, validate=True)
    with pytest.raises(MatchPatternError):
        validating_matcher.add("TEST", [pattern])
@pytest.mark.parametrize("pattern,n_errors,_", TEST_PATTERNS)
def test_pattern_validation(pattern, n_errors, _):
    """validate_token_pattern() must report the expected number of errors."""
    error_list = validate_token_pattern(pattern)
    assert len(error_list) == n_errors
@pytest.mark.parametrize("pattern,n_errors,n_min_errors", TEST_PATTERNS)
def test_minimal_pattern_validation(en_vocab, pattern, n_errors, n_min_errors):
    """Without validate=True, only the Matcher's minimal checks run on add()."""
    matcher = Matcher(en_vocab)
    if n_min_errors > 0:
        # Minimal checks catch this pattern already.
        with pytest.raises(ValueError):
            matcher.add("TEST", [pattern])
        return
    if n_errors == 0:
        # A fully valid pattern must be accepted without raising.
        matcher.add("TEST", [pattern])
def test_pattern_errors(en_vocab):
    """Lower-case keys are normalised; unknown sub-pattern keys must raise."""
    matcher = Matcher(en_vocab)
    # "regex" is normalised to upper case, just like the "text" key.
    valid_pattern = [{"text": {"regex": "regex"}}]
    matcher.add("TEST1", [valid_pattern])
    # An unrecognised sub-pattern attribute must be rejected.
    invalid_pattern = [{"TEXT": {"XX": "xx"}}]
    with pytest.raises(MatchPatternError):
        matcher.add("TEST2", [invalid_pattern])
| spacy-io/spaCy | spacy/tests/matcher/test_pattern_validation.py | Python | mit | 2,754 |
#!/usr/bin/env python
# encoding: utf-8
#
# rbootstrap - Install RPM based Linux into chroot jails
# Copyright (C) 2014 Lars Michelsen <lm@larsmichelsen.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import os
import signal
import shutil
import subprocess
from . import distro, config, rpm
from .log import *
from .utils import *
from .exceptions import *
class Jail(object):
    """A chroot jail directory and the operations performed on it:
    initialization, device/proc/sys setup, package unpacking and
    installation, process handling, unmounting and erasure.
    (Python 2 code: uses "except Exception, e" and file().)
    """
    def __init__(self, path):
        # Refuse to operate on "/" — erasing/unmounting it would be fatal.
        self._path = path
        if self._path == '/':
            raise RBError('Won\'t continue. "/" as chroot target seems strange.')
    def init(self):
        """ Perform some initializations of the jail, for example creating
        device nodes below /dev or mount the /proc filesystem. """
        step('Initializing jail')
        for d in [ 'dev', 'etc', 'proc', 'sys' ]:
            path = os.path.join(self._path, d)
            if not os.path.exists(path):
                os.makedirs(path)
        self.write_rb_info()
        chown('/', 'root', 'root', jailed=False)
        distro.execute_hooks('pre_init')
        self.init_name_resolution()
        self.init_hostname()
        self.setup_devices()
        distro.execute_hooks('post_init')
    def init_hostname(self):
        """Write the configured hostname, or copy the host's /etc/hostname."""
        if config.hostname != None:
            write_file('/etc/hostname', config.hostname + '\n')
        else:
            copy_file('/etc/hostname')
    def init_name_resolution(self):
        """Copies resolv.conf from the host to the jail to make DNS lookups possible"""
        copy_file('/etc/resolv.conf')
    def write_rb_info(self):
        """Record the codename/arch this jail was bootstrapped with."""
        write_file('/etc/rbootstrap.info',
            'CODENAME="%s"\n'
            'ARCH="%s"\n' % (config.codename, config.arch))
    def mount(self):
        """Mount the pseudo filesystems (/proc and /sys) into the jail."""
        self.setup_proc()
        self.setup_sys()
    def setup_devices(self):
        """Create the distro-defined device nodes below the jail's /dev."""
        step('Creating device nodes')
        # Prevent problems when creating files with os.mknod(), which uses
        # mknod(2) of the system which takes care about the umask of this process.
        old_umask = os.umask(0)
        for path, perm, major, minor, user, group in distro.device_nodes():
            dest_path = os.path.join(self._path, 'dev', path)
            if not os.path.exists(dest_path):
                os.mknod(dest_path, perm, os.makedev(major, minor))
                chown(os.path.join('/dev', path), user, group)
        os.umask(old_umask)
    def setup_proc(self):
        """Mount a proc filesystem at <jail>/proc; raises RBError on failure."""
        verbose('Mounting /proc')
        if subprocess.call('mount -t proc proc %s/proc' % self._path, shell=True) != 0:
            raise RBError('Failed to mount /proc to jail')
    def setup_sys(self):
        """Mount a sysfs filesystem at <jail>/sys; raises RBError on failure."""
        verbose('Mounting /sys')
        if subprocess.call('mount -t sysfs sys %s/sys' % self._path, shell=True) != 0:
            raise RBError('Failed to mount /sys to jail')
    def get_mounts(self):
        """ Returns a list of mountpoints mounted in the jail """
        mounts = []
        try:
            # Parse /proc/mounts; field 1 of each line is the mount point.
            for l in file('/proc/mounts'):
                if l.split()[1].startswith(self._path):
                    mounts.append(l.split()[1])
        except IOError:
            pass # Not existing file is OK!
        return mounts
    def unmount(self):
        """ Tries to unmount all mounted filesystems within the jail. Sort the filesystem by
        length of the mount point string to first unmount the deeper ones """
        for mp in sorted([p.split('/') for p in self.get_mounts()], key = len, reverse = True):
            path = '/'.join(mp)
            if subprocess.call('umount -f %s' % path, shell=True) != 0:
                raise RBError('Failed to unmount %s' % path)
    def get_processes(self):
        """ Returns a dict of file paths located in the jail as keys and the
        PIDs of the processes which are currently using these files. """
        try:
            pids = list_dir('/proc')
        except OSError:
            return {} # It is ok to have no /proc, do not do anything about this
        open_files = {}
        for pid in sorted(pids):
            try:
                int(pid)
            except ValueError:
                continue # Only care about process id folders
            # Inspect the file descriptors held open by this process.
            fd_path = os.path.join('/proc', pid, 'fd')
            try:
                fds = list_dir(fd_path)
            except OSError:
                continue
            for fname in fds:
                try:
                    link = read_link(os.path.join(fd_path, fname))
                except OSError:
                    continue
                if not link.startswith(self._path):
                    continue # Only care about files within the jail
                open_files.setdefault(link, []).append(int(pid))
        return open_files
    def kill_processes(self, enforce = False):
        """ Tries to terminate all processes using files within this jail. """
        # With enforce=True SIGKILL is used instead of SIGTERM.
        for path, pids in self.get_processes().items():
            for pid in pids:
                verbose('Sending SIGTERM to %d (Has %s opened)' % (pid, path))
                if not enforce:
                    os.kill(pid, signal.SIGTERM)
                else:
                    os.kill(pid, signal.SIGKILL)
    def erase(self):
        """Remove the jail's contents (optionally keeping the package cache)."""
        if not os.path.exists(self._path):
            return
        step('Erasing existing jail')
        if config.force_erase:
            self.cleanup() # Only cleanup resources when configured
        if self.get_processes():
            raise RBError('Can not be erased. There are running processes using this jail.')
        if self.get_mounts():
            raise RBError('Can not be erased. There are still file systems mounted within the jail.')
        for thing in os.listdir(self._path):
            if config.keep_pkgs and thing == config.tmp_dir:
                continue # Skip removing pkg directory when told to do so
            path = os.path.join(self._path, thing)
            if os.path.isfile(path) or os.path.islink(path):
                os.unlink(path)
            else:
                shutil.rmtree(path)
    def unpack_package(self, pkg_path):
        """ Simply unpack the given package to the jail path. This does not try to create
        adapt the installation mechanism, for example the pre/post scripts are missing. This
        is just needed to create a minimalistics system to be able to perform the chroot into
        and use the installer afterwards. """
        try:
            rpm.unpack(pkg_path, self._path)
        except Exception, e:
            if config.debug:
                raise
            raise RBError('Failed to extract "%s": %s' % (pkg_path, e))
    def unpack(self, packages):
        """Unpack all downloaded packages to build the initial chroot tree."""
        step('Unpacking packages to create initial system')
        for pkg_name, pkg_loc, pkg_csum in packages:
            pkg_path = os.path.join(self._path, config.tmp_dir, pkg_loc.split('/')[-1])
            verbose('Unpacking %s' % pkg_name)
            self.unpack_package(pkg_path)
        distro.execute_hooks('post_unpack')
    def install(self, packages):
        """Install the base packages properly (with rpm) inside the jail."""
        step('Installing base packages')
        distro.execute_hooks('pre_install')
        if distro.gpgkey_path():
            execute_jailed('rpm --import %s' % os.path.join('/', config.tmp_dir, 'gpg.key'))
        # Now install the packages again to fix file permissions and make all pre/post
        # being executed
        packages = [
            os.path.join(config.tmp_dir, pkg_loc.split('/')[-1])
            for pkg_name, pkg_loc, pkg_csum in packages
        ]
        if not config.check_pkg_sig:
            nosig = ' --nosignature'
        else:
            nosig = ''
        execute_jailed('rpm -ivh %s%s' % (nosig, ' '.join(packages)))
        if config.include:
            step('Installing additionally packages')
            distro.install_packages(config.include)
        distro.execute_hooks('post_install')
    def cleanup(self):
        """ Is executed to make the jail completely unused by the running system (remove
        all mounted filesystems and all processes using this jail. Then verify it and return
        either True or False depending on success """
        self.unmount()
        self.kill_processes()
        # Now check whether or not this was successful
        # FIXME: wait some time, test again, then force killing, wait some time again,
        # test again and then succeed or fail
        return True
| LaMi-/rbootstrap | rbootstrap/jail.py | Python | gpl-2.0 | 8,982 |
from docopt import docopt
from .utils import prepare_objective
from .algorithms.evolutionary_algorithm import Population
from .algorithms.particle_swarm_optimization import PSO
def parse_arguments(doc, version=None):
    """Parse command-line arguments with docopt and build the objective.

    Parameters
    ----------
    doc : str
        The docopt usage string.
    version : str, optional
        Version string forwarded to docopt.

    Returns
    -------
    tuple
        (arguments, algorithm, algorithm_arguments, objective) where
        *algorithm* is the lower-cased algorithm name ("ea" by default).
    """
    arguments = docopt(doc, version=version)
    try:
        algorithm = arguments["--algorithm"].lower()
    except (KeyError, AttributeError):
        # KeyError: the option is not declared in the usage string.
        # AttributeError: declared but not supplied — docopt yields None,
        # on which .lower() would otherwise crash.
        algorithm = "ea"
    algorithm_arguments = {
        "processes": int(arguments['--processes']),
        # Vars for the genetic algorithm
        "population": int(arguments['--population']),
        "generations": int(arguments['--generations']),
        "bottleneck": int(arguments['--bottleneck']),
        "mutation_probability": float(arguments['--mu']),
        "output_filename": arguments['--output'],
        # Objective
        "name": str(arguments['--objective']),
        "repetitions": int(arguments['--repetitions']),
        "turns": int(arguments['--turns']),
        "noise": float(arguments['--noise']),
        "nmoran": int(arguments['--nmoran'])
    }
    objective = prepare_objective(
        algorithm_arguments["name"],
        algorithm_arguments["turns"],
        algorithm_arguments["noise"],
        algorithm_arguments["repetitions"],
        algorithm_arguments["nmoran"]
    )
    return arguments, algorithm, algorithm_arguments, objective
def invoke_training(doc, version, player_class, player_kwargs_func):
    """Parse CLI arguments and run the selected training algorithm.

    *doc* and *version* are forwarded to docopt via parse_arguments();
    *player_class* is the strategy class to train and *player_kwargs_func*
    maps the parsed arguments to the keyword arguments used to construct
    it.  Prints the best performer found; exits for unknown algorithms.
    """
    arguments, algorithm, algorithm_arguments, objective = parse_arguments(doc, version)
    player_kwargs = player_kwargs_func(arguments)
    print(arguments)
    # Evolutionary Algorithm
    if algorithm == "ea":
        population = Population(
            player_class,
            player_kwargs,
            algorithm_arguments["population"],
            objective,
            algorithm_arguments["output_filename"],
            algorithm_arguments["bottleneck"],
            algorithm_arguments["mutation_probability"],
            processes=algorithm_arguments["processes"])
        population.run(algorithm_arguments["generations"])
        # Get the best member of the population to output.
        # Linear scan for the highest score; on ties the later member wins
        # because of the ">=" comparison.
        scores = population.score_all()
        record, record_holder = 0, -1
        for i, s in enumerate(scores):
            if s >= record:
                record = s
                record_holder = i
        xopt, fopt = population.population[record_holder], record
    # Particle Swarm Algorithm
    elif algorithm == "ps":
        pso = PSO(player_class,
                  player_kwargs,
                  objective=objective,
                  population=algorithm_arguments["population"],
                  generations=algorithm_arguments["generations"]
                  )
        xopt_helper, fopt = pso.swarm()
        # NOTE(review): the swarm's parameter vector is not read back into
        # the player (read_vector call is commented out), so xopt is a
        # freshly constructed player — confirm this is intended.
        xopt = player_class(**player_kwargs)
        # xopt.read_vector(xopt_helper, num_states)
    else:
        print("Algorithm must be one of EA or PS.")
        exit()
    # Print best performer.
    print("Best Score: {} {}".format(fopt, xopt))
| Axelrod-Python/axelrod-evolver | src/axelrod_dojo/arguments.py | Python | mit | 2,985 |
# -*- coding: utf-8 -*-
# 226. Invert Binary Tree
#
# Invert a binary tree.
#
# 4
# / \
# 2 7
# / \ / \
# 1 3 6 9
#
# to
#
# 4
# / \
# 7 2
# / \ / \
# 9 6 3 1
#
# Trivia:
# This problem was inspired by this original tweet by Max Howell:
#
# Google: 90% of our engineers use the software you wrote (Homebrew),
# but you can’t invert a binary tree on a whiteboard so fuck off.
#
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# http://www.jianshu.com/p/85abb0a5f83e
# 每一个节点的左右子树对换,左右子树的左右节点也需要交换,
# 这种时候很容易想到的就是递归的方法。
class Solution(object):
    """Recursive solution: swap the inverted children at every node."""

    def invertTree(self, root):
        """Mirror the binary tree rooted at *root* in place and return it.

        :type root: TreeNode
        :rtype: TreeNode
        """
        if root is None:
            return root
        inverted_right = self.invertTree(root.right)
        inverted_left = self.invertTree(root.left)
        root.left, root.right = inverted_right, inverted_left
        return root
# http://www.tangjikai.com/algorithms/leetcode-226-invert-binary-tree
class Solution(object):
    """Two equivalent top-down recursive implementations of tree inversion."""

    def invertTree(self, root):
        """Mirror the tree in place using an explicit temporary variable.

        :type root: TreeNode
        :rtype: TreeNode
        """
        if root is None:
            return root
        swap = root.left
        root.left = root.right
        root.right = swap
        self.invertTree(root.left)
        self.invertTree(root.right)
        return root

    def invertTree2(self, root):
        """Same inversion, written with tuple swapping."""
        if root is not None:
            root.left, root.right = root.right, root.left
            self.invertTree(root.left)
            self.invertTree(root.right)
        return root
| gengwg/leetcode | 226_invert_binary_tree.py | Python | apache-2.0 | 1,673 |
#!/usr/bin/python
import os
import sys
import time
import numpy
from numpy.random import randn
def run_dgemv(N, l):
    """Benchmark numpy.dot (dgemv) for an NxN matrix times an N-vector.

    Runs the product *l* times and prints the achieved MFlops and the
    wall-clock time of the whole loop in the same format as before.

    Parameters
    ----------
    N : int
        Matrix/vector dimension.
    l : int
        Number of loop repetitions.
    """
    A = randn(N, N).astype('float64')
    B = randn(N).astype('float64')
    start = time.time()
    for _ in range(l):
        ref = numpy.dot(A, B)  # result intentionally unused; we only time the call
    end = time.time()
    timediff = end - start
    # A matrix-vector product costs ~2*N*N flops. Guard against a zero
    # measurement (coarse timers / tiny N) which previously raised
    # ZeroDivisionError.
    if timediff > 0:
        mflops = (2 * N * N) * l / timediff * 1e-6
    else:
        mflops = float('inf')
    size = "%dx%d" % (N, N)
    print("%14s :\t%20f MFlops\t%20f sec" % (size, mflops, timediff))
if __name__ == "__main__":
    # Benchmark defaults: start size, max size, increment, loops per size.
    N=128
    NMAX=2048
    NINC=128
    LOOPS=1
    z=0
    # Positional arguments override the defaults in order:
    # <start> <max> <increment> <loops>.  z==0 skips argv[0] (script name).
    for arg in sys.argv:
        if z == 1:
            N = int(arg)
        elif z == 2:
            NMAX = int(arg)
        elif z == 3:
            NINC = int(arg)
        elif z == 4:
            LOOPS = int(arg)
        z = z + 1
    # The OPENBLAS_LOOPS environment variable overrides the loop count.
    if 'OPENBLAS_LOOPS' in os.environ:
        p = os.environ['OPENBLAS_LOOPS']
        if p:
            LOOPS = int(p);
    print("From: %d To: %d Step=%d Loops=%d" % (N, NMAX, NINC, LOOPS))
    print("\tSIZE\t\t\tFlops\t\t\t\t\tTime")
    # Run one benchmark per matrix size from N to NMAX inclusive.
    for i in range (N,NMAX+NINC,NINC):
        run_dgemv(i,LOOPS)
| ryanrhymes/openblas | lib/OpenBLAS-0.2.19/benchmark/scripts/NUMPY/dgemv.py | Python | bsd-3-clause | 946 |
# Print the integers 0..9, one per line.
# Uses the function form of print so the script runs under both Python 2
# and Python 3 (the original "print i" statement is a SyntaxError on 3.x).
for i in range(10):
    print(i)
| montgok/Python_Class | python_test2.py | Python | apache-2.0 | 31 |
from myfecviz.tests import FlaskClientTestCase
class FECViewTest(FlaskClientTestCase):
    """Smoke tests for the FEC visualisation views."""

    def test_get_home(self):
        """Verify home page loads."""
        response = self.client.get('/')
        self.assertEqual(200, response.status_code)

    def test_summed_transactions(self):
        """Verify summed transaction view works."""
        response = self.client.get('/fec/summed_transactions')
        self.assertEqual(200, response.status_code)
        self.assertEqual('application/json', response.mimetype)

    def test_all_transaction_amounts(self):
        """Verify all transaction amount view works."""
        response = self.client.get('/fec/all_transaction_amounts')
        self.assertEqual(200, response.status_code)
        self.assertEqual('application/json', response.mimetype)
| sg95/FEC-Data-Visualization | myfecviz/tests/test_fec_views.py | Python | mit | 765 |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow NMT model implementation."""
from __future__ import print_function
import argparse
import os
import random
import sys
# import matplotlib.image as mpimg
import numpy as np
import tensorflow as tf
from . import inference
from . import train
from .utils import evaluation_utils
from .utils import misc_utils as utils
from .utils import vocab_utils
# Fail fast if the installed TensorFlow is too old for this code.
utils.check_tensorflow_version()
# NOTE(review): presumably assigned the parsed argparse namespace elsewhere
# (not visible here); left as None until then.
FLAGS = None
# Hparam keys that may be overridden at inference time even when restoring
# hparams that were saved during training.
INFERENCE_KEYS = ["src_max_len_infer", "tgt_max_len_infer", "subword_option",
                  "infer_batch_size", "beam_width",
                  "length_penalty_weight", "coverage_penalty_weight",
                  "sampling_temperature", "num_translations_per_input",
                  "infer_mode"]
def add_arguments(parser):
  """Build ArgumentParser.

  Registers all NMT command-line flags on *parser*, grouped into sections:
  network, attention, optimizer, initializer, data, vocab, sequence
  lengths, defaults, subword, char-encoding, misc, inference and job info.
  Mutates *parser* in place and returns nothing.
  """
  # Custom "bool" type so --flag=true/false strings parse as booleans.
  parser.register("type", "bool", lambda v: v.lower() == "true")
  # network
  parser.add_argument("--num_units", type=int, default=32, help="Network size.")
  parser.add_argument("--num_layers", type=int, default=2,
                      help="Network depth.")
  parser.add_argument("--num_encoder_layers", type=int, default=None,
                      help="Encoder depth, equal to num_layers if None.")
  parser.add_argument("--num_decoder_layers", type=int, default=None,
                      help="Decoder depth, equal to num_layers if None.")
  parser.add_argument("--encoder_type", type=str, default="uni", help="""\
      uni | bi | gnmt.
      For bi, we build num_encoder_layers/2 bi-directional layers.
      For gnmt, we build 1 bi-directional layer, and (num_encoder_layers - 1)
        uni-directional layers.\
      """)
  parser.add_argument("--residual", type="bool", nargs="?", const=True,
                      default=False,
                      help="Whether to add residual connections.")
  parser.add_argument("--time_major", type="bool", nargs="?", const=True,
                      default=True,
                      help="Whether to use time-major mode for dynamic RNN.")
  parser.add_argument("--num_embeddings_partitions", type=int, default=0,
                      help="Number of partitions for embedding vars.")
  # attention mechanisms
  parser.add_argument("--attention", type=str, default="", help="""\
      luong | scaled_luong | bahdanau | normed_bahdanau or set to "" for no
      attention\
      """)
  parser.add_argument(
      "--attention_architecture",
      type=str,
      default="standard",
      help="""\
      standard | gnmt | gnmt_v2.
      standard: use top layer to compute attention.
      gnmt: GNMT style of computing attention, use previous bottom layer to
          compute attention.
      gnmt_v2: similar to gnmt, but use current bottom layer to compute
          attention.\
      """)
  parser.add_argument(
      "--output_attention", type="bool", nargs="?", const=True,
      default=True,
      help="""\
      Only used in standard attention_architecture. Whether use attention as
      the cell output at each timestep.
      .\
      """)
  parser.add_argument(
      "--pass_hidden_state", type="bool", nargs="?", const=True,
      default=True,
      help="""\
      Whether to pass encoder's hidden state to decoder when using an attention
      based model.\
      """)
  # optimizer
  parser.add_argument("--optimizer", type=str, default="sgd", help="sgd | adam")
  parser.add_argument("--learning_rate", type=float, default=1.0,
                      help="Learning rate. Adam: 0.001 | 0.0001")
  parser.add_argument("--warmup_steps", type=int, default=0,
                      help="How many steps we inverse-decay learning.")
  parser.add_argument("--warmup_scheme", type=str, default="t2t", help="""\
      How to warmup learning rates. Options include:
        t2t: Tensor2Tensor's way, start with lr 100 times smaller, then
             exponentiate until the specified lr.\
      """)
  parser.add_argument(
      "--decay_scheme", type=str, default="", help="""\
      How we decay learning rate. Options include:
        luong234: after 2/3 num train steps, we start halving the learning rate
          for 4 times before finishing.
        luong5: after 1/2 num train steps, we start halving the learning rate
          for 5 times before finishing.\
        luong10: after 1/2 num train steps, we start halving the learning rate
          for 10 times before finishing.\
      """)
  parser.add_argument(
      "--num_train_steps", type=int, default=12000, help="Num steps to train.")
  parser.add_argument("--colocate_gradients_with_ops", type="bool", nargs="?",
                      const=True,
                      default=True,
                      help=("Whether try colocating gradients with "
                            "corresponding op"))
  # initializer
  parser.add_argument("--init_op", type=str, default="uniform",
                      help="uniform | glorot_normal | glorot_uniform")
  parser.add_argument("--init_weight", type=float, default=0.1,
                      help=("for uniform init_op, initialize weights "
                            "between [-this, this]."))
  # data
  parser.add_argument("--src", type=str, default=None,
                      help="Source suffix, e.g., en.")
  parser.add_argument("--tgt", type=str, default=None,
                      help="Target suffix, e.g., de.")
  parser.add_argument("--train_prefix", type=str, default=None,
                      help="Train prefix, expect files with src/tgt suffixes.")
  parser.add_argument("--dev_prefix", type=str, default=None,
                      help="Dev prefix, expect files with src/tgt suffixes.")
  parser.add_argument("--test_prefix", type=str, default=None,
                      help="Test prefix, expect files with src/tgt suffixes.")
  parser.add_argument("--out_dir", type=str, default=None,
                      help="Store log/model files.")
  # Vocab
  parser.add_argument("--vocab_prefix", type=str, default=None, help="""\
      Vocab prefix, expect files with src/tgt suffixes.\
      """)
  parser.add_argument("--embed_prefix", type=str, default=None, help="""\
      Pretrained embedding prefix, expect files with src/tgt suffixes.
      The embedding files should be Glove formated txt files.\
      """)
  parser.add_argument("--sos", type=str, default="<s>",
                      help="Start-of-sentence symbol.")
  parser.add_argument("--eos", type=str, default="</s>",
                      help="End-of-sentence symbol.")
  parser.add_argument("--share_vocab", type="bool", nargs="?", const=True,
                      default=False,
                      help="""\
      Whether to use the source vocab and embeddings for both source and
      target.\
      """)
  parser.add_argument("--check_special_token", type="bool", default=True,
                      help="""\
                      Whether check special sos, eos, unk tokens exist in the
                      vocab files.\
                      """)
  # Sequence lengths
  parser.add_argument("--src_max_len", type=int, default=50,
                      help="Max length of src sequences during training.")
  parser.add_argument("--tgt_max_len", type=int, default=50,
                      help="Max length of tgt sequences during training.")
  parser.add_argument("--src_max_len_infer", type=int, default=None,
                      help="Max length of src sequences during inference.")
  parser.add_argument("--tgt_max_len_infer", type=int, default=None,
                      help="""\
      Max length of tgt sequences during inference.  Also use to restrict the
      maximum decoding length.\
      """)
  # Default settings works well (rarely need to change)
  parser.add_argument("--unit_type", type=str, default="lstm",
                      help="lstm | gru | layer_norm_lstm | nas")
  parser.add_argument("--forget_bias", type=float, default=1.0,
                      help="Forget bias for BasicLSTMCell.")
  parser.add_argument("--dropout", type=float, default=0.2,
                      help="Dropout rate (not keep_prob)")
  parser.add_argument("--max_gradient_norm", type=float, default=5.0,
                      help="Clip gradients to this norm.")
  parser.add_argument("--batch_size", type=int, default=128, help="Batch size.")
  parser.add_argument("--steps_per_stats", type=int, default=100,
                      help=("How many training steps to do per stats logging."
                            "Save checkpoint every 10x steps_per_stats"))
  parser.add_argument("--max_train", type=int, default=0,
                      help="Limit on the size of training data (0: no limit).")
  parser.add_argument("--num_buckets", type=int, default=5,
                      help="Put data into similar-length buckets.")
  parser.add_argument("--num_sampled_softmax", type=int, default=0,
                      help=("Use sampled_softmax_loss if > 0."
                            "Otherwise, use full softmax loss."))
  # SPM
  parser.add_argument("--subword_option", type=str, default="",
                      choices=["", "bpe", "spm"],
                      help="""\
                      Set to bpe or spm to activate subword desegmentation.\
                      """)
  # Experimental encoding feature.
  parser.add_argument("--use_char_encode", type="bool", default=False,
                      help="""\
                      Whether to split each word or bpe into character, and then
                      generate the word-level representation from the character
                      reprentation.
                      """)
  # Misc
  parser.add_argument("--num_gpus", type=int, default=1,
                      help="Number of gpus in each worker.")
  parser.add_argument("--log_device_placement", type="bool", nargs="?",
                      const=True, default=False, help="Debug GPU allocation.")
  parser.add_argument("--metrics", type=str, default="bleu",
                      help=("Comma-separated list of evaluations "
                            "metrics (bleu,rouge,accuracy)"))
  parser.add_argument("--steps_per_external_eval", type=int, default=None,
                      help="""\
      How many training steps to do per external evaluation.  Automatically set
      based on data if None.\
      """)
  parser.add_argument("--scope", type=str, default=None,
                      help="scope to put variables under")
  parser.add_argument("--hparams_path", type=str, default=None,
                      help=("Path to standard hparams json file that overrides"
                            "hparams values from FLAGS."))
  parser.add_argument("--random_seed", type=int, default=None,
                      help="Random seed (>0, set a specific seed).")
  parser.add_argument("--override_loaded_hparams", type="bool", nargs="?",
                      const=True, default=False,
                      help="Override loaded hparams with values specified")
  parser.add_argument("--num_keep_ckpts", type=int, default=5,
                      help="Max number of checkpoints to keep.")
  parser.add_argument("--avg_ckpts", type="bool", nargs="?",
                      const=True, default=False, help=("""\
                      Average the last N checkpoints for external evaluation.
                      N can be controlled by setting --num_keep_ckpts.\
                      """))
  parser.add_argument("--language_model", type="bool", nargs="?",
                      const=True, default=False,
                      help="True to train a language model, ignoring encoder")
  # Inference
  parser.add_argument("--ckpt", type=str, default="",
                      help="Checkpoint file to load a model for inference.")
  parser.add_argument("--inference_input_file", type=str, default=None,
                      help="Set to the text to decode.")
  parser.add_argument("--inference_list", type=str, default=None,
                      help=("A comma-separated list of sentence indices "
                            "(0-based) to decode."))
  parser.add_argument("--infer_batch_size", type=int, default=32,
                      help="Batch size for inference mode.")
  parser.add_argument("--inference_output_file", type=str, default=None,
                      help="Output file to store decoding results.")
  parser.add_argument("--inference_ref_file", type=str, default=None,
                      help=("""\
      Reference file to compute evaluation scores (if provided).\
      """))
  # Advanced inference arguments
  parser.add_argument("--infer_mode", type=str, default="greedy",
                      choices=["greedy", "sample", "beam_search"],
                      help="Which type of decoder to use during inference.")
  parser.add_argument("--beam_width", type=int, default=0,
                      help=("""\
      beam width when using beam search decoder. If 0 (default), use standard
      decoder with greedy helper.\
      """))
  parser.add_argument("--length_penalty_weight", type=float, default=0.0,
                      help="Length penalty for beam search.")
  parser.add_argument("--coverage_penalty_weight", type=float, default=0.0,
                      help="Coverage penalty for beam search.")
  parser.add_argument("--sampling_temperature", type=float,
                      default=0.0,
                      help=("""\
      Softmax sampling temperature for inference decoding, 0.0 means greedy
      decoding. This option is ignored when using beam search.\
      """))
  parser.add_argument("--num_translations_per_input", type=int, default=1,
                      help=("""\
      Number of translations generated for each sentence. This is only used for
      inference.\
      """))
  # Job info
  parser.add_argument("--jobid", type=int, default=0,
                      help="Task id of the worker.")
  parser.add_argument("--num_workers", type=int, default=1,
                      help="Number of workers (inference only).")
  parser.add_argument("--num_inter_threads", type=int, default=0,
                      help="number of inter_op_parallelism_threads")
  parser.add_argument("--num_intra_threads", type=int, default=0,
                      help="number of intra_op_parallelism_threads")
def create_hparams(flags):
  """Create training hparams.

  Mirrors the parsed command-line flags one-to-one into a
  tf.contrib.training.HParams object; `epoch_step` is the only field not
  taken from `flags` (it records progress within an epoch).

  Args:
    flags: parsed argparse namespace produced by the argument parser.

  Returns:
    A tf.contrib.training.HParams holding all training/inference settings.
  """
  return tf.contrib.training.HParams(
      # Data
      src=flags.src,
      tgt=flags.tgt,
      train_prefix=flags.train_prefix,
      dev_prefix=flags.dev_prefix,
      test_prefix=flags.test_prefix,
      vocab_prefix=flags.vocab_prefix,
      embed_prefix=flags.embed_prefix,
      out_dir=flags.out_dir,

      # Networks
      num_units=flags.num_units,
      # Fall back to the legacy --num_layers flag when the per-side
      # layer counts are not given.
      num_encoder_layers=(flags.num_encoder_layers or flags.num_layers),
      num_decoder_layers=(flags.num_decoder_layers or flags.num_layers),
      dropout=flags.dropout,
      unit_type=flags.unit_type,
      encoder_type=flags.encoder_type,
      residual=flags.residual,
      time_major=flags.time_major,
      num_embeddings_partitions=flags.num_embeddings_partitions,

      # Attention mechanisms
      attention=flags.attention,
      attention_architecture=flags.attention_architecture,
      output_attention=flags.output_attention,
      pass_hidden_state=flags.pass_hidden_state,

      # Train
      optimizer=flags.optimizer,
      num_train_steps=flags.num_train_steps,
      batch_size=flags.batch_size,
      init_op=flags.init_op,
      init_weight=flags.init_weight,
      max_gradient_norm=flags.max_gradient_norm,
      learning_rate=flags.learning_rate,
      warmup_steps=flags.warmup_steps,
      warmup_scheme=flags.warmup_scheme,
      decay_scheme=flags.decay_scheme,
      colocate_gradients_with_ops=flags.colocate_gradients_with_ops,
      num_sampled_softmax=flags.num_sampled_softmax,

      # Data constraints
      num_buckets=flags.num_buckets,
      max_train=flags.max_train,
      src_max_len=flags.src_max_len,
      tgt_max_len=flags.tgt_max_len,

      # Inference
      src_max_len_infer=flags.src_max_len_infer,
      tgt_max_len_infer=flags.tgt_max_len_infer,
      infer_batch_size=flags.infer_batch_size,

      # Advanced inference arguments
      infer_mode=flags.infer_mode,
      beam_width=flags.beam_width,
      length_penalty_weight=flags.length_penalty_weight,
      coverage_penalty_weight=flags.coverage_penalty_weight,
      sampling_temperature=flags.sampling_temperature,
      num_translations_per_input=flags.num_translations_per_input,

      # Vocab
      sos=flags.sos if flags.sos else vocab_utils.SOS,
      eos=flags.eos if flags.eos else vocab_utils.EOS,
      subword_option=flags.subword_option,
      check_special_token=flags.check_special_token,
      use_char_encode=flags.use_char_encode,

      # Misc
      forget_bias=flags.forget_bias,
      num_gpus=flags.num_gpus,
      epoch_step=0,  # record where we were within an epoch.
      steps_per_stats=flags.steps_per_stats,
      steps_per_external_eval=flags.steps_per_external_eval,
      share_vocab=flags.share_vocab,
      metrics=flags.metrics.split(","),
      log_device_placement=flags.log_device_placement,
      random_seed=flags.random_seed,
      override_loaded_hparams=flags.override_loaded_hparams,
      num_keep_ckpts=flags.num_keep_ckpts,
      avg_ckpts=flags.avg_ckpts,
      language_model=flags.language_model,
      num_intra_threads=flags.num_intra_threads,
      num_inter_threads=flags.num_inter_threads,
  )
def _add_argument(hparams, key, value, update=True):
"""Add an argument to hparams; if exists, change the value if update==True."""
if hasattr(hparams, key):
if update:
setattr(hparams, key, value)
else:
hparams.add_hparam(key, value)
def extend_hparams(hparams):
  """Validate flag combinations and add derived arguments to hparams.

  Derives residual-layer counts, vocab sizes/files, embedding-partition
  settings and per-metric bookkeeping, adding them in place via
  _add_argument.

  Args:
    hparams: HParams from create_hparams (or loaded from disk).

  Returns:
    The same hparams object, extended.

  Raises:
    ValueError: on an inconsistent flag combination (odd layer count for a
      bidirectional encoder, too few layers for gnmt attention, bad subword
      option, missing vocab_prefix, or invalid beam/sampling settings for
      the chosen infer_mode).
  """
  # Sanity checks
  if hparams.encoder_type == "bi" and hparams.num_encoder_layers % 2 != 0:
    raise ValueError("For bi, num_encoder_layers %d should be even" %
                     hparams.num_encoder_layers)
  if (hparams.attention_architecture in ["gnmt"] and
      hparams.num_encoder_layers < 2):
    raise ValueError("For gnmt attention architecture, "
                     "num_encoder_layers %d should be >= 2" %
                     hparams.num_encoder_layers)
  if hparams.subword_option and hparams.subword_option not in ["spm", "bpe"]:
    raise ValueError("subword option must be either spm, or bpe")
  # Bug fix: the original messages concatenated without spaces
  # ("beam_searchdecoder.", "usingsample decoder.") and dropped "be".
  if hparams.infer_mode == "beam_search" and hparams.beam_width <= 0:
    raise ValueError("beam_width must be greater than 0 when using "
                     "beam_search decoder.")
  if hparams.infer_mode == "sample" and hparams.sampling_temperature <= 0.0:
    raise ValueError("sampling_temperature must be greater than 0.0 when "
                     "using sample decoder.")

  # Different number of encoder / decoder layers
  assert hparams.num_encoder_layers and hparams.num_decoder_layers
  if hparams.num_encoder_layers != hparams.num_decoder_layers:
    # Hidden state can only be passed through when the layer counts match.
    hparams.pass_hidden_state = False
    utils.print_out("Num encoder layer %d is different from num decoder layer"
                    " %d, so set pass_hidden_state to False" % (
                        hparams.num_encoder_layers,
                        hparams.num_decoder_layers))

  # Set residual layers
  num_encoder_residual_layers = 0
  num_decoder_residual_layers = 0
  if hparams.residual:
    if hparams.num_encoder_layers > 1:
      num_encoder_residual_layers = hparams.num_encoder_layers - 1
    if hparams.num_decoder_layers > 1:
      num_decoder_residual_layers = hparams.num_decoder_layers - 1

    if hparams.encoder_type == "gnmt":
      # The first unidirectional layer (after the bi-directional layer) in
      # the GNMT encoder can't have residual connection due to the input is
      # the concatenation of fw_cell and bw_cell's outputs.
      num_encoder_residual_layers = hparams.num_encoder_layers - 2

      # Compatible for GNMT models
      if hparams.num_encoder_layers == hparams.num_decoder_layers:
        num_decoder_residual_layers = num_encoder_residual_layers
  _add_argument(hparams, "num_encoder_residual_layers",
                num_encoder_residual_layers)
  _add_argument(hparams, "num_decoder_residual_layers",
                num_decoder_residual_layers)

  # Language modeling
  if getattr(hparams, "language_model", None):
    hparams.attention = ""
    hparams.attention_architecture = ""
    hparams.pass_hidden_state = False
    hparams.share_vocab = True
    hparams.src = hparams.tgt
    utils.print_out("For language modeling, we turn off attention and "
                    "pass_hidden_state; turn on share_vocab; set src to tgt.")

  ## Vocab
  # Get vocab file names first
  if hparams.vocab_prefix:
    src_vocab_file = hparams.vocab_prefix + "." + hparams.src
    tgt_vocab_file = hparams.vocab_prefix + "." + hparams.tgt
  else:
    raise ValueError("hparams.vocab_prefix must be provided.")

  # Source vocab
  check_special_token = getattr(hparams, "check_special_token", True)
  src_vocab_size, src_vocab_file = vocab_utils.check_vocab(
      src_vocab_file,
      hparams.out_dir,
      check_special_token=check_special_token,
      sos=hparams.sos,
      eos=hparams.eos,
      unk=vocab_utils.UNK)

  # Target vocab
  if hparams.share_vocab:
    utils.print_out(" using source vocab for target")
    tgt_vocab_file = src_vocab_file
    tgt_vocab_size = src_vocab_size
  else:
    tgt_vocab_size, tgt_vocab_file = vocab_utils.check_vocab(
        tgt_vocab_file,
        hparams.out_dir,
        check_special_token=check_special_token,
        sos=hparams.sos,
        eos=hparams.eos,
        unk=vocab_utils.UNK)
  _add_argument(hparams, "src_vocab_size", src_vocab_size)
  _add_argument(hparams, "tgt_vocab_size", tgt_vocab_size)
  _add_argument(hparams, "src_vocab_file", src_vocab_file)
  _add_argument(hparams, "tgt_vocab_file", tgt_vocab_file)

  # Num embedding partitions
  num_embeddings_partitions = getattr(hparams, "num_embeddings_partitions", 0)
  _add_argument(hparams, "num_enc_emb_partitions", num_embeddings_partitions)
  _add_argument(hparams, "num_dec_emb_partitions", num_embeddings_partitions)

  # Pretrained Embeddings
  _add_argument(hparams, "src_embed_file", "")
  _add_argument(hparams, "tgt_embed_file", "")
  if getattr(hparams, "embed_prefix", None):
    src_embed_file = hparams.embed_prefix + "." + hparams.src
    tgt_embed_file = hparams.embed_prefix + "." + hparams.tgt

    if tf.gfile.Exists(src_embed_file):
      utils.print_out(" src_embed_file %s exist" % src_embed_file)
      hparams.src_embed_file = src_embed_file
      utils.print_out(
          "For pretrained embeddings, set num_enc_emb_partitions to 1")
      hparams.num_enc_emb_partitions = 1
    else:
      utils.print_out(" src_embed_file %s doesn't exist" % src_embed_file)

    if tf.gfile.Exists(tgt_embed_file):
      utils.print_out(" tgt_embed_file %s exist" % tgt_embed_file)
      hparams.tgt_embed_file = tgt_embed_file
      utils.print_out(
          "For pretrained embeddings, set num_dec_emb_partitions to 1")
      hparams.num_dec_emb_partitions = 1
    else:
      utils.print_out(" tgt_embed_file %s doesn't exist" % tgt_embed_file)

  # Evaluation
  for metric in hparams.metrics:
    best_metric_dir = os.path.join(hparams.out_dir, "best_" + metric)
    tf.gfile.MakeDirs(best_metric_dir)
    # update=False: keep a previously recorded best score if one exists.
    _add_argument(hparams, "best_" + metric, 0, update=False)
    _add_argument(hparams, "best_" + metric + "_dir", best_metric_dir)

    if getattr(hparams, "avg_ckpts", None):
      best_metric_dir = os.path.join(hparams.out_dir, "avg_best_" + metric)
      tf.gfile.MakeDirs(best_metric_dir)
      _add_argument(hparams, "avg_best_" + metric, 0, update=False)
      _add_argument(hparams, "avg_best_" + metric + "_dir", best_metric_dir)

  return hparams
def ensure_compatible_hparams(hparams, default_hparams, hparams_path=""):
  """Make sure the loaded hparams is compatible with new changes.

  Args:
    hparams: HParams loaded from a previous run's output directory.
    default_hparams: HParams built from the current command line.
    hparams_path: optional path to a standard-hparams file that overrides
      default_hparams first.

  Returns:
    The (mutated) loaded hparams: new default keys are added, and selected
    keys are overwritten from the current defaults.
  """
  default_hparams = utils.maybe_parse_standard_hparams(
      default_hparams, hparams_path)

  # Set num encoder/decoder layers (for old checkpoints)
  if hasattr(hparams, "num_layers"):
    if not hasattr(hparams, "num_encoder_layers"):
      hparams.add_hparam("num_encoder_layers", hparams.num_layers)
    if not hasattr(hparams, "num_decoder_layers"):
      hparams.add_hparam("num_decoder_layers", hparams.num_layers)

  # For compatible reason, if there are new fields in default_hparams,
  # we add them to the current hparams
  default_config = default_hparams.values()  # key -> value mapping
  config = hparams.values()
  for key in default_config:
    if key not in config:
      hparams.add_hparam(key, default_config[key])

  # Update all hparams' keys if override_loaded_hparams=True
  if getattr(default_hparams, "override_loaded_hparams", None):
    overwritten_keys = default_config.keys()
  else:
    # For inference: only the inference-related keys are refreshed.
    overwritten_keys = INFERENCE_KEYS
  for key in overwritten_keys:
    if getattr(hparams, key) != default_config[key]:
      utils.print_out("# Updating hparams.%s: %s -> %s" %
                      (key, str(getattr(hparams, key)),
                       str(default_config[key])))
      setattr(hparams, key, default_config[key])

  return hparams
def create_or_load_hparams(
    out_dir, default_hparams, hparams_path, save_hparams=True):
  """Load hparams from out_dir when present, otherwise build fresh ones.

  Either way the result is run through extend_hparams, optionally saved
  (to out_dir and each best-metric directory), printed, and returned.
  """
  loaded = utils.load_hparams(out_dir)
  if loaded:
    # Reconcile a previously saved config with the current defaults.
    hparams = ensure_compatible_hparams(loaded, default_hparams, hparams_path)
  else:
    # No saved config: start from the defaults plus any standard overrides.
    hparams = utils.maybe_parse_standard_hparams(default_hparams, hparams_path)
  hparams = extend_hparams(hparams)

  # Save HParams
  if save_hparams:
    utils.save_hparams(out_dir, hparams)
    for metric in hparams.metrics:
      utils.save_hparams(getattr(hparams, "best_" + metric + "_dir"), hparams)

  # Print HParams
  utils.print_hparams(hparams)
  return hparams
def run_main(flags, default_hparams, train_fn, inference_fn, target_session=""):
  """Run main.

  Dispatches to inference when --inference_input_file is set, otherwise
  trains.

  Args:
    flags: parsed argparse namespace.
    default_hparams: HParams from create_hparams(flags).
    train_fn: callable(hparams, target_session=...) that runs training.
    inference_fn: callable(ckpt, input_file, trans_file, hparams,
      num_workers, jobid) that runs decoding.
    target_session: session target forwarded to train_fn.
  """
  # Job
  jobid = flags.jobid
  num_workers = flags.num_workers
  utils.print_out("# Job id %d" % jobid)

  # GPU device
  # NOTE(review): a throwaway tf.Session is created just to list devices.
  utils.print_out(
      "# Devices visible to TensorFlow: %s" % repr(tf.Session().list_devices()))

  # Random
  random_seed = flags.random_seed
  if random_seed is not None and random_seed > 0:
    utils.print_out("# Set random seed to %d" % random_seed)
    random.seed(random_seed + jobid)  # offset by jobid so workers differ
    np.random.seed(random_seed + jobid)

  # Model output directory
  out_dir = flags.out_dir
  if out_dir and not tf.gfile.Exists(out_dir):
    utils.print_out("# Creating output directory %s ..." % out_dir)
    tf.gfile.MakeDirs(out_dir)

  # Load hparams.
  loaded_hparams = False
  if flags.ckpt:  # Try to load hparams from the same directory as ckpt
    ckpt_dir = os.path.dirname(flags.ckpt)
    ckpt_hparams_file = os.path.join(ckpt_dir, "hparams")
    if tf.gfile.Exists(ckpt_hparams_file) or flags.hparams_path:
      hparams = create_or_load_hparams(
          ckpt_dir, default_hparams, flags.hparams_path,
          save_hparams=False)
      loaded_hparams = True
  if not loaded_hparams:  # Try to load from out_dir
    assert out_dir
    hparams = create_or_load_hparams(
        out_dir, default_hparams, flags.hparams_path,
        save_hparams=(jobid == 0))  # only worker 0 writes hparams

  ## Train / Decode
  if flags.inference_input_file:
    # Inference output directory
    trans_file = flags.inference_output_file
    assert trans_file
    trans_dir = os.path.dirname(trans_file)
    if not tf.gfile.Exists(trans_dir): tf.gfile.MakeDirs(trans_dir)

    # Inference indices
    hparams.inference_indices = None
    if flags.inference_list:
      (hparams.inference_indices) = (
          [int(token) for token in flags.inference_list.split(",")])

    # Inference
    ckpt = flags.ckpt
    if not ckpt:
      ckpt = tf.train.latest_checkpoint(out_dir)
    inference_fn(ckpt, flags.inference_input_file,
                 trans_file, hparams, num_workers, jobid)

    # Evaluation
    ref_file = flags.inference_ref_file
    if ref_file and tf.gfile.Exists(trans_file):
      for metric in hparams.metrics:
        score = evaluation_utils.evaluate(
            ref_file,
            trans_file,
            metric,
            hparams.subword_option)
        utils.print_out(" %s: %.1f" % (metric, score))
  else:
    # Train
    train_fn(hparams, target_session=target_session)
def main(unused_argv):
  """tf.app.run entry point: wire FLAGS to the train/inference driver."""
  run_main(FLAGS, create_hparams(FLAGS), train.train, inference.inference)
if __name__ == "__main__":
  # Parse the flags we know about; anything unrecognized is forwarded to
  # tf.app.run so TF/absl flags still work.
  nmt_parser = argparse.ArgumentParser()
  add_arguments(nmt_parser)
  FLAGS, unparsed = nmt_parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| tensorflow/nmt | nmt/nmt.py | Python | apache-2.0 | 29,369 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Initial migration: creates the Subscription table with a single
    # unique email column.  Auto-generated; edit with care.

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Subscription',
            fields=[
                ('id', models.AutoField(serialize=False, verbose_name='ID', auto_created=True, primary_key=True)),
                ('email', models.EmailField(max_length=254, unique=True)),
            ],
        ),
    ]
| byteweaver/django-newsletters | newsletters/migrations/0001_initial.py | Python | bsd-3-clause | 503 |
from django.http import HttpResponse
from models import CeleryHealthCheck
def CeleryHealthCheckView(request):
    """Plain-text view reporting the Celery health-check status."""
    status_text = CeleryHealthCheck.get_healthcheck_string()
    return HttpResponse(status_text)
| globocom/database-as-a-service | dbaas/system/views.py | Python | bsd-3-clause | 180 |
import sys
import unittest
import tutil
import webvulnscan
# Minimal page containing a POST form; served by the "vulnerable" test
# handlers below under varying Content-Type / X-Frame-Options headers.
FORM_HTML = b'''<html>this is a form:
<form action="./delete" method="post">
<input type="submit" />
</form>
</html>'''
class ClickjackTest(unittest.TestCase):
    """Tests for the clickjack attack detector.

    Each test returns a mapping of URL path -> response (either a body
    string or a (status, body, headers) tuple).  The
    @tutil.webtest(expected) decorator serves the site and asserts whether
    the scanner flags it as vulnerable (expected=True) or clean.
    """
    attack = webvulnscan.attacks.clickjack

    @tutil.webtest(False)
    def test_clickjack():
        # Only links, no POST form: nothing clickjacking could trigger.
        return {
            '/': u'''<html>
<a href="./go">Links are (supposed to be) idempotent</a>
</html>''',
            '/go': u'''<html><body>Nothing here!</body></html>'''
        }

    @tutil.webtest(False)
    def test_clickjack_get_form():
        # An implicit-GET form is idempotent, so it is not flagged.
        return {
            '/': u'''<html>
<form>
The default method is GET, so this should be fine
<input type="submit" />
</form>
</html>'''
        }

    @tutil.webtest(False)
    def test_clickjack_get_form_second():
        # Same as above with the method spelled out explicitly.
        return {
            '/': u'''<html>
<form method="GET">
Explicitly specifying GET works too
<input type="submit" />
</form>
</html>'''
        }

    @tutil.webtest(True)
    def test_clickjack_vulnerable_site():
        # POST form served without any X-Frame-Options header: vulnerable.
        return {
            '/': (
                200, FORM_HTML,
                {'Content-Type': 'text/html; charset=utf-8'}),
            '/delete': u'''<html><body>Executed!</body></html>'''
        }

    @tutil.webtest(True)
    def test_clickjack_vulnerable_alternative_content_type():
        # XHTML content type must be scanned just like text/html.
        return {
            '/': (
                200, FORM_HTML,
                {'Content-Type': 'application/xhtml+xml; charset=utf-8'}),
            '/delete': u'''<html><body>Executed!</body></html>'''
        }

    @tutil.webtest(False)
    def test_clickjack_secured_site():
        # X-Frame-Options: DENY prevents framing entirely.
        return {
            '/': (
                200, FORM_HTML,
                {'Content-Type': 'text/html; charset=utf-8',
                 'X-Frame-Options': 'DENY'}),
            '/delete': u'''<html><body>Executed!</body></html>'''
        }

    @tutil.webtest(False)
    def test_clickjack_sameorigin_site():
        # SAMEORIGIN is an accepted protection.
        return {
            '/': (
                200, FORM_HTML,
                {'Content-Type': 'text/html; charset=utf-8',
                 'X-Frame-Options': 'SAMEORIGIN'}),
            '/delete': u'''<html><body>Executed!</body></html>'''
        }

    @tutil.webtest(False)
    def test_clickjack_allowfrom_site():
        # ALLOW-FROM with a whitelisted origin is an accepted protection.
        return {
            '/': (
                200, FORM_HTML,
                {'Content-Type': 'text/html; charset=utf-8',
                 'X-Frame-Options': 'ALLOW-FROM http://safe.example.org/'}),
            '/delete': u'''<html><body>Executed!</body></html>'''
        }

    @tutil.webtest(True)
    def test_invalid_header():
        # A malformed X-Frame-Options value gives no protection.
        return {
            '/': (
                200, FORM_HTML,
                {'Content-Type': 'text/html; charset=utf-8',
                 'X-Frame-Options': 'None please!'}),
            '/delete': u'''<html><body>Executed!</body></html>'''
        }
| hhucn/webvulnscan | test/test_clickjack.py | Python | mit | 3,145 |
#!/usr/bin/env python3
"""Download the message schemas listed in eddnlistener-config.json into the
local schemas/ directory."""
import os
import io
import json

import requests

# Read the configuration with a context manager.  The original opened the
# file via os.open()/os.fdopen() and then os.close()d the raw descriptor
# while the file object was still alive, double-closing the fd when the
# file object was finalized.
with open("eddnlistener-config.json") as config_file:
    config = json.load(config_file)

for schema in config['schemas']:
    response = requests.get(schema['message_schema'])
    if response.status_code == 200:
        # write() the whole text in one call; the original used
        # writelines() on a str, which iterates character by character.
        with io.open("schemas/" + schema['local_schema'], "w") as schema_file:
            schema_file.write(response.text)
| Athanasius/eddn-listener | scripts/update-schemas.py | Python | mit | 438 |
"""
WSGI config for timberline project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Default to the project settings module unless the environment already
# names one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "timberline.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| micolous/timberline | timberline/wsgi.py | Python | agpl-3.0 | 1,142 |
#/* author:@shivkrthakur */
# Enter your code here. Read input from STDIN. Print output to STDOUT
inputNumber = int(raw_input().strip())
list = []
while inputNumber != 0:
input = raw_input().split(' ');
if input[0] == 'insert':
list.insert(int(input[1]),int(input[2]))
elif input[0] == 'print':
print list
elif input[0] == 'remove':
list.remove(int(input[1]))
elif input[0] == 'append':
list.append(int(input[1]))
elif input[0] == 'sort':
list.sort()
elif input[0] == 'pop':
list.pop()
elif input[0] == 'reverse':
list.reverse()
inputNumber -= 1
| shivkrthakur/HackerRankSolutions | Practice/AllDomains/Languages/Python/BasicDataTypes/Lists.py | Python | mit | 644 |
# Copyright 2010 Jacob Kaplan-Moss
# Copyright 2011 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception definitions.
.. py:exception:: AuthorizationFailure
.. py:exception:: ClientException
.. py:exception:: HttpError
.. py:exception:: ValidationError
.. py:exception:: Unauthorized
"""
from keystoneclient.i18n import _
from keystoneclient.openstack.common.apiclient.exceptions import * # noqa
# NOTE(akurilin): This alias should be left here to support backwards
# compatibility until we are sure that usage of these exceptions in
# projects is correct.
# Old public names -> current apiclient exception classes.
ConnectionError = ConnectionRefused
HTTPNotImplemented = HttpNotImplemented
Timeout = RequestTimeout
HTTPError = HttpError
class CertificateConfigError(Exception):
    """Raised when a certificate cannot be loaded.

    The raw output of the failed operation is kept on ``self.output``.
    """

    def __init__(self, output):
        super(CertificateConfigError, self).__init__(
            _('Unable to load certificate.'))
        self.output = output
class CMSError(Exception):
    """Raised when data cannot be signed or verified.

    The raw output of the failed operation is kept on ``self.output``.
    """

    def __init__(self, output):
        super(CMSError, self).__init__(_('Unable to sign or verify data.'))
        self.output = output
class EmptyCatalog(EndpointNotFound):
    """The service catalog is empty, so no endpoint could be found."""
    pass
class SSLError(ConnectionRefused):
    """An SSL error occurred while connecting to the server."""
class DiscoveryFailure(ClientException):
    """Discovery of available client/API versions failed."""
class VersionNotAvailable(DiscoveryFailure):
    """Discovery failed as the version you requested is not available."""
class MethodNotImplemented(ClientException):
    """Method not implemented by the keystoneclient API."""
class MissingAuthPlugin(ClientException):
    """An authenticated request is required but no plugin available."""
class NoMatchingPlugin(ClientException):
    """No auth plugin could be created from the parameters provided.

    :param str name: The name of the plugin that was attempted to load.

    .. py:attribute:: name

        The name of the plugin that was attempted to load.
    """

    def __init__(self, name):
        self.name = name
        super(NoMatchingPlugin, self).__init__(
            _('The plugin %s could not be found') % name)
class InvalidResponse(ClientException):
    """The server's response is not valid for this request.

    The offending response object is kept on ``self.response``.
    """

    def __init__(self, response):
        self.response = response
        super(InvalidResponse, self).__init__()
| darren-wang/ksc | keystoneclient/exceptions.py | Python | apache-2.0 | 3,002 |
# pylint: disable=invalid-name
import ast
from warning import Warnings
from backend import visit_expression, assign, unify_types, ExtendedContext, \
Scope, static_evaluate, UnknownValue, NoneType, Bool, List, Instance, \
Class, Unknown, maybe_inferences, Symbol, type_subset, Context, \
construct_function_type, FunctionSignature, ClassEvaluator, Union, Set, \
Dict, Str
class ScopeVisitor(ast.NodeVisitor):
    """AST visitor that type-checks a module, tracking scopes and warnings.

    Walks the tree while maintaining a Context of symbols; expression types
    are checked against expectations via the backend and problems are
    accumulated in a Warnings collector.
    """

    def __init__(self, filepath='', context=None, imported=None, warnings=None):
        ast.NodeVisitor.__init__(self)
        self._filepath = filepath
        self._warnings = Warnings(filepath) if warnings is None else warnings
        self._context = context if context is not None else Context()
        # Bug fix: the original declared `imported=[]`, a mutable default
        # shared by every instance constructed without an explicit value;
        # use None as the sentinel and build a fresh list per instance.
        self._imported = [] if imported is None else imported
        self._annotations = []
        self._class_instance = None

    def clone(self):
        """Return a new visitor sharing this one's context and imports."""
        return ScopeVisitor(self._filepath, self.context(), self._imported)

    def scope(self):
        return self._context.get_top_scope()

    def context(self):
        return ExtendedContext(self._context)

    def warnings(self):
        return self._warnings

    def annotations(self):
        return self._annotations

    def report(self):
        """Return (top scope, warnings, annotations) for this visit."""
        return self.scope(), self.warnings(), self.annotations()

    def begin_scope(self, scope=None):
        self._context.begin_scope(scope)

    def end_scope(self):
        return self._context.end_scope()

    def merge_scope(self, scope):
        self._context.merge_scope(scope)

    def warn(self, category, node, details=None):
        self._warnings.warn(node, category, details)

    def evaluate(self, node):
        """Statically evaluate `node` in the current context."""
        return static_evaluate(node, self.context())

    def check_type(self, node, expected_type=Unknown()):
        """Type-check expression `node`; warn if it escapes `expected_type`."""
        computed_type = visit_expression(node, expected_type, self.context(),
                                         self._warnings)
        if (not type_subset(computed_type, expected_type)
                and not isinstance(computed_type, Unknown)):
            details = '{0} vs {1}'.format(computed_type, expected_type)
            self.warn('type-error', node, details)
        return computed_type

    def check_assign(self, node, target, value, generator=False):
        """Perform an assignment, warning on rebinds and type changes."""
        assignments = assign(target, value, self._context,
                             self._warnings, generator=generator)
        for name, old_symbol, new_symbol in assignments:
            if old_symbol is not None:
                self.warn('reassignment', node, name)
                if new_symbol.get_type() != old_symbol.get_type():
                    details = '{0}: {1} -> {2}'.format(
                        name, old_symbol.get_type(), new_symbol.get_type())
                    self.warn('type-change', node, details)

    def visit_ClassDef(self, node):
        """Build a Class type for `node`, then re-visit with a dummy self."""
        # ignore warnings on the first pass because we don't have an
        # instance to pass in as "self"
        visitor = ScopeVisitor(self._filepath, self.context())
        visitor.begin_scope()
        visitor.generic_visit(node)
        scope = visitor.end_scope()
        if '__init__' in scope:
            signature = scope.get_type('__init__').signature
        else:
            signature = FunctionSignature('__init__')  # TODO: add self arg?
        return_type = Instance(node.name, Scope())  # dummy instance
        # TODO: separate class/static methods and attributes from the rest
        class_type = Class(node.name, signature, return_type, None, scope)
        class_type.evaluator = ClassEvaluator(class_type)
        self._context.add(Symbol(node.name, class_type))
        # now visit the class contents to generate warnings
        argument_scope = signature.generic_scope()
        self._class_instance = class_type.evaluator.evaluate(argument_scope)[0]
        self.begin_scope()
        self.generic_visit(node)  # now all functiondefs have access
        self.end_scope()          # to class instance to load "self"
        self._class_instance = None

    def visit_FunctionDef(self, node):
        """Register the function's type and cross-check its argument types."""
        visitor = ScopeVisitor(self._filepath, self.context(),
                               warnings=self._warnings)
        function_type = construct_function_type(node, visitor,
                                                self._class_instance)
        self._context.add(Symbol(node.name, function_type))
        # now check that all the types are consistent between
        # the default types, annotated types, and constrained types
        signature = function_type.signature
        types = zip(signature.names, signature.types,
                    signature.annotated_types, signature.default_types)
        for name, _, annotated_type, default_type in types:
            if (annotated_type != Unknown() and default_type != Unknown() and
                    default_type != annotated_type):
                self.warn('default-argument-type-error', node, name)

    def _check_return(self, return_type, static_value=None):
        # Merge this return's type into any previously recorded return type.
        previous_type = self._context.get_type()
        new_type = (unify_types([previous_type, return_type])
                    if previous_type is not None else return_type)
        # NOTE(review): by precedence this parses as
        # `static_value or (UnknownValue() if ... else UnknownValue())`,
        # making the conditional redundant; the intent may have been
        # `(static_value or UnknownValue()) if previous_type is None
        # else UnknownValue()` -- confirm before changing.
        value = (static_value or UnknownValue() if previous_type is None else
                 UnknownValue())
        self._context.set_return(Symbol('return', new_type, value))

    def check_return(self, node, is_yield=False):
        """Record the type (and static value) of a return/yield statement."""
        if node.value is None:
            value_type = NoneType()
        else:
            value_type = self.check_type(node.value, Unknown())
        # A generator's declared return type is a list of the yielded type.
        return_type = List(value_type) if is_yield else value_type
        static_value = self.evaluate(node.value)
        self._check_return(return_type, static_value)

    def visit_Return(self, node):
        self.check_return(node)
        self.generic_visit(node)

    def visit_Yield(self, node):  # not sure why python makes yield an expr
        self.check_return(node, is_yield=True)
        self.generic_visit(node)

    def visit_Assign(self, node):
        for target in node.targets:
            self.check_assign(node, target, node.value)
        self.generic_visit(node)

    def visit_AugAssign(self, node):
        self.check_assign(node, node.target, node.value)
        self.generic_visit(node)

    def visit_Delete(self, node):
        # TODO: need to support identifiers, dict items, attributes, list items
        #names = [target.id for target in node.targets]
        self.warn('delete', node)
        self.generic_visit(node)

    def _visit_branch(self, body, inferences):
        """Visit one branch of a conditional in its own scope pair."""
        # Note: need two scope layers, first for inferences and
        # second for symbols that are assigned within the branch
        if body is None:
            return Scope()
        self.begin_scope(Scope(inferences))
        self.begin_scope()
        for stmt in body:
            self.visit(stmt)
        scope = self.end_scope()
        self.end_scope()
        return_type = scope.get_type()
        if return_type is not None:
            self._check_return(return_type, scope.get_value())
        return scope

    def visit_If(self, node):
        self.check_type(node.test, Bool())
        test_value = static_evaluate(node.test, self.context())
        if not isinstance(test_value, UnknownValue):
            self.warn('constant-if-condition', node)
        ext_ctx = self.context()
        if_inferences, else_inferences = maybe_inferences(node.test, ext_ctx)

        # don't visit unreachable code
        if test_value is True:
            self._visit_branch(node.body, if_inferences)
            return
        if test_value is False:
            self._visit_branch(node.orelse, else_inferences)
            return

        if_scope = self._visit_branch(node.body, if_inferences)
        else_scope = self._visit_branch(node.orelse, else_inferences)

        # Names assigned in only one branch may be unbound afterwards.
        diffs = set(if_scope.names()) ^ set(else_scope.names())
        for diff in diffs:
            if diff not in self._context:
                self.warn('conditionally-assigned', node, diff)

        # Names assigned in both branches get the unified type.
        common = set(if_scope.names()) & set(else_scope.names())
        for name in common:
            types = [if_scope.get_type(name), else_scope.get_type(name)]
            unified_type = unify_types(types)
            self._context.add(Symbol(name, unified_type))
            if isinstance(unified_type, Unknown):
                if not any(isinstance(x, Unknown) for x in types):
                    self.warn('conditional-type', node, name)

    def visit_While(self, node):
        self.check_type(node.test, Bool())
        self.generic_visit(node)

    def visit_For(self, node):
        # Python doesn't create a scope for "for", but we will
        # treat it as if it did because it should
        union_type = Union(List(Unknown()), Set(Unknown()),
                           Dict(Unknown(), Unknown()), Str())
        self.check_type(node.iter, union_type)
        self.begin_scope()
        self.check_assign(node, node.target, node.iter, generator=True)
        self.generic_visit(node)
        self.end_scope()

    def visit_With(self, node):
        self.begin_scope()
        if node.optional_vars:
            self.check_assign(node, node.optional_vars, node.context_expr)
        self.generic_visit(node)
        self.end_scope()

    def visit_Expr(self, node):
        self.check_type(node.value, Unknown())

    def visit_Import(self, node):
        self.warn('non-global-import', node)

    def visit_ImportFrom(self, node):
        self.warn('non-global-import', node)
| clark800/pystarch | visitor.py | Python | mit | 9,484 |
from datetime import datetime
from urllib.request import urlopen, Request
from django.db import transaction
from bs4 import BeautifulSoup
from mutual_funds.finance.models import NAV
class FundParser:
    """
    Class for updating history data with bs4 and urllib.

    Scrapes the FT markets historical page for a fund and appends any NAV
    rows newer than the latest NAV already stored for it.
    """

    def __init__(self, fund):
        # `fund` is a model instance exposing `isin_ticker` and a `navs`
        # related manager (see mutual_funds.finance.models).
        self.fund = fund
        self.isin_ticker = self.fund.isin_ticker
        self.price_history = list()  # list of (datetime, float) rows

    def __download_url(self, url):
        # Fetch `url` with an AJAX-style header and parse it into self.soup.
        headers = {'X-Requested-With': 'XMLHttpRequest'}
        with urlopen(Request(url, headers=headers)) as webContent:
            webContent = webContent.read()
            self.soup = BeautifulSoup(webContent, "html5lib")

    def __routes(self, goto):
        # Dispatch to one of the known data sources.
        if goto == "bloomberg":
            # NOTE(review): self.bloombreg_ticker is never assigned anywhere
            # in this class (and looks misspelled); taking this branch would
            # raise AttributeError -- confirm the intended attribute.
            url = "https://www.bloomberg.com/quote/{}".format(self.bloombreg_ticker)
            self.__download_url(url)
        elif goto == "markets":
            url = "http://markets.ft.com/data/funds/tearsheet/summary?s={}".format(self.isin_ticker)
            self.__download_url(url)
        elif goto == "history":
            url = "http://markets.ft.com/data/funds/tearsheet/historical?s={}".format(self.isin_ticker)
            self.__download_url(url)

    def update(self):
        """Fetch the FT history page and persist NAVs newer than the last stored one."""
        self.__routes("history")
        unique_history_dates = set()
        # parsing main page
        for i in range(1, 100):
            try:
                str_date = self.soup("table")[0]('tr')[i].td.span.text
                str_float = self.soup("table")[0]('tr')[i]('td')[4].text
            except IndexError:
                # Ran past the last table row.
                break
            real_date = datetime.strptime(str_date, "%A, %B %d, %Y")
            real_float = float(str_float)
            if real_date.date() not in unique_history_dates:
                self.price_history.append((real_date, real_float))
                unique_history_dates.add(real_date.date())
        # adding new NAVS to database
        # NOTE(review): .latest() raises NAV.DoesNotExist when the fund has
        # no stored NAVs yet -- confirm callers guarantee at least one row.
        last_nav = self.fund.navs.latest()
        new_navs = []
        for item in self.price_history:
            # Rows are newest-first, so stop at the first already-stored date.
            if last_nav.date < item[0].date():
                new_navs.append(item)
            else:
                break
        with transaction.atomic():
            # saving history data
            for price_item in new_navs:
                NAV(
                    fund=self.fund,
                    date=price_item[0],
                    price=round(float(price_item[1]), 2),
                ).save()
| ArtemBernatskyy/FundExpert.NET | mutual_funds/finance/parsers/funds_updater.py | Python | gpl-3.0 | 2,438 |
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""googledatastore client."""
import os
import threading
from . import helper
from . import connection
from .connection import *
# Import the Datastore protos. These are listed separately to avoid importing
# the Datastore service, which conflicts with our Datastore class.
from google.datastore.v1beta3.datastore_pb2 import (
LookupRequest,
LookupResponse,
RunQueryRequest,
RunQueryResponse,
BeginTransactionRequest,
BeginTransactionResponse,
CommitRequest,
CommitResponse,
RollbackRequest,
RollbackResponse,
AllocateIdsRequest,
AllocateIdsResponse,
Mutation,
MutationResult,
ReadOptions)
from google.datastore.v1beta3.entity_pb2 import *
from google.datastore.v1beta3.query_pb2 import *
from google.protobuf.timestamp_pb2 import Timestamp
from google.protobuf.struct_pb2 import NULL_VALUE
from google.rpc.status_pb2 import Status
from google.rpc import code_pb2
from google.type.latlng_pb2 import LatLng
# Human-readable package version string.
__version__ = '5.0.0-beta'
# Machine-comparable version tuple mirroring __version__.
VERSION = (5, 0, 0, 'beta')
_conn_holder = {}  # thread id -> thread-local connection.
_options = {}  # Global options.
# Guards all access to _options and writes to _conn_holder.
_rlock = threading.RLock()
def set_options(**kwargs):
    """Set datastore connection options.

    Options set here override the environment-derived defaults used by
    get_default_connection().

    Args:
      project_id: the Cloud project to connect to. Exactly one of
          project_endpoint and project_id must be set.
      credentials: oauth2client.Credentials to authorize the connection.
      project_endpoint: the endpoint of the datastore API, defaults to the
          Google APIs production server.
    """
    with _rlock:
        _options.update(kwargs)
        # Invalidate every cached per-thread connection so the new options
        # take effect the next time get_default_connection() is called.
        _conn_holder.clear()
def get_default_connection():
    """Returns the default datastore connection for the calling thread.

    Defaults endpoint to helper.get_project_endpoint_from_env() and
    credentials to helper.get_credentials_from_env().
    Use set_options to override defaults.
    """
    # NOTE(review): id() of a dead thread can be recycled by a new thread;
    # presumably acceptable here since a recycled id only reuses a cached
    # connection object — confirm connections are thread-agnostic.
    tid = id(threading.current_thread())
    conn = _conn_holder.get(tid)
    if not conn:
        with(_rlock):
            # No other thread would insert a value in our slot, so no need
            # to recheck existence inside the lock.
            if 'project_endpoint' not in _options and 'project_id' not in _options:
                _options['project_endpoint'] = helper.get_project_endpoint_from_env()
            if 'credentials' not in _options:
                _options['credentials'] = helper.get_credentials_from_env()
            # We still need the lock when caching the thread local connection so we
            # don't race with _conn_holder.clear() in set_options().
            _conn_holder[tid] = conn = connection.Datastore(**_options)
    return conn
def lookup(request):
    """Issue a lookup on the shared default connection.

    See connection.Datastore.lookup.
    """
    conn = get_default_connection()
    return conn.lookup(request)
def run_query(request):
    """Run a query on the shared default connection.

    See connection.Datastore.run_query.
    """
    conn = get_default_connection()
    return conn.run_query(request)
def begin_transaction(request):
    """Begin a transaction on the shared default connection.

    See connection.Datastore.begin_transaction.
    """
    conn = get_default_connection()
    return conn.begin_transaction(request)
def commit(request):
    """Commit a transaction on the shared default connection.

    See connection.Datastore.commit.
    """
    conn = get_default_connection()
    return conn.commit(request)
def rollback(request):
    """Roll back a transaction on the shared default connection.

    See connection.Datastore.rollback.
    """
    conn = get_default_connection()
    return conn.rollback(request)
def allocate_ids(request):
    """Allocate datastore ids on the shared default connection.

    See connection.Datastore.allocate_ids.
    """
    conn = get_default_connection()
    return conn.allocate_ids(request)
| pcostell/google-cloud-datastore | python/googledatastore/__init__.py | Python | apache-2.0 | 3,958 |
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Michael DeHaan <michael.dehaan@gmail.com>, 2012-2013
# Copyright (c), Toshio Kuratomi <tkuratomi@ansible.com> 2016
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Binary (base-1024) multipliers for single-letter size suffixes, used by
# bytes_to_human() / human_to_bytes().
SIZE_RANGES = {
    'Y': 1 << 80,
    'Z': 1 << 70,
    'E': 1 << 60,
    'P': 1 << 50,
    'T': 1 << 40,
    'G': 1 << 30,
    'M': 1 << 20,
    'K': 1 << 10,
    'B': 1,
}
# chattr/lsattr-style single-letter file attribute flags mapped to long names,
# used by format_attributes() / get_flags_from_attributes().
FILE_ATTRIBUTES = {
    'A': 'noatime',
    'a': 'append',
    'c': 'compressed',
    'C': 'nocow',
    'd': 'nodump',
    'D': 'dirsync',
    'e': 'extents',
    'E': 'encrypted',
    'h': 'blocksize',
    'i': 'immutable',
    'I': 'indexed',
    'j': 'journalled',
    'N': 'inline',
    's': 'zero',
    'S': 'synchronous',
    't': 'notail',
    'T': 'blockroot',
    'u': 'undelete',
    'X': 'compressedraw',
    'Z': 'compresseddirty',
}
# ansible modules can be written in any language. To simplify
# development of Python modules, the functions available here can
# be used to do many common tasks
import locale
import os
import re
import shlex
import subprocess
import sys
import types
import time
import select
import shutil
import stat
import tempfile
import traceback
import grp
import pwd
import platform
import errno
import datetime
from collections import deque
from collections import Mapping, MutableMapping, Sequence, MutableSequence, Set, MutableSet
from itertools import repeat, chain
try:
import syslog
HAS_SYSLOG = True
except ImportError:
HAS_SYSLOG = False
try:
from systemd import journal
has_journal = True
except ImportError:
has_journal = False
HAVE_SELINUX = False
try:
import selinux
HAVE_SELINUX = True
except ImportError:
pass
# Python2 & 3 way to get NoneType
NoneType = type(None)
# Note: When getting Sequence from collections, it matches with strings. If
# this matters, make sure to check for strings before checking for sequencetype
try:
from collections.abc import KeysView
SEQUENCETYPE = (Sequence, frozenset, KeysView)
except:
SEQUENCETYPE = (Sequence, frozenset)
try:
import json
# Detect the python-json library which is incompatible
# Look for simplejson if that's the case
try:
if not isinstance(json.loads, types.FunctionType) or not isinstance(json.dumps, types.FunctionType):
raise ImportError
except AttributeError:
raise ImportError
except ImportError:
try:
import simplejson as json
except ImportError:
print('\n{"msg": "Error: ansible requires the stdlib json or simplejson module, neither was found!", "failed": true}')
sys.exit(1)
except SyntaxError:
print('\n{"msg": "SyntaxError: probably due to installed simplejson being for a different python version", "failed": true}')
sys.exit(1)
AVAILABLE_HASH_ALGORITHMS = dict()
try:
import hashlib
# python 2.7.9+ and 2.7.0+
for attribute in ('available_algorithms', 'algorithms'):
algorithms = getattr(hashlib, attribute, None)
if algorithms:
break
if algorithms is None:
# python 2.5+
algorithms = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
for algorithm in algorithms:
AVAILABLE_HASH_ALGORITHMS[algorithm] = getattr(hashlib, algorithm)
except ImportError:
import sha
AVAILABLE_HASH_ALGORITHMS = {'sha1': sha.sha}
try:
import md5
AVAILABLE_HASH_ALGORITHMS['md5'] = md5.md5
except ImportError:
pass
from ansible.module_utils.pycompat24 import get_exception, literal_eval
from ansible.module_utils.six import (
PY2,
PY3,
b,
binary_type,
integer_types,
iteritems,
string_types,
text_type,
)
from ansible.module_utils.six.moves import map, reduce, shlex_quote
from ansible.module_utils._text import to_native, to_bytes, to_text
from ansible.module_utils.parsing.convert_bool import BOOLEANS, BOOLEANS_FALSE, BOOLEANS_TRUE, boolean
# Heuristic for parameter names that look like they hold secrets
# (pass, password, passphrase, passwd, ... with -/_/space separators).
PASSWORD_MATCH = re.compile(r'^(?:.+[-_\s])?pass(?:[-_\s]?(?:word|phrase|wrd|wd)?)(?:[-_\s].+)?$', re.I)
_NUMBERTYPES = tuple(list(integer_types) + [float])
# Deprecated compat. Only kept in case another module used these names Using
# ansible.module_utils.six is preferred
NUMBERTYPES = _NUMBERTYPES
imap = map
try:
# Python 2
unicode
except NameError:
# Python 3
unicode = text_type
try:
# Python 2.6+
bytes
except NameError:
# Python 2.4
bytes = binary_type
try:
# Python 2
basestring
except NameError:
# Python 3
basestring = string_types
_literal_eval = literal_eval
# End of deprecated names
# Internal global holding passed in params. This is consulted in case
# multiple AnsibleModules are created. Otherwise each AnsibleModule would
# attempt to read from stdin. Other code should not use this directly as it
# is an internal implementation detail
_ANSIBLE_ARGS = None
# Argument-spec fragment merged into any module that sets
# add_file_common_args=True; see AnsibleModule.load_file_common_arguments().
FILE_COMMON_ARGUMENTS = dict(
    src=dict(),
    mode=dict(type='raw'),
    owner=dict(),
    group=dict(),
    seuser=dict(),
    serole=dict(),
    selevel=dict(),
    setype=dict(),
    follow=dict(type='bool', default=False),
    # not taken by the file module, but other modules call file so it must ignore them.
    content=dict(no_log=True),
    backup=dict(),
    force=dict(),
    remote_src=dict(),  # used by assemble
    regexp=dict(),  # used by assemble
    delimiter=dict(),  # used by assemble
    directory_mode=dict(),  # used by copy
    unsafe_writes=dict(type='bool'),  # should be available to any module using atomic_move
    attributes=dict(aliases=['attr']),
)
# Looser companion to PASSWORD_MATCH used for CLI-style argument names.
PASSWD_ARG_RE = re.compile(r'^[-]{0,2}pass[-]?(word|wd)?')
# Used for parsing symbolic file perms
MODE_OPERATOR_RE = re.compile(r'[+=-]')
USERS_RE = re.compile(r'[^ugo]')
PERMS_RE = re.compile(r'[^rwxXstugo]')
PERM_BITS = 0o7777  # file mode permission bits
EXEC_PERM_BITS = 0o0111  # execute permission bits
DEFAULT_PERM = 0o0666  # default file permission bits
def get_platform():
    """Return the OS/platform name as reported by the standard library.

    Example: 'Linux' on Linux hosts, 'Darwin' on macOS.
    """
    return platform.system()
def get_distribution():
    ''' return the distribution name

    Returns a capitalized distribution name on Linux, None elsewhere.
    '''
    if platform.system() == 'Linux':
        try:
            # NOTE(review): platform._supported_dists is a private attribute and,
            # together with platform.linux_distribution()/platform.dist(), was
            # removed in Python 3.8 — this whole path presumably targets py2/early
            # py3 and falls into the bare except on modern interpreters; confirm.
            supported_dists = platform._supported_dists + ('arch', 'alpine', 'devuan')
            distribution = platform.linux_distribution(supported_dists=supported_dists)[0].capitalize()
            if not distribution and os.path.isfile('/etc/system-release'):
                # /etc/system-release distros (Amazon Linux and friends).
                distribution = platform.linux_distribution(supported_dists=['system'])[0].capitalize()
                if 'Amazon' in distribution:
                    distribution = 'Amazon'
                else:
                    distribution = 'OtherLinux'
        except:
            # FIXME: MethodMissing, I assume?
            distribution = platform.dist()[0].capitalize()
    else:
        distribution = None
    return distribution
def get_distribution_version():
    ''' return the distribution version

    Returns a version string on Linux, None elsewhere.
    '''
    if platform.system() == 'Linux':
        try:
            # NOTE(review): platform.linux_distribution()/platform.dist() were
            # removed in Python 3.8; on modern interpreters this falls into the
            # bare except — confirm supported interpreter range.
            distribution_version = platform.linux_distribution()[1]
            if not distribution_version and os.path.isfile('/etc/system-release'):
                # /etc/system-release distros (Amazon Linux and friends).
                distribution_version = platform.linux_distribution(supported_dists=['system'])[1]
        except:
            # FIXME: MethodMissing, I assume?
            distribution_version = platform.dist()[1]
    else:
        distribution_version = None
    return distribution_version
def get_all_subclasses(cls):
    '''
    used by modules like Hardware or Network fact classes to retrieve all
    subclasses of a given class. __subclasses__ returns only direct sub
    classes; this one goes down the whole class tree.

    :arg cls: class whose descendants are wanted.
    :returns: list of every direct and indirect subclass of ``cls``, in
        breadth-first discovery order, each listed once.
    '''
    # The previous implementation removed items from the worklist while
    # iterating it with a ``for`` loop, which skips elements within a pass and
    # forces repeated re-scans. A deque-based BFS visits each class exactly once.
    subclasses = []
    to_visit = deque([cls])
    while to_visit:
        current = to_visit.popleft()
        for subclass in current.__subclasses__():
            # Guard against revisiting a class reachable via multiple bases
            # (diamond inheritance).
            if subclass not in subclasses:
                subclasses.append(subclass)
                to_visit.append(subclass)
    return subclasses
def load_platform_subclass(cls, *args, **kwargs):
    '''
    used by modules like User to have different implementations based on detected platform. See User
    module for an example.

    Selects the most specific subclass of ``cls`` matching the current
    platform/distribution and returns a new (uninitialized) instance of it;
    __init__ still runs afterwards via the normal construction protocol.
    '''
    this_platform = get_platform()
    distribution = get_distribution()
    subclass = None
    # get the most specific superclass for this platform
    # (last matching subclass wins when several match)
    if distribution is not None:
        for sc in get_all_subclasses(cls):
            if sc.distribution is not None and sc.distribution == distribution and sc.platform == this_platform:
                subclass = sc
    if subclass is None:
        # Fall back to a platform-only match (distribution left unset).
        for sc in get_all_subclasses(cls):
            if sc.platform == this_platform and sc.distribution is None:
                subclass = sc
    if subclass is None:
        # No specialization found: instantiate the base class itself.
        subclass = cls
    return super(cls, subclass).__new__(subclass)
def json_dict_unicode_to_bytes(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to byte str

        Specialized for json return because this only handles, lists, tuples,
        and dict container types (the containers that the json module returns)
    '''
    if isinstance(d, text_type):
        return to_bytes(d, encoding=encoding, errors=errors)
    if isinstance(d, dict):
        return dict(
            (json_dict_unicode_to_bytes(key, encoding, errors),
             json_dict_unicode_to_bytes(val, encoding, errors))
            for key, val in iteritems(d))
    if isinstance(d, list):
        return [json_dict_unicode_to_bytes(item, encoding, errors) for item in d]
    if isinstance(d, tuple):
        return tuple(json_dict_unicode_to_bytes(item, encoding, errors) for item in d)
    # Scalars (and anything else) pass through unchanged.
    return d
def json_dict_bytes_to_unicode(d, encoding='utf-8', errors='surrogate_or_strict'):
    ''' Recursively convert dict keys and values to text

        Specialized for json return because this only handles, lists, tuples,
        and dict container types (the containers that the json module returns)
    '''
    if isinstance(d, binary_type):
        # Warning, can traceback
        return to_text(d, encoding=encoding, errors=errors)
    if isinstance(d, dict):
        return dict(
            (json_dict_bytes_to_unicode(key, encoding, errors),
             json_dict_bytes_to_unicode(val, encoding, errors))
            for key, val in iteritems(d))
    if isinstance(d, list):
        return [json_dict_bytes_to_unicode(item, encoding, errors) for item in d]
    if isinstance(d, tuple):
        return tuple(json_dict_bytes_to_unicode(item, encoding, errors) for item in d)
    # Scalars (and anything else) pass through unchanged.
    return d
def return_values(obj):
    """ Yield native stringified leaf values from datastructures.

    For use with removing sensitive values pre-jsonification."""
    if isinstance(obj, (text_type, binary_type)):
        # Empty strings yield nothing.
        if obj:
            yield to_native(obj, errors='surrogate_or_strict')
    elif isinstance(obj, SEQUENCETYPE):
        for item in obj:
            for leaf in return_values(item):
                yield leaf
    elif isinstance(obj, Mapping):
        # Only values are scrubbed; keys are ignored.
        for _key, val in obj.items():
            for leaf in return_values(val):
                yield leaf
    elif isinstance(obj, (bool, NoneType)):
        # This must come before the number check because bools are also ints.
        pass
    elif isinstance(obj, NUMBERTYPES):
        yield to_native(obj, nonstring='simplerepr')
    else:
        raise TypeError('Unknown parameter type: %s, %s' % (type(obj), obj))
def _remove_values_conditions(value, no_log_strings, deferred_removals):
    """
    Helper function for :meth:`remove_values`.

    :arg value: The value to check for strings that need to be stripped
    :arg no_log_strings: set of strings which must be stripped out of any values
    :arg deferred_removals: List which holds information about nested
        containers that have to be iterated for removals.  It is passed into
        this function so that more entries can be added to it if value is
        a container type.  The format of each entry is a 2-tuple where the first
        element is the ``value`` parameter and the second value is a new
        container to copy the elements of ``value`` into once iterated.
    :returns: if ``value`` is a scalar, returns ``value`` with two exceptions:
        1. :class:`~datetime.datetime` objects which are changed into a string representation.
        2. objects which are in no_log_strings are replaced with a placeholder
            so that no sensitive data is leaked.
        If ``value`` is a container type, returns a new empty container.

    ``deferred_removals`` is added to as a side-effect of this function.

    .. warning:: It is up to the caller to make sure the order in which value
        is passed in is correct.  For instance, higher level containers need
        to be passed in before lower level containers. For example, given
        ``{'level1': {'level2': 'level3': [True]} }`` first pass in the
        dictionary for ``level1``, then the dict for ``level2``, and finally
        the list for ``level3``.
    """
    if isinstance(value, (text_type, binary_type)):
        # Need native str type
        native_str_value = value
        if isinstance(value, text_type):
            value_is_text = True
            if PY2:
                native_str_value = to_bytes(value, errors='surrogate_or_strict')
        elif isinstance(value, binary_type):
            value_is_text = False
            if PY3:
                native_str_value = to_text(value, errors='surrogate_or_strict')
        # Exact match: replace the whole value with the placeholder.
        if native_str_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        # Substring matches: mask each occurrence with eight asterisks.
        for omit_me in no_log_strings:
            native_str_value = native_str_value.replace(omit_me, '*' * 8)
        # Convert back to the same text/bytes type the caller passed in.
        if value_is_text and isinstance(native_str_value, binary_type):
            value = to_text(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        elif not value_is_text and isinstance(native_str_value, text_type):
            value = to_bytes(native_str_value, encoding='utf-8', errors='surrogate_then_replace')
        else:
            value = native_str_value
    elif isinstance(value, Sequence):
        # Containers: hand back an empty mutable twin and defer element
        # processing to the caller's worklist.
        if isinstance(value, MutableSequence):
            new_value = type(value)()
        else:
            new_value = []  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Set):
        if isinstance(value, MutableSet):
            new_value = type(value)()
        else:
            new_value = set()  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, Mapping):
        if isinstance(value, MutableMapping):
            new_value = type(value)()
        else:
            new_value = {}  # Need a mutable value
        deferred_removals.append((value, new_value))
        value = new_value
    elif isinstance(value, tuple(chain(NUMBERTYPES, (bool, NoneType)))):
        # Numbers/bools/None: compare their string form against the no_log set.
        stringy_value = to_native(value, encoding='utf-8', errors='surrogate_or_strict')
        if stringy_value in no_log_strings:
            return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
        for omit_me in no_log_strings:
            if omit_me in stringy_value:
                return 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
    elif isinstance(value, datetime.datetime):
        value = value.isoformat()
    else:
        raise TypeError('Value of unknown type: %s, %s' % (type(value), value))
    return value
def remove_values(value, no_log_strings):
    """ Remove strings in no_log_strings from value.  If value is a container
    type, then remove a lot more.

    Containers are processed breadth-first via a worklist
    (``deferred_removals``) that :func:`_remove_values_conditions` appends to
    as it encounters nested containers.
    """
    deferred_removals = deque()

    # Normalize the no_log strings to native str so comparisons match.
    no_log_strings = [to_native(s, errors='surrogate_or_strict') for s in no_log_strings]
    new_value = _remove_values_conditions(value, no_log_strings, deferred_removals)

    while deferred_removals:
        old_data, new_data = deferred_removals.popleft()
        if isinstance(new_data, Mapping):
            for old_key, old_elem in old_data.items():
                new_elem = _remove_values_conditions(old_elem, no_log_strings, deferred_removals)
                new_data[old_key] = new_elem
        else:
            for elem in old_data:
                new_elem = _remove_values_conditions(elem, no_log_strings, deferred_removals)
                if isinstance(new_data, MutableSequence):
                    new_data.append(new_elem)
                elif isinstance(new_data, MutableSet):
                    new_data.add(new_elem)
                else:
                    raise TypeError('Unknown container type encountered when removing private values from output')

    return new_value
def heuristic_log_sanitize(data, no_log_values=None):
    ''' Remove strings that look like passwords from log messages.

    Scans ``data`` right-to-left for ``user:pass@host`` shapes (both ssh-style
    and ``scheme://user:pass@host`` URLs) and replaces the password portion
    with ``********``.  Known to have false positives.
    '''
    # Currently filters:
    # user:pass@foo/whatever and http://username:pass@wherever/foo
    # This code has false positives and consumes parts of logs that are
    # not passwds

    # begin: start of a passwd containing string
    # end: end of a passwd containing string
    # sep: char between user and passwd
    # prev_begin: where in the overall string to start a search for
    #   a passwd
    # sep_search_end: where in the string to end a search for the sep
    data = to_native(data)

    output = []
    begin = len(data)
    prev_begin = begin
    sep = 1
    while sep:
        # Find the potential end of a passwd
        try:
            end = data.rindex('@', 0, begin)
        except ValueError:
            # No passwd in the rest of the data
            output.insert(0, data[0:begin])
            break

        # Search for the beginning of a passwd
        sep = None
        sep_search_end = end
        while not sep:
            # URL-style username+password
            try:
                begin = data.rindex('://', 0, sep_search_end)
            except ValueError:
                # No url style in the data, check for ssh style in the
                # rest of the string
                begin = 0
            # Search for separator
            try:
                sep = data.index(':', begin + 3, end)
            except ValueError:
                # No separator; choices:
                if begin == 0:
                    # Searched the whole string so there's no password
                    # here.  Return the remaining data
                    output.insert(0, data[0:begin])
                    break
                # Search for a different beginning of the password field.
                sep_search_end = begin
                continue
        if sep:
            # Password was found; remove it.
            output.insert(0, data[end:prev_begin])
            output.insert(0, '********')
            output.insert(0, data[begin:sep + 1])
            prev_begin = begin

    output = ''.join(output)
    if no_log_values:
        # Also scrub any explicitly registered no_log values.
        output = remove_values(output, no_log_values)
    return output
def bytes_to_human(size, isbits=False, unit=None):
    """Render a byte (or bit) count as a human-readable string.

    :arg size: numeric count to convert.
    :kwarg isbits: label the result in bits instead of bytes.
    :kwarg unit: force a specific suffix letter (e.g. 'M') instead of
        auto-selecting the largest fitting one.
    """
    base = 'bits' if isbits else 'Bytes'
    suffix = ''
    # Walk suffixes from largest to smallest; stop at the first one that fits
    # (or at the caller-requested unit).
    for suffix, limit in sorted(iteritems(SIZE_RANGES), key=lambda item: -item[1]):
        if (unit is None and size >= limit) or (unit is not None and unit.upper() == suffix[0]):
            break
    if limit == 1:
        suffix = base
    else:
        suffix = suffix + base[0]
    return '%.2f %s' % (float(size) / limit, suffix)
def human_to_bytes(number, default_unit=None, isbits=False):
    '''
    Convert number in string format into bytes (ex: '2K' => 2048) or using unit argument.

    example: human_to_bytes('10M') <=> human_to_bytes(10, 'M')

    :arg number: value such as '2K', '10.5 MB' or a bare number.
    :kwarg default_unit: unit letter applied when *number* carries none.
    :kwarg isbits: when True, validate the suffix as bits ('Mb') rather than bytes ('MB').
    :returns: integer count of bytes (or bits).
    :raises ValueError: when the string or its unit cannot be interpreted.
    '''
    # Raw string: '\s'/'\d' in a plain literal are invalid escape sequences on
    # Python 3.6+ (DeprecationWarning, later SyntaxWarning).
    m = re.search(r'^\s*(\d*\.?\d*)\s*([A-Za-z]+)?', str(number), flags=re.IGNORECASE)
    if m is None:
        raise ValueError("human_to_bytes() can't interpret following string: %s" % str(number))
    try:
        num = float(m.group(1))
    except ValueError:
        # Narrowed from a bare except: float() on a str can only raise ValueError.
        raise ValueError("human_to_bytes() can't interpret following number: %s (original input string: %s)" % (m.group(1), number))

    unit = m.group(2)
    if unit is None:
        unit = default_unit

    if unit is None:
        # No unit given, returning raw number
        return int(round(num))
    range_key = unit[0].upper()
    try:
        limit = SIZE_RANGES[range_key]
    except KeyError:
        # Narrowed from a bare except: only a missing suffix key can occur here.
        raise ValueError("human_to_bytes() failed to convert %s (unit = %s). The suffix must be one of %s" % (number, unit, ", ".join(SIZE_RANGES.keys())))

    # default value
    unit_class = 'B'
    unit_class_name = 'byte'
    # handling bits case
    if isbits:
        unit_class = 'b'
        unit_class_name = 'bit'
    # check unit value if more than one character (KB, MB)
    if len(unit) > 1:
        expect_message = 'expect %s%s or %s' % (range_key, unit_class, range_key)
        if range_key == 'B':
            expect_message = 'expect %s or %s' % (unit_class, unit_class_name)
        if unit_class_name in unit.lower():
            pass
        elif unit[1] != unit_class:
            raise ValueError("human_to_bytes() failed to convert %s. Value is not a valid string (%s)" % (number, expect_message))

    return int(round(num * limit))
def is_executable(path):
    '''is the given path executable?

    Limitations:
      * Does not account for FSACLs.
      * Most times we really want to know "Can the current user execute this
        file".  This function does not tell us that, only whether any execute
        bit is set at all.
    '''
    # Bitwise-or the three execute bits, then mask against the file's mode:
    # nonzero (truthy) iff any execute bit is set.
    execute_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
    return os.stat(path)[stat.ST_MODE] & execute_bits
def _load_params():
    ''' read the modules parameters and store them globally.

    This function may be needed for certain very dynamic custom modules which
    want to process the parameters that are being handed the module.  Since
    this is so closely tied to the implementation of modules we cannot
    guarantee API stability for it (it may change between versions) however we
    will try not to break it gratuitously.  It is certainly more future-proof
    to call this function and consume its outputs than to implement the logic
    inside it as a copy in your own code.
    '''
    global _ANSIBLE_ARGS
    if _ANSIBLE_ARGS is not None:
        # Another AnsibleModule already consumed stdin; reuse the cached bytes.
        buffer = _ANSIBLE_ARGS
    else:
        # debug overrides to read args from file or cmdline

        # Avoid tracebacks when locale is non-utf8
        # We control the args and we pass them as utf8
        if len(sys.argv) > 1:
            if os.path.isfile(sys.argv[1]):
                fd = open(sys.argv[1], 'rb')
                buffer = fd.read()
                fd.close()
            else:
                buffer = sys.argv[1]
                if PY3:
                    buffer = buffer.encode('utf-8', errors='surrogateescape')
        # default case, read from stdin
        else:
            if PY2:
                buffer = sys.stdin.read()
            else:
                # Read raw bytes so locale cannot mangle the payload.
                buffer = sys.stdin.buffer.read()
        _ANSIBLE_ARGS = buffer

    try:
        params = json.loads(buffer.decode('utf-8'))
    except ValueError:
        # This helper used too early for fail_json to work.
        print('\n{"msg": "Error: Module unable to decode valid JSON on stdin.  Unable to figure out what parameters were passed", "failed": true}')
        sys.exit(1)

    if PY2:
        params = json_dict_unicode_to_bytes(params)

    try:
        return params['ANSIBLE_MODULE_ARGS']
    except KeyError:
        # This helper does not have access to fail_json so we have to print
        # json output on our own.
        print('\n{"msg": "Error: Module unable to locate ANSIBLE_MODULE_ARGS in json data from stdin.  Unable to figure out what parameters were passed", '
              '"failed": true}')
        sys.exit(1)
def env_fallback(*args, **kwargs):
    ''' Load value from environment.

    Returns the value of the first environment variable in *args* that is
    set; raises AnsibleFallbackNotFound when none of them exist.
    '''
    for name in args:
        try:
            return os.environ[name]
        except KeyError:
            continue
    raise AnsibleFallbackNotFound
def _lenient_lowercase(lst):
"""Lowercase elements of a list.
If an element is not a string, pass it through untouched.
"""
lowered = []
for value in lst:
try:
lowered.append(value.lower())
except AttributeError:
lowered.append(value)
return lowered
def format_attributes(attributes):
    """Translate single-letter file attribute flags into their long names.

    Unknown flags are silently skipped.
    """
    return [FILE_ATTRIBUTES[flag] for flag in attributes if flag in FILE_ATTRIBUTES]
def get_flags_from_attributes(attributes):
    """Translate long attribute names back into a string of flag letters."""
    return ''.join(flag for flag, name in FILE_ATTRIBUTES.items() if name in attributes)
class AnsibleFallbackNotFound(Exception):
    """Raised by fallback loaders (e.g. env_fallback) when no value is found."""
class _SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, Set):
return list(obj)
return super(_SetEncoder, self).default(obj)
class AnsibleModule(object):
    def __init__(self, argument_spec, bypass_checks=False, no_log=False,
                 check_invalid_arguments=True, mutually_exclusive=None, required_together=None,
                 required_one_of=None, add_file_common_args=False, supports_check_mode=False,
                 required_if=None):
        '''
        common code for quickly building an ansible module in Python
        (although you can write modules in anything that can return JSON)
        see library/* for examples

        Note: the steps below run in a deliberate order — parameters are
        loaded and no_log values registered before any validation so that
        failures never leak sensitive values.
        '''
        self._name = os.path.basename(__file__)  # initialize name until we can parse from options
        self.argument_spec = argument_spec
        self.supports_check_mode = supports_check_mode
        self.check_mode = False
        self.bypass_checks = bypass_checks
        self.no_log = no_log
        self.check_invalid_arguments = check_invalid_arguments
        self.mutually_exclusive = mutually_exclusive
        self.required_together = required_together
        self.required_one_of = required_one_of
        self.required_if = required_if
        self.cleanup_files = []
        self._debug = False
        self._diff = False
        self._socket_path = None
        self._verbosity = 0
        # May be used to set modifications to the environment for any
        # run_command invocation
        self.run_command_environ_update = {}
        self._warnings = []
        self._deprecations = []
        self.aliases = {}
        # Internal '_ansible_*' keys injected by the task executor; they are
        # accepted but never treated as module parameters.
        self._legal_inputs = ['_ansible_check_mode', '_ansible_no_log', '_ansible_debug', '_ansible_diff', '_ansible_verbosity',
                              '_ansible_selinux_special_fs', '_ansible_module_name', '_ansible_version', '_ansible_syslog_facility',
                              '_ansible_socket']
        self._options_context = list()

        if add_file_common_args:
            # Merge in the shared file-handling options (mode/owner/selinux/...)
            # without clobbering module-specific overrides.
            for k, v in FILE_COMMON_ARGUMENTS.items():
                if k not in self.argument_spec:
                    self.argument_spec[k] = v

        self._load_params()
        self._set_fallbacks()

        # append to legal_inputs and then possibly check against them
        try:
            self.aliases = self._handle_aliases()
        except Exception as e:
            # Use exceptions here because it isn't safe to call fail_json until no_log is processed
            print('\n{"failed": true, "msg": "Module alias error: %s"}' % to_native(e))
            sys.exit(1)

        # Save parameter values that should never be logged
        self.no_log_values = set()
        self._handle_no_log_values()

        # check the locale as set by the current environment, and reset to
        # a known valid (LANG=C) if it's an invalid/unavailable locale
        self._check_locale()

        self._check_arguments(check_invalid_arguments)

        # check exclusive early
        if not bypass_checks:
            self._check_mutually_exclusive(mutually_exclusive)

        self._set_defaults(pre=True)

        # Dispatch table mapping argument-spec 'type' strings to checkers.
        self._CHECK_ARGUMENT_TYPES_DISPATCHER = {
            'str': self._check_type_str,
            'list': self._check_type_list,
            'dict': self._check_type_dict,
            'bool': self._check_type_bool,
            'int': self._check_type_int,
            'float': self._check_type_float,
            'path': self._check_type_path,
            'raw': self._check_type_raw,
            'jsonarg': self._check_type_jsonarg,
            'json': self._check_type_jsonarg,
            'bytes': self._check_type_bytes,
            'bits': self._check_type_bits,
        }
        if not bypass_checks:
            self._check_required_arguments()
            self._check_argument_types()
            self._check_argument_values()
            self._check_required_together(required_together)
            self._check_required_one_of(required_one_of)
            self._check_required_if(required_if)

        self._set_defaults(pre=False)

        # deal with options sub-spec
        self._handle_options()

        if not self.no_log:
            self._log_invocation()

        # finally, make sure we're in a sane working dir
        self._set_cwd()
def warn(self, warning):
if isinstance(warning, string_types):
self._warnings.append(warning)
self.log('[WARNING] %s' % warning)
else:
raise TypeError("warn requires a string not a %s" % type(warning))
def deprecate(self, msg, version=None):
if isinstance(msg, string_types):
self._deprecations.append({
'msg': msg,
'version': version
})
self.log('[DEPRECATION WARNING] %s %s' % (msg, version))
else:
raise TypeError("deprecate requires a string not a %s" % type(msg))
    def load_file_common_arguments(self, params):
        '''
        many modules deal with files, this encapsulates common
        options that the file module accepts such that it is directly
        available to all modules and they can share code.

        Returns a dict of normalized file options (path/mode/owner/group plus
        selinux context fields), or an empty dict when no path/dest was given.
        '''
        path = params.get('path', params.get('dest', None))
        if path is None:
            return {}
        else:
            path = os.path.expanduser(os.path.expandvars(path))

        b_path = to_bytes(path, errors='surrogate_or_strict')
        # if the path is a symlink, and we're following links, get
        # the target of the link instead for testing
        if params.get('follow', False) and os.path.islink(b_path):
            b_path = os.path.realpath(b_path)
            path = to_native(b_path)

        mode = params.get('mode', None)
        owner = params.get('owner', None)
        group = params.get('group', None)

        # selinux related options
        seuser = params.get('seuser', None)
        serole = params.get('serole', None)
        setype = params.get('setype', None)
        selevel = params.get('selevel', None)
        secontext = [seuser, serole, setype]

        if self.selinux_mls_enabled():
            secontext.append(selevel)

        default_secontext = self.selinux_default_context(path)
        for i in range(len(default_secontext)):
            # NOTE(review): 'i is not None' is always true for a range index —
            # presumably 'secontext[i] is not None' was intended; confirm
            # against upstream before changing.
            if i is not None and secontext[i] == '_default':
                secontext[i] = default_secontext[i]

        attributes = params.get('attributes', None)
        return dict(
            path=path, mode=mode, owner=owner, group=group,
            seuser=seuser, serole=serole, setype=setype,
            selevel=selevel, secontext=secontext, attributes=attributes,
        )
# Detect whether using selinux that is MLS-aware.
# While this means you can set the level/range with
# selinux.lsetfilecon(), it may or may not mean that you
# will get the selevel as part of the context returned
# by selinux.lgetfilecon().
def selinux_mls_enabled(self):
if not HAVE_SELINUX:
return False
if selinux.is_selinux_mls_enabled() == 1:
return True
else:
return False
    def selinux_enabled(self):
        """Return True when SELinux is enabled on the host, else False.

        Fails the module outright when the host appears to use SELinux
        (selinuxenabled exits 0) but the python bindings are missing.
        """
        if not HAVE_SELINUX:
            seenabled = self.get_bin_path('selinuxenabled')
            if seenabled is not None:
                (rc, out, err) = self.run_command(seenabled)
                if rc == 0:
                    # SELinux is active but we cannot talk to it without bindings.
                    self.fail_json(msg="Aborting, target uses selinux but python bindings (libselinux-python) aren't installed!")
            return False
        if selinux.is_selinux_enabled() == 1:
            return True
        else:
            return False
# Determine whether we need a placeholder for selevel/mls
def selinux_initial_context(self):
context = [None, None, None]
if self.selinux_mls_enabled():
context.append(None)
return context
# If selinux fails to find a default, return an array of None
def selinux_default_context(self, path, mode=0):
context = self.selinux_initial_context()
if not HAVE_SELINUX or not self.selinux_enabled():
return context
try:
ret = selinux.matchpathcon(to_native(path, errors='surrogate_or_strict'), mode)
except OSError:
return context
if ret[0] == -1:
return context
# Limit split to 4 because the selevel, the last in the list,
# may contain ':' characters
context = ret[1].split(':', 3)
return context
def selinux_context(self, path):
    """Return the current SELinux context of *path* (via lgetfilecon_raw)."""
    context = self.selinux_initial_context()
    if not (HAVE_SELINUX and self.selinux_enabled()):
        return context
    try:
        ret = selinux.lgetfilecon_raw(to_native(path, errors='surrogate_or_strict'))
    except OSError as e:
        if e.errno == errno.ENOENT:
            self.fail_json(path=path, msg='path %s does not exist' % path)
        else:
            self.fail_json(path=path, msg='failed to retrieve selinux context')
    if ret[0] == -1:
        return context
    # Limit split to 4 because the selevel, the last in the list,
    # may contain ':' characters
    return ret[1].split(':', 3)
def user_and_group(self, path, expand=True):
    """Return the (uid, gid) owning *path*.

    Uses lstat, so a symlink's own ownership is reported, not its
    target's. When *expand* is true, ~ and env vars are expanded first.
    """
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    st = os.lstat(b_path)
    return (st.st_uid, st.st_gid)
def find_mount_point(self, path):
    """Walk up from *path* to the mount point that contains it.

    The return type mirrors the input: bytes in -> bytes out,
    text in -> text out.
    """
    want_bytes = isinstance(path, binary_type)
    b_path = os.path.realpath(to_bytes(os.path.expanduser(os.path.expandvars(path)), errors='surrogate_or_strict'))
    while not os.path.ismount(b_path):
        b_path = os.path.dirname(b_path)
    if want_bytes:
        return b_path
    return to_text(b_path, errors='surrogate_or_strict')
def is_special_selinux_path(self, path):
    """
    Returns a tuple containing (True, selinux_context) if the given path is on a
    NFS or other 'special' fs mount point, otherwise the return will be (False, None).
    """
    try:
        # Context manager guarantees the handle is closed even if
        # readlines() raises. Only I/O failures mean "cannot tell";
        # the previous bare 'except:' also swallowed KeyboardInterrupt
        # and SystemExit.
        with open('/proc/mounts', 'r') as f:
            mount_data = f.readlines()
    except (IOError, OSError):
        return (False, None)
    path_mount_point = self.find_mount_point(path)
    for line in mount_data:
        (device, mount_point, fstype, options, rest) = line.split(' ', 4)
        if path_mount_point == mount_point:
            # The mount's fs type must match one of the configured
            # "special" filesystems (e.g. nfs) to qualify.
            for fs in self._selinux_special_fs:
                if fs in fstype:
                    special_context = self.selinux_context(path_mount_point)
                    return (True, special_context)
    return (False, None)
def set_default_selinux_context(self, path, changed):
    """Reset *path* to its policy-default SELinux context; return changed flag."""
    if not (HAVE_SELINUX and self.selinux_enabled()):
        return changed
    default_context = self.selinux_default_context(path)
    return self.set_context_if_different(path, default_context, False)
def set_context_if_different(self, path, context, changed, diff=None):
    # Apply *context* to *path* when it differs from the current SELinux
    # context; returns the (possibly updated) changed flag. Honors check
    # mode and records before/after values in *diff* when provided.
    if not HAVE_SELINUX or not self.selinux_enabled():
        return changed
    cur_context = self.selinux_context(path)
    new_context = list(cur_context)
    # Iterate over the current context instead of the
    # argument context, which may have selevel.
    (is_special_se, sp_context) = self.is_special_selinux_path(path)
    if is_special_se:
        # Paths on "special" filesystems (e.g. NFS) take the mount's context.
        new_context = sp_context
    else:
        for i in range(len(cur_context)):
            if len(context) > i:
                if context[i] is not None and context[i] != cur_context[i]:
                    new_context[i] = context[i]
                elif context[i] is None:
                    # None in the requested context means "keep current".
                    new_context[i] = cur_context[i]
    if cur_context != new_context:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['secontext'] = cur_context
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['secontext'] = new_context
        try:
            if self.check_mode:
                # Would have changed; report so without touching the file.
                return True
            rc = selinux.lsetfilecon(to_native(path), ':'.join(new_context))
        except OSError as e:
            self.fail_json(path=path, msg='invalid selinux context: %s' % to_native(e),
                           new_context=new_context, cur_context=cur_context, input_was=context)
        if rc != 0:
            self.fail_json(path=path, msg='set selinux context failed')
        changed = True
    return changed
def set_owner_if_different(self, path, owner, changed, diff=None, expand=True):
    # Change the owner of *path* to *owner* (user name or numeric uid)
    # when it differs; returns the updated changed flag. Honors check
    # mode and records before/after uids in *diff* when provided.
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    if owner is None:
        return changed
    orig_uid, orig_gid = self.user_and_group(b_path, expand)
    try:
        uid = int(owner)
    except ValueError:
        # Not numeric: resolve the user name to a uid.
        try:
            uid = pwd.getpwnam(owner).pw_uid
        except KeyError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chown failed: failed to look up user %s' % owner)
    if orig_uid != uid:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['owner'] = orig_uid
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['owner'] = uid
        if self.check_mode:
            return True
        try:
            # lchown: change the symlink itself, never its target.
            os.lchown(b_path, uid, -1)
        except OSError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chown failed')
        changed = True
    return changed
def set_group_if_different(self, path, group, changed, diff=None, expand=True):
    # Change the group of *path* to *group* (group name or numeric gid)
    # when it differs; returns the updated changed flag. Mirrors
    # set_owner_if_different but for the gid.
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    if group is None:
        return changed
    orig_uid, orig_gid = self.user_and_group(b_path, expand)
    try:
        gid = int(group)
    except ValueError:
        # Not numeric: resolve the group name to a gid.
        try:
            gid = grp.getgrnam(group).gr_gid
        except KeyError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chgrp failed: failed to look up group %s' % group)
    if orig_gid != gid:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['group'] = orig_gid
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['group'] = gid
        if self.check_mode:
            return True
        try:
            # lchown: change the symlink itself, never its target.
            os.lchown(b_path, -1, gid)
        except OSError:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chgrp failed')
        changed = True
    return changed
def set_mode_if_different(self, path, mode, changed, diff=None, expand=True):
    # Apply *mode* (int, octal string like '0644', or symbolic string
    # like 'u+rwx') to *path* when it differs from the current permission
    # bits; returns the updated changed flag.
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    path_stat = os.lstat(b_path)
    if mode is None:
        return changed
    if not isinstance(mode, int):
        try:
            mode = int(mode, 8)
        except Exception:
            # Not octal: try chmod-style symbolic notation.
            try:
                mode = self._symbolic_mode_to_octal(path_stat, mode)
            except Exception as e:
                path = to_text(b_path)
                self.fail_json(path=path,
                               msg="mode must be in octal or symbolic form",
                               details=to_native(e))
            if mode != stat.S_IMODE(mode):
                # prevent mode from having extra info or being invalid long number
                path = to_text(b_path)
                self.fail_json(path=path, msg="Invalid mode supplied, only permission info is allowed", details=mode)
    prev_mode = stat.S_IMODE(path_stat.st_mode)
    if prev_mode != mode:
        if diff is not None:
            if 'before' not in diff:
                diff['before'] = {}
            diff['before']['mode'] = '0%03o' % prev_mode
            if 'after' not in diff:
                diff['after'] = {}
            diff['after']['mode'] = '0%03o' % mode
        if self.check_mode:
            return True
        # FIXME: comparison against string above will cause this to be executed
        # every time
        try:
            if hasattr(os, 'lchmod'):
                # lchmod never follows symlinks — preferred when available.
                os.lchmod(b_path, mode)
            else:
                if not os.path.islink(b_path):
                    os.chmod(b_path, mode)
                else:
                    # Attempt to set the perms of the symlink but be
                    # careful not to change the perms of the underlying
                    # file while trying
                    underlying_stat = os.stat(b_path)
                    os.chmod(b_path, mode)
                    new_underlying_stat = os.stat(b_path)
                    if underlying_stat.st_mode != new_underlying_stat.st_mode:
                        os.chmod(b_path, stat.S_IMODE(underlying_stat.st_mode))
        except OSError as e:
            if os.path.islink(b_path) and e.errno == errno.EPERM:  # Can't set mode on symbolic links
                pass
            elif e.errno in (errno.ENOENT, errno.ELOOP):  # Can't set mode on broken symbolic links
                pass
            else:
                raise
        except Exception as e:
            path = to_text(b_path)
            self.fail_json(path=path, msg='chmod failed', details=to_native(e),
                           exception=traceback.format_exc())
        # Re-stat to report what actually happened; the symlink fallbacks
        # above may have left the mode untouched.
        path_stat = os.lstat(b_path)
        new_mode = stat.S_IMODE(path_stat.st_mode)
        if new_mode != prev_mode:
            changed = True
    return changed
def set_attributes_if_different(self, path, attributes, changed, diff=None, expand=True):
    # Apply chattr-style filesystem attributes (e.g. 'i', 'a') to *path*
    # when they differ from the current flags; returns the changed flag.
    if attributes is None:
        return changed
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if expand:
        b_path = os.path.expanduser(os.path.expandvars(b_path))
    existing = self.get_file_attributes(b_path)
    if existing.get('attr_flags', '') != attributes:
        attrcmd = self.get_bin_path('chattr')
        if attrcmd:
            # '=' makes chattr set the flag list exactly as given.
            attrcmd = [attrcmd, '=%s' % attributes, b_path]
            changed = True
            if diff is not None:
                if 'before' not in diff:
                    diff['before'] = {}
                diff['before']['attributes'] = existing.get('attr_flags')
                if 'after' not in diff:
                    diff['after'] = {}
                diff['after']['attributes'] = attributes
            if not self.check_mode:
                try:
                    rc, out, err = self.run_command(attrcmd)
                    if rc != 0 or err:
                        raise Exception("Error while setting attributes: %s" % (out + err))
                except Exception as e:
                    path = to_text(b_path)
                    self.fail_json(path=path, msg='chattr failed', details=to_native(e),
                                   exception=traceback.format_exc())
    return changed
def get_file_attributes(self, path):
    """Best-effort read of chattr-style attributes via lsattr.

    Returns a dict with 'attr_flags', 'version' and 'attributes' keys,
    or an empty dict when lsattr is missing or its output is unusable.
    """
    output = {}
    attrcmd = self.get_bin_path('lsattr', False)
    if attrcmd:
        attrcmd = [attrcmd, '-vd', path]
        try:
            rc, out, err = self.run_command(attrcmd)
            if rc == 0:
                # 'lsattr -vd' prints: <version> <flags> <path>
                res = out.split(' ')[0:2]
                output['attr_flags'] = res[1].replace('-', '').strip()
                output['version'] = res[0].strip()
                output['attributes'] = format_attributes(output['attr_flags'])
        except Exception:
            # Attribute discovery stays best-effort, but the previous bare
            # 'except:' also trapped KeyboardInterrupt/SystemExit.
            pass
    return output
@classmethod
def _symbolic_mode_to_octal(cls, path_stat, symbolic_mode):
    """
    This enables symbolic chmod string parsing as stated in the chmod man-page
    This includes things like: "u=rw-x+X,g=r-x+X,o=r-x+X"
    """
    new_mode = stat.S_IMODE(path_stat.st_mode)
    # Now parse all symbolic modes
    for mode in symbolic_mode.split(','):
        # Per single mode. This always contains a '+', '-' or '='
        # Split it on that
        permlist = MODE_OPERATOR_RE.split(mode)
        # And find all the operators
        opers = MODE_OPERATOR_RE.findall(mode)
        # The user(s) where it's all about is the first element in the
        # 'permlist' list. Take that and remove it from the list.
        # An empty user or 'a' means 'all'.
        users = permlist.pop(0)
        use_umask = (users == '')
        if users == 'a' or users == '':
            users = 'ugo'
        # Check if there are illegal characters in the user list
        # They can end up in 'users' because they are not split
        if USERS_RE.match(users):
            raise ValueError("bad symbolic permission for mode: %s" % mode)
        # Now we have two list of equal length, one contains the requested
        # permissions and one with the corresponding operators.
        for idx, perms in enumerate(permlist):
            # Check if there are illegal characters in the permissions
            if PERMS_RE.match(perms):
                raise ValueError("bad symbolic permission for mode: %s" % mode)
            # Apply each (operator, perms) pair to every affected user class,
            # folding the result into new_mode.
            for user in users:
                mode_to_apply = cls._get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask)
                new_mode = cls._apply_operation_to_mode(user, opers[idx], mode_to_apply, new_mode)
    return new_mode
@staticmethod
def _apply_operation_to_mode(user, operator, mode_to_apply, current_mode):
    # Fold *mode_to_apply* into *current_mode* according to the chmod
    # operator ('=', '+' or '-') for one user class ('u', 'g' or 'o').
    if operator == '=':
        if user == 'u':
            mask = stat.S_IRWXU | stat.S_ISUID
        elif user == 'g':
            mask = stat.S_IRWXG | stat.S_ISGID
        elif user == 'o':
            mask = stat.S_IRWXO | stat.S_ISVTX
        # NOTE(review): 'mask' is unbound if user is not u/g/o; the caller
        # (_symbolic_mode_to_octal) expands users to 'ugo' — confirm
        # before reusing this helper elsewhere.
        # mask out u, g, or o permissions from current_mode and apply new permissions
        inverse_mask = mask ^ PERM_BITS
        new_mode = (current_mode & inverse_mask) | mode_to_apply
    elif operator == '+':
        new_mode = current_mode | mode_to_apply
    elif operator == '-':
        # Clear exactly the bits requested (no-op for bits already clear).
        new_mode = current_mode - (current_mode & mode_to_apply)
    return new_mode
@staticmethod
def _get_octal_mode_from_symbolic_perms(path_stat, user, perms, use_umask):
    # Translate one perms string (e.g. 'rwX', 's', or a copy spec like 'u')
    # for a single user class into an octal mode fragment, honoring the
    # process umask when use_umask is set (empty user spec).
    prev_mode = stat.S_IMODE(path_stat.st_mode)
    is_directory = stat.S_ISDIR(path_stat.st_mode)
    has_x_permissions = (prev_mode & EXEC_PERM_BITS) > 0
    # 'X' grants execute only on directories or on files that are already
    # executable for someone.
    apply_X_permission = is_directory or has_x_permissions
    # Get the umask, if the 'user' part is empty, the effect is as if (a) were
    # given, but bits that are set in the umask are not affected.
    # We also need the "reversed umask" for masking
    umask = os.umask(0)
    os.umask(umask)  # immediately restore the process umask
    rev_umask = umask ^ PERM_BITS
    # Permission bits constants documented at:
    # http://docs.python.org/2/library/stat.html#stat.S_ISUID
    if apply_X_permission:
        X_perms = {
            'u': {'X': stat.S_IXUSR},
            'g': {'X': stat.S_IXGRP},
            'o': {'X': stat.S_IXOTH},
        }
    else:
        X_perms = {
            'u': {'X': 0},
            'g': {'X': 0},
            'o': {'X': 0},
        }
    # Per-user-class lookup: each perm character maps to the octal bits it
    # contributes. The 'u'/'g'/'o' entries implement chmod's "copy from
    # another class" form, shifting the bits into this class's position.
    user_perms_to_modes = {
        'u': {
            'r': rev_umask & stat.S_IRUSR if use_umask else stat.S_IRUSR,
            'w': rev_umask & stat.S_IWUSR if use_umask else stat.S_IWUSR,
            'x': rev_umask & stat.S_IXUSR if use_umask else stat.S_IXUSR,
            's': stat.S_ISUID,
            't': 0,
            'u': prev_mode & stat.S_IRWXU,
            'g': (prev_mode & stat.S_IRWXG) << 3,
            'o': (prev_mode & stat.S_IRWXO) << 6},
        'g': {
            'r': rev_umask & stat.S_IRGRP if use_umask else stat.S_IRGRP,
            'w': rev_umask & stat.S_IWGRP if use_umask else stat.S_IWGRP,
            'x': rev_umask & stat.S_IXGRP if use_umask else stat.S_IXGRP,
            's': stat.S_ISGID,
            't': 0,
            'u': (prev_mode & stat.S_IRWXU) >> 3,
            'g': prev_mode & stat.S_IRWXG,
            'o': (prev_mode & stat.S_IRWXO) << 3},
        'o': {
            'r': rev_umask & stat.S_IROTH if use_umask else stat.S_IROTH,
            'w': rev_umask & stat.S_IWOTH if use_umask else stat.S_IWOTH,
            'x': rev_umask & stat.S_IXOTH if use_umask else stat.S_IXOTH,
            's': 0,
            't': stat.S_ISVTX,
            'u': (prev_mode & stat.S_IRWXU) >> 6,
            'g': (prev_mode & stat.S_IRWXG) >> 3,
            'o': prev_mode & stat.S_IRWXO},
    }
    # Insert X_perms into user_perms_to_modes
    for key, value in X_perms.items():
        user_perms_to_modes[key].update(value)
    # OR together the bits contributed by every character in *perms*.
    def or_reduce(mode, perm):
        return mode | user_perms_to_modes[user][perm]
    return reduce(or_reduce, perms, 0)
def set_fs_attributes_if_different(self, file_args, changed, diff=None, expand=True):
    """Apply SELinux context, owner, group, mode and attributes in turn.

    *file_args* is the dict produced by load_file_common_arguments;
    returns the accumulated changed flag.
    """
    path = file_args['path']
    changed = self.set_context_if_different(path, file_args['secontext'], changed, diff)
    changed = self.set_owner_if_different(path, file_args['owner'], changed, diff, expand)
    changed = self.set_group_if_different(path, file_args['group'], changed, diff, expand)
    changed = self.set_mode_if_different(path, file_args['mode'], changed, diff, expand)
    changed = self.set_attributes_if_different(path, file_args['attributes'], changed, diff, expand)
    return changed
def set_directory_attributes_if_different(self, file_args, changed, diff=None, expand=True):
    # Backwards-compatible alias: delegates to the generic fs handler.
    return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def set_file_attributes_if_different(self, file_args, changed, diff=None, expand=True):
    # Backwards-compatible alias: delegates to the generic fs handler.
    return self.set_fs_attributes_if_different(file_args, changed, diff, expand)
def add_path_info(self, kwargs):
    '''
    for results that are files, supplement the info about the file
    in the return path with stats about the file path.
    '''
    path = kwargs.get('path', kwargs.get('dest', None))
    if path is None:
        return kwargs
    b_path = to_bytes(path, errors='surrogate_or_strict')
    if os.path.exists(b_path):
        (uid, gid) = self.user_and_group(path)
        kwargs['uid'] = uid
        kwargs['gid'] = gid
        # Resolve names, falling back to the numeric ids when unknown.
        try:
            user = pwd.getpwuid(uid)[0]
        except KeyError:
            user = str(uid)
        try:
            group = grp.getgrgid(gid)[0]
        except KeyError:
            group = str(gid)
        kwargs['owner'] = user
        kwargs['group'] = group
        st = os.lstat(b_path)
        kwargs['mode'] = '0%03o' % stat.S_IMODE(st[stat.ST_MODE])
        # secontext not yet supported
        if os.path.islink(b_path):
            kwargs['state'] = 'link'
        elif os.path.isdir(b_path):
            kwargs['state'] = 'directory'
        elif os.stat(b_path).st_nlink > 1:
            # Regular file with multiple links: report as a hardlink.
            kwargs['state'] = 'hard'
        else:
            kwargs['state'] = 'file'
        if HAVE_SELINUX and self.selinux_enabled():
            kwargs['secontext'] = ':'.join(self.selinux_context(path))
        kwargs['size'] = st[stat.ST_SIZE]
    else:
        kwargs['state'] = 'absent'
    return kwargs
def _check_locale(self):
    '''
    Uses the locale module to test the currently set locale
    (per the LANG and LC_CTYPE environment settings)
    '''
    try:
        # setting the locale to '' uses the default locale
        # as it would be returned by locale.getdefaultlocale()
        locale.setlocale(locale.LC_ALL, '')
    except locale.Error:
        # fallback to the 'C' locale, which may cause unicode
        # issues but is preferable to simply failing because
        # of an unknown locale
        locale.setlocale(locale.LC_ALL, 'C')
        for envvar in ('LANG', 'LC_ALL', 'LC_MESSAGES'):
            os.environ[envvar] = 'C'
    except Exception as e:
        self.fail_json(msg="An unknown error was encountered while attempting to validate the locale: %s" %
                       to_native(e), exception=traceback.format_exc())
def _handle_aliases(self, spec=None, param=None):
    # Register every canonical name and alias as a legal input, build the
    # alias->canonical map, and copy alias values onto the canonical key.
    # this uses exceptions as it happens before we can safely call fail_json
    aliases_results = {}  # alias:canon
    if param is None:
        param = self.params
    if spec is None:
        spec = self.argument_spec
    for (k, v) in spec.items():
        self._legal_inputs.append(k)
        aliases = v.get('aliases', None)
        default = v.get('default', None)
        required = v.get('required', False)
        if default is not None and required:
            # not alias specific but this is a good place to check this
            raise Exception("internal error: required and default are mutually exclusive for %s" % k)
        if aliases is None:
            continue
        if not isinstance(aliases, SEQUENCETYPE) or isinstance(aliases, (binary_type, text_type)):
            raise Exception('internal error: aliases must be a list or tuple')
        for alias in aliases:
            self._legal_inputs.append(alias)
            aliases_results[alias] = k
            if alias in param:
                # The alias's value wins onto the canonical key.
                param[k] = param[alias]
    return aliases_results
def _handle_no_log_values(self, spec=None, param=None):
    # Collect values of no_log parameters (for later output scrubbing)
    # and queue deprecation warnings for removed_in_version parameters.
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    # Use the argspec to determine which args are no_log
    for arg_name, arg_opts in spec.items():
        if arg_opts.get('no_log', False):
            # Find the value for the no_log'd param
            no_log_object = param.get(arg_name, None)
            if no_log_object:
                self.no_log_values.update(return_values(no_log_object))
        if arg_opts.get('removed_in_version') is not None and arg_name in param:
            self._deprecations.append({
                'msg': "Param '%s' is deprecated. See the module docs for more information" % arg_name,
                'version': arg_opts.get('removed_in_version')
            })
def _check_arguments(self, check_invalid_arguments, spec=None, param=None, legal_inputs=None):
    # Consume the internal '_ansible_*' control parameters (setting the
    # corresponding attributes on self), strip them from self.params, and
    # optionally fail on any parameter not declared in the spec/aliases.
    self._syslog_facility = 'LOG_USER'
    unsupported_parameters = set()
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    if legal_inputs is None:
        legal_inputs = self._legal_inputs
    # list() because the loop deletes keys from self.params below.
    for (k, v) in list(param.items()):
        if k == '_ansible_check_mode' and v:
            self.check_mode = True
        elif k == '_ansible_no_log':
            self.no_log = self.boolean(v)
        elif k == '_ansible_debug':
            self._debug = self.boolean(v)
        elif k == '_ansible_diff':
            self._diff = self.boolean(v)
        elif k == '_ansible_verbosity':
            self._verbosity = v
        elif k == '_ansible_selinux_special_fs':
            self._selinux_special_fs = v
        elif k == '_ansible_syslog_facility':
            self._syslog_facility = v
        elif k == '_ansible_version':
            self.ansible_version = v
        elif k == '_ansible_module_name':
            self._name = v
        elif k == '_ansible_socket':
            self._socket_path = v
        elif check_invalid_arguments and k not in legal_inputs:
            unsupported_parameters.add(k)
        # clean up internal params:
        if k.startswith('_ansible_'):
            del self.params[k]
    if unsupported_parameters:
        msg = "Unsupported parameters for (%s) module: %s" % (self._name, ','.join(sorted(list(unsupported_parameters))))
        if self._options_context:
            msg += " found in %s." % " -> ".join(self._options_context)
        msg += " Supported parameters include: %s" % (','.join(sorted(spec.keys())))
        self.fail_json(msg=msg)
    if self.check_mode and not self.supports_check_mode:
        self.exit_json(skipped=True, msg="remote module (%s) does not support check mode" % self._name)
def _count_terms(self, check, param=None):
    """Return how many of the names in *check* are present in *param*."""
    if param is None:
        param = self.params
    return sum(1 for term in check if term in param)
def _check_mutually_exclusive(self, spec, param=None):
    """fail_json when more than one parameter of an exclusive set is given."""
    if spec is None:
        return
    for check in spec:
        if self._count_terms(check, param) > 1:
            msg = "parameters are mutually exclusive: %s" % (check,)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_required_one_of(self, spec, param=None):
    """fail_json when none of a required-one-of group was supplied."""
    if spec is None:
        return
    for check in spec:
        if self._count_terms(check, param) == 0:
            msg = "one of the following is required: %s" % ','.join(check)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_required_together(self, spec, param=None):
    """fail_json when only part of a required-together group was supplied."""
    if spec is None:
        return
    for check in spec:
        counts = [self._count_terms([field], param) for field in check]
        # Error only when the group is partially present: at least one
        # member supplied (any count > 0) but another missing (a 0 count).
        if any(counts) and 0 in counts:
            msg = "parameters are required together: %s" % (check,)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_required_arguments(self, spec=None, param=None):
    ''' ensure all required arguments are present '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    missing = [k for k, v in spec.items()
               if v.get('required', False) and k not in param]
    if missing:
        msg = "missing required arguments: %s" % ",".join(missing)
        if self._options_context:
            msg += " found in %s" % " -> ".join(self._options_context)
        self.fail_json(msg=msg)
def _check_required_if(self, spec, param=None):
    ''' ensure that parameters which conditionally required are present '''
    if spec is None:
        return
    if param is None:
        param = self.params
    # Each entry is (key, value, requirements) or
    # (key, value, requirements, is_one_of).
    for sp in spec:
        missing = []
        max_missing_count = 0
        is_one_of = False
        if len(sp) == 4:
            key, val, requirements, is_one_of = sp
        else:
            key, val, requirements = sp
        # is_one_of is True at least one requirement should be
        # present, else all requirements should be present.
        if is_one_of:
            # Only fail when ALL requirements are missing.
            max_missing_count = len(requirements)
        if key in param and param[key] == val:
            for check in requirements:
                count = self._count_terms((check,), param)
                if count == 0:
                    missing.append(check)
        if len(missing) and len(missing) >= max_missing_count:
            msg = "%s is %s but the following are missing: %s" % (key, val, ','.join(missing))
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def _check_argument_values(self, spec=None, param=None):
    ''' ensure all arguments have the requested values, and there are no stray arguments '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k, v) in spec.items():
        choices = v.get('choices', None)
        if choices is None:
            continue
        if isinstance(choices, SEQUENCETYPE) and not isinstance(choices, (binary_type, text_type)):
            if k in param:
                if param[k] not in choices:
                    # PyYaml converts certain strings to bools. If we can unambiguously convert back, do so before checking
                    # the value. If we can't figure this out, module author is responsible.
                    lowered_choices = None
                    if param[k] == 'False':
                        lowered_choices = _lenient_lowercase(choices)
                        overlap = BOOLEANS_FALSE.intersection(choices)
                        if len(overlap) == 1:
                            # Extract from a set
                            (param[k],) = overlap
                    if param[k] == 'True':
                        if lowered_choices is None:
                            lowered_choices = _lenient_lowercase(choices)
                        overlap = BOOLEANS_TRUE.intersection(choices)
                        if len(overlap) == 1:
                            (param[k],) = overlap
                    # Re-check after the possible bool-string normalization.
                    if param[k] not in choices:
                        choices_str = ",".join([to_native(c) for c in choices])
                        msg = "value of %s must be one of: %s, got: %s" % (k, choices_str, param[k])
                        if self._options_context:
                            msg += " found in %s" % " -> ".join(self._options_context)
                        self.fail_json(msg=msg)
        else:
            msg = "internal error: choices for argument %s are not iterable: %s" % (k, choices)
            if self._options_context:
                msg += " found in %s" % " -> ".join(self._options_context)
            self.fail_json(msg=msg)
def safe_eval(self, value, locals=None, include_exceptions=False):
    """Evaluate *value* as a Python literal, refusing calls and imports.

    When include_exceptions is true, returns a (result, exception) pair;
    otherwise the evaluated value (or the input unchanged on failure).
    """
    # do not allow method calls to modules
    if not isinstance(value, string_types):
        # already templated to a datavaluestructure, perhaps?
        return (value, None) if include_exceptions else value
    if re.search(r'\w\.\w+\(', value):
        return (value, None) if include_exceptions else value
    # do not allow imports
    if re.search(r'import \w+', value):
        return (value, None) if include_exceptions else value
    try:
        result = literal_eval(value)
    except Exception as e:
        return (value, e) if include_exceptions else value
    return (result, None) if include_exceptions else result
def _check_type_str(self, value):
    """Coerce *value* to a string; native strings pass through untouched."""
    # Note: This could throw a unicode error if value's __str__() method
    # returns non-ascii. Have to port utils.to_bytes() if that happens
    return value if isinstance(value, string_types) else str(value)
def _check_type_list(self, value):
    """Coerce *value* to a list: strings are comma-split, numbers wrapped."""
    if isinstance(value, list):
        return value
    if isinstance(value, string_types):
        return value.split(",")
    if isinstance(value, (int, float)):
        return [str(value)]
    raise TypeError('%s cannot be converted to a list' % type(value))
def _check_type_dict(self, value):
    # Coerce *value* to a dict. Strings are parsed either as JSON/literal
    # (when they start with '{') or as whitespace/comma separated
    # key=value pairs with quote and backslash-escape support.
    if isinstance(value, dict):
        return value
    if isinstance(value, string_types):
        if value.startswith("{"):
            try:
                return json.loads(value)
            except:
                # Not valid JSON: fall back to a python literal dict.
                (result, exc) = self.safe_eval(value, dict(), include_exceptions=True)
                if exc is not None:
                    raise TypeError('unable to evaluate string as dictionary')
                return result
        elif '=' in value:
            # Hand-rolled tokenizer: split on unquoted ',' or ' ',
            # honoring '...'/"..." quoting and backslash escapes.
            fields = []
            field_buffer = []
            in_quote = False   # holds the active quote char, or False
            in_escape = False  # True right after a backslash
            for c in value.strip():
                if in_escape:
                    field_buffer.append(c)
                    in_escape = False
                elif c == '\\':
                    in_escape = True
                elif not in_quote and c in ('\'', '"'):
                    in_quote = c
                elif in_quote and in_quote == c:
                    in_quote = False
                elif not in_quote and c in (',', ' '):
                    # Unquoted separator: flush the current field.
                    field = ''.join(field_buffer)
                    if field:
                        fields.append(field)
                    field_buffer = []
                else:
                    field_buffer.append(c)
            # Flush the trailing field, if any.
            field = ''.join(field_buffer)
            if field:
                fields.append(field)
            return dict(x.split("=", 1) for x in fields)
        else:
            raise TypeError("dictionary requested, could not parse JSON or key=value")
    raise TypeError('%s cannot be converted to a dict' % type(value))
def _check_type_bool(self, value):
    """Coerce strings/ints to bool via self.boolean(); bools pass through."""
    if isinstance(value, bool):
        return value
    if isinstance(value, (string_types, int)):
        return self.boolean(value)
    raise TypeError('%s cannot be converted to a bool' % type(value))
def _check_type_int(self, value):
    """Coerce numeric strings to int; ints pass through untouched."""
    if not isinstance(value, int):
        if not isinstance(value, string_types):
            raise TypeError('%s cannot be converted to an int' % type(value))
        value = int(value)
    return value
def _check_type_float(self, value):
    """Coerce strings/bytes/ints to float; floats pass through untouched."""
    if not isinstance(value, float):
        if not isinstance(value, (binary_type, text_type, int)):
            raise TypeError('%s cannot be converted to a float' % type(value))
        value = float(value)
    return value
def _check_type_path(self, value):
    """Coerce to str, then expand ~ and environment variables."""
    return os.path.expanduser(os.path.expandvars(self._check_type_str(value)))
def _check_type_jsonarg(self, value):
    # Return a jsonified string. Sometimes the controller turns a json
    # string into a dict/list so transform it back into json here
    if isinstance(value, (text_type, binary_type)):
        return value.strip()
    if isinstance(value, (list, tuple, dict)):
        return self.jsonify(value)
    raise TypeError('%s cannot be converted to a json string' % type(value))
def _check_type_raw(self, value):
    # 'raw' type: pass the value through completely untouched.
    return value
def _check_type_bytes(self, value):
    """Convert a human-readable size (e.g. '2K', '1MB') to an int byte count.

    Bug fix: the converted value is now returned. Previously the result
    of human_to_bytes() was discarded and the implicit None return made
    the type dispatcher (param[k] = type_checker(value)) replace the
    parameter with None.
    """
    try:
        return self.human_to_bytes(value)
    except ValueError:
        raise TypeError('%s cannot be converted to a Byte value' % type(value))
def _check_type_bits(self, value):
    """Convert a human-readable bit size (e.g. '1Mb') to an int bit count.

    Bug fix: the converted value is now returned. Previously the result
    of human_to_bytes() was discarded and the implicit None return made
    the type dispatcher (param[k] = type_checker(value)) replace the
    parameter with None.
    """
    try:
        return self.human_to_bytes(value, isbits=True)
    except ValueError:
        raise TypeError('%s cannot be converted to a Bit value' % type(value))
def _handle_options(self, argument_spec=None, params=None):
    ''' deal with options to create sub spec '''
    if argument_spec is None:
        argument_spec = self.argument_spec
    if params is None:
        params = self.params
    for (k, v) in argument_spec.items():
        wanted = v.get('type', None)
        # Sub-specs apply to dict parameters and to lists of dicts.
        if wanted == 'dict' or (wanted == 'list' and v.get('elements', '') == 'dict'):
            spec = v.get('options', None)
            if spec is None or not params[k]:
                continue
            # Track the nesting path so error messages can show it.
            self._options_context.append(k)
            if isinstance(params[k], dict):
                # Normalize: validate a single dict like a one-element list.
                elements = [params[k]]
            else:
                elements = params[k]
            for param in elements:
                if not isinstance(param, dict):
                    self.fail_json(msg="value of %s must be of type dict or list of dict" % k)
                # Run the same validation pipeline on the sub-spec as the
                # constructor runs on the top-level spec.
                self._set_fallbacks(spec, param)
                options_aliases = self._handle_aliases(spec, param)
                self._handle_no_log_values(spec, param)
                options_legal_inputs = list(spec.keys()) + list(options_aliases.keys())
                self._check_arguments(self.check_invalid_arguments, spec, param, options_legal_inputs)
                # check exclusive early
                if not self.bypass_checks:
                    self._check_mutually_exclusive(v.get('mutually_exclusive', None), param)
                self._set_defaults(pre=True, spec=spec, param=param)
                if not self.bypass_checks:
                    self._check_required_arguments(spec, param)
                    self._check_argument_types(spec, param)
                    self._check_argument_values(spec, param)
                    self._check_required_together(v.get('required_together', None), param)
                    self._check_required_one_of(v.get('required_one_of', None), param)
                    self._check_required_if(v.get('required_if', None), param)
                self._set_defaults(pre=False, spec=spec, param=param)
                # handle multi level options (sub argspec)
                self._handle_options(spec, param)
            self._options_context.pop()
def _check_argument_types(self, spec=None, param=None):
    ''' ensure all arguments have the requested type '''
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k, v) in spec.items():
        wanted = v.get('type', None)
        if k not in param:
            continue
        value = param[k]
        if value is None:
            continue
        if not callable(wanted):
            if wanted is None:
                # Mostly we want to default to str.
                # For values set to None explicitly, return None instead as
                # that allows a user to unset a parameter
                if param[k] is None:
                    continue
                wanted = 'str'
            try:
                # Dispatch by type name to the matching _check_type_* method.
                type_checker = self._CHECK_ARGUMENT_TYPES_DISPATCHER[wanted]
            except KeyError:
                self.fail_json(msg="implementation error: unknown type %s requested for %s" % (wanted, k))
        else:
            # set the type_checker to the callable, and reset wanted to the callable's name (or type if it doesn't have one, ala MagicMock)
            type_checker = wanted
            wanted = getattr(wanted, '__name__', to_native(type(wanted)))
        try:
            # Coerce in place; failures become a uniform fail_json message.
            param[k] = type_checker(value)
        except (TypeError, ValueError) as e:
            self.fail_json(msg="argument %s is of type %s and we were unable to convert to %s: %s" %
                           (k, type(value), wanted, to_native(e)))
def _set_defaults(self, pre=True, spec=None, param=None):
    """Fill in parameter defaults from the spec.

    pre=True: only apply non-None defaults (leaving required args absent
    so the required-arguments check can fire first).
    pre=False: ensure every spec key exists, defaulting to None.
    """
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for k, v in spec.items():
        if k in param:
            continue
        default = v.get('default', None)
        if pre:
            # this prevents setting defaults on required items
            if default is not None:
                param[k] = default
        else:
            # make sure things without a default still get set None
            param[k] = default
def _set_fallbacks(self, spec=None, param=None):
    # For each parameter that is absent but declares a fallback, invoke
    # the fallback strategy (e.g. env_fallback) to produce a value.
    # AnsibleFallbackNotFound is swallowed so default handling can apply.
    if spec is None:
        spec = self.argument_spec
    if param is None:
        param = self.params
    for (k, v) in spec.items():
        # fallback is (strategy, *args-or-kwargs); default is no strategy.
        fallback = v.get('fallback', (None,))
        fallback_strategy = fallback[0]
        fallback_args = []
        fallback_kwargs = {}
        if k not in param and fallback_strategy is not None:
            for item in fallback[1:]:
                if isinstance(item, dict):
                    fallback_kwargs = item
                else:
                    fallback_args = item
            try:
                param[k] = fallback_strategy(*fallback_args, **fallback_kwargs)
            except AnsibleFallbackNotFound:
                continue
def _load_params(self):
    ''' read the input and set the params attribute.
    This method is for backwards compatibility. The guts of the function
    were moved out in 2.1 so that custom modules could read the parameters.
    '''
    # debug overrides to read args from file or cmdline
    # Delegates to the module-level _load_params() helper of the same name.
    self.params = _load_params()
def _log_to_syslog(self, msg):
    """Send *msg* to syslog at INFO level; no-op without the syslog module."""
    if not HAS_SYSLOG:
        return
    facility = getattr(syslog, self._syslog_facility, syslog.LOG_USER)
    syslog.openlog(str('ansible-%s' % self._name), 0, facility)
    syslog.syslog(syslog.LOG_INFO, msg)
def debug(self, msg):
    """Log *msg* through self.log() only when debug mode is enabled."""
    if not self._debug:
        return
    self.log('[debug] %s' % msg)
def log(self, msg, log_args=None):
    # Log *msg* to systemd-journal when available, else syslog, scrubbing
    # any collected no_log values first. Suppressed entirely when no_log.
    if not self.no_log:
        if log_args is None:
            log_args = dict()
        module = 'ansible-%s' % self._name
        if isinstance(module, binary_type):
            module = module.decode('utf-8', 'replace')
        # 6655 - allow for accented characters
        if not isinstance(msg, (binary_type, text_type)):
            raise TypeError("msg should be a string (got %s)" % type(msg))
        # We want journal to always take text type
        # syslog takes bytes on py2, text type on py3
        if isinstance(msg, binary_type):
            journal_msg = remove_values(msg.decode('utf-8', 'replace'), self.no_log_values)
        else:
            # TODO: surrogateescape is a danger here on Py3
            journal_msg = remove_values(msg, self.no_log_values)
        if PY3:
            syslog_msg = journal_msg
        else:
            syslog_msg = journal_msg.encode('utf-8', 'replace')
        if has_journal:
            journal_args = [("MODULE", os.path.basename(__file__))]
            for arg in log_args:
                journal_args.append((arg.upper(), str(log_args[arg])))
            try:
                journal.send(u"%s %s" % (module, journal_msg), **dict(journal_args))
            except IOError:
                # fall back to syslog since logging to journal failed
                self._log_to_syslog(syslog_msg)
        else:
            self._log_to_syslog(syslog_msg)
def _log_invocation(self):
    ''' log that ansible ran the module '''
    # TODO: generalize a separate log function and make log_invocation use it
    # Sanitize possible password argument when logging.
    log_args = dict()

    for param in self.params:
        canon = self.aliases.get(param, param)
        arg_opts = self.argument_spec.get(canon, {})
        no_log = arg_opts.get('no_log', False)

        if self.boolean(no_log):
            log_args[param] = 'NOT_LOGGING_PARAMETER'
        # try to capture all passwords/passphrase named fields missed by no_log
        elif PASSWORD_MATCH.search(param) and arg_opts.get('type', 'str') != 'bool' and not arg_opts.get('choices', False):
            # skip boolean and enums as they are about 'password' state
            log_args[param] = 'NOT_LOGGING_PASSWORD'
            self.warn('Module did not set no_log for %s' % param)
        else:
            param_val = self.params[param]
            # NOTE(review): on Py3 the str() branch produces text that is
            # never encoded to bytes, unlike the text_type branch below --
            # presumably heuristic_log_sanitize copes with both; confirm.
            if not isinstance(param_val, (text_type, binary_type)):
                param_val = str(param_val)
            elif isinstance(param_val, text_type):
                param_val = param_val.encode('utf-8')
            log_args[param] = heuristic_log_sanitize(param_val, self.no_log_values)

    msg = ['%s=%s' % (to_native(arg), to_native(val)) for arg, val in log_args.items()]
    if msg:
        msg = 'Invoked with %s' % ' '.join(msg)
    else:
        msg = 'Invoked'

    self.log(msg, log_args=log_args)
def _set_cwd(self):
try:
cwd = os.getcwd()
if not os.access(cwd, os.F_OK | os.R_OK):
raise Exception()
return cwd
except:
# we don't have access to the cwd, probably because of sudo.
# Try and move to a neutral location to prevent errors
for cwd in [os.path.expandvars('$HOME'), tempfile.gettempdir()]:
try:
if os.access(cwd, os.F_OK | os.R_OK):
os.chdir(cwd)
return cwd
except:
pass
# we won't error here, as it may *not* be a problem,
# and we don't want to break modules unnecessarily
return None
def get_bin_path(self, arg, required=False, opt_dirs=None):
    '''
    find system executable in PATH.

    Optional arguments:
       - required:  if executable is not found and required is true, fail_json
       - opt_dirs:  optional list of directories to search in addition to PATH
    if found return full path; otherwise return None
    '''
    search_dirs = []
    # Caller-supplied directories are searched first.
    for extra in (opt_dirs or []):
        if extra is not None and os.path.exists(extra):
            search_dirs.append(extra)
    search_dirs += os.environ.get('PATH', '').split(os.pathsep)

    # mangle PATH to include /sbin dirs
    for sbin in ('/sbin', '/usr/sbin', '/usr/local/sbin'):
        if sbin not in search_dirs and os.path.exists(sbin):
            search_dirs.append(sbin)

    found = None
    for directory in search_dirs:
        if not directory:
            continue
        candidate = os.path.join(directory, arg)
        if os.path.exists(candidate) and not os.path.isdir(candidate) and is_executable(candidate):
            found = candidate
            break

    if required and found is None:
        self.fail_json(msg='Failed to find required executable %s in paths: %s'
                           % (arg, os.pathsep.join(search_dirs)))
    return found
def boolean(self, arg):
    '''Coerce ``arg`` to a bool, passing None through unchanged.

    Delegates to the module-level boolean() converter; a TypeError from
    the converter is turned into a module failure.
    '''
    if arg is None:
        return None
    try:
        return boolean(arg)
    except TypeError as e:
        self.fail_json(msg=to_native(e))
def jsonify(self, data):
    '''Serialize ``data`` to a JSON string, trying utf-8 then latin-1.

    Falls back to converting byte strings to unicode for old simplejson
    versions that reject the ``encoding`` keyword; fails the module if no
    encoding works.
    '''
    for encoding in ("utf-8", "latin-1"):
        try:
            return json.dumps(data, encoding=encoding, cls=_SetEncoder)
        # Old systems using old simplejson module does not support encoding keyword.
        except TypeError:
            try:
                new_data = json_dict_bytes_to_unicode(data, encoding=encoding)
            except UnicodeDecodeError:
                continue
            return json.dumps(new_data, cls=_SetEncoder)
        except UnicodeDecodeError:
            continue
    self.fail_json(msg='Invalid unicode encoding encountered')
def from_json(self, data):
    '''Deserialize the JSON document ``data`` and return the result.'''
    return json.loads(data)
def add_cleanup_file(self, path):
    '''Register ``path`` for later removal, ignoring duplicates.'''
    if path in self.cleanup_files:
        return
    self.cleanup_files.append(path)
def do_cleanup_files(self):
    '''Remove every file previously registered via add_cleanup_file().'''
    for registered in self.cleanup_files:
        self.cleanup(registered)
def _return_formatted(self, kwargs):
    '''Emit the module result ``kwargs`` as JSON on stdout.

    Adds path info and the invocation record, folds accumulated warnings
    and deprecations into the result, and censors no_log values before
    printing.
    '''
    self.add_path_info(kwargs)

    if 'invocation' not in kwargs:
        kwargs['invocation'] = {'module_args': self.params}

    if 'warnings' in kwargs:
        if isinstance(kwargs['warnings'], list):
            for w in kwargs['warnings']:
                self.warn(w)
        else:
            self.warn(kwargs['warnings'])

    if self._warnings:
        kwargs['warnings'] = self._warnings

    if 'deprecations' in kwargs:
        if isinstance(kwargs['deprecations'], list):
            for d in kwargs['deprecations']:
                # A (msg, version) pair is expanded into a versioned deprecation.
                if isinstance(d, SEQUENCETYPE) and len(d) == 2:
                    self.deprecate(d[0], version=d[1])
                else:
                    self.deprecate(d)
        else:
            self.deprecate(kwargs['deprecations'])

    if self._deprecations:
        kwargs['deprecations'] = self._deprecations

    # Censor registered secret values before the result leaves the module.
    kwargs = remove_values(kwargs, self.no_log_values)
    print('\n%s' % self.jsonify(kwargs))
def exit_json(self, **kwargs):
    ''' return from the module, without error '''
    # Remove registered temp files, print the JSON result, then exit 0.
    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(0)
def fail_json(self, **kwargs):
    ''' return from the module, with an error message '''
    # A plain ``assert`` here would be silently stripped under ``python -O``;
    # raise the same exception type explicitly so the contract always holds.
    if 'msg' not in kwargs:
        raise AssertionError("implementation error -- msg to explain the error is required")
    kwargs['failed'] = True

    # add traceback if debug or high verbosity and it is missing
    # Note: badly named as exception, it is really always been 'traceback'
    if 'exception' not in kwargs and sys.exc_info()[2] and (self._debug or self._verbosity >= 3):
        kwargs['exception'] = ''.join(traceback.format_tb(sys.exc_info()[2]))

    self.do_cleanup_files()
    self._return_formatted(kwargs)
    sys.exit(1)
def fail_on_missing_params(self, required_params=None):
    ''' This is for checking for required params when we can not check via argspec because we
    need more information than is simply given in the argspec.
    '''
    if not required_params:
        return
    # Collect every required parameter that is absent or falsy.
    missing = [name for name in required_params if not self.params.get(name)]
    if missing:
        self.fail_json(msg="missing required arguments: %s" % ','.join(missing))
def digest_from_file(self, filename, algorithm):
    ''' Return hex digest of local file for a digest_method specified by name, or None if file is not present.

    ``algorithm`` may be either the name of a hash in
    AVAILABLE_HASH_ALGORITHMS or an already-constructed hash object
    (anything with a ``hexdigest`` attribute).
    '''
    if not os.path.exists(filename):
        return None
    if os.path.isdir(filename):
        self.fail_json(msg="attempted to take checksum of directory: %s" % filename)

    # preserve old behaviour where the third parameter was a hash algorithm object
    if hasattr(algorithm, 'hexdigest'):
        digest_method = algorithm
    else:
        try:
            digest_method = AVAILABLE_HASH_ALGORITHMS[algorithm]()
        except KeyError:
            self.fail_json(msg="Could not hash file '%s' with algorithm '%s'. Available algorithms: %s" %
                               (filename, algorithm, ', '.join(AVAILABLE_HASH_ALGORITHMS)))

    blocksize = 64 * 1024
    # Use a context manager so the handle is closed even if read() raises
    # (the previous open/close pair leaked the descriptor on error).
    with open(os.path.realpath(filename), 'rb') as infile:
        block = infile.read(blocksize)
        while block:
            digest_method.update(block)
            block = infile.read(blocksize)
    return digest_method.hexdigest()
def md5(self, filename):
    ''' Return MD5 hex digest of local file using digest_from_file().

    Do not use this function unless you have no other choice for:
        1) Optional backwards compatibility
        2) Compatibility with a third party protocol

    This function will not work on systems complying with FIPS-140-2.

    Most uses of this function can use the module.sha1 function instead.
    '''
    # MD5 is absent from hashlib on FIPS-enabled systems.
    if 'md5' not in AVAILABLE_HASH_ALGORITHMS:
        raise ValueError('MD5 not available.  Possibly running in FIPS mode')
    return self.digest_from_file(filename, 'md5')
def sha1(self, filename):
    ''' Return SHA1 hex digest of local file using digest_from_file(). '''
    return self.digest_from_file(filename, 'sha1')
def sha256(self, filename):
    ''' Return SHA-256 hex digest of local file using digest_from_file(). '''
    return self.digest_from_file(filename, 'sha256')
def backup_local(self, fn):
    '''Make a date-marked backup copy of ``fn``.

    Returns the backup path on success, or '' when ``fn`` does not exist.
    Fails the module if the copy cannot be made.
    '''
    if not os.path.exists(fn):
        return ''

    # backups named basename.PID.YYYY-MM-DD@HH:MM:SS~
    stamp = time.strftime("%Y-%m-%d@%H:%M:%S~", time.localtime(time.time()))
    backupdest = '%s.%s.%s' % (fn, os.getpid(), stamp)
    try:
        self.preserved_copy(fn, backupdest)
    except (shutil.Error, IOError) as e:
        self.fail_json(msg='Could not make backup of %s to %s: %s' % (fn, backupdest, to_native(e)))
    return backupdest
def cleanup(self, tmpfile):
    '''Best-effort removal of ``tmpfile``; warns on stderr instead of raising.'''
    if not os.path.exists(tmpfile):
        return
    try:
        os.unlink(tmpfile)
    except OSError as e:
        sys.stderr.write("could not cleanup %s: %s" % (tmpfile, to_native(e)))
def preserved_copy(self, src, dest):
    """Copy a file with preserved ownership, permissions and context"""

    # shutil.copy2(src, dst)
    #   Similar to shutil.copy(), but metadata is copied as well - in fact,
    #   this is just shutil.copy() followed by copystat(). This is similar
    #   to the Unix command cp -p.
    #
    # shutil.copystat(src, dst)
    #   Copy the permission bits, last access time, last modification time,
    #   and flags from src to dst. The file contents, owner, and group are
    #   unaffected. src and dst are path names given as strings.

    shutil.copy2(src, dest)

    # Set the context
    if self.selinux_enabled():
        context = self.selinux_context(src)
        self.set_context_if_different(dest, context, False)

    # chown it
    try:
        dest_stat = os.stat(src)
        tmp_stat = os.stat(dest)
        if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
            os.chown(dest, dest_stat.st_uid, dest_stat.st_gid)
    except OSError as e:
        # EPERM is expected when not running as root; anything else is real.
        if e.errno != errno.EPERM:
            raise

    # Set the attributes
    current_attribs = self.get_file_attributes(src)
    current_attribs = current_attribs.get('attr_flags', [])
    current_attribs = ''.join(current_attribs)
    self.set_attributes_if_different(dest, current_attribs, True)
def atomic_move(self, src, dest, unsafe_writes=False):
    '''atomically move src to dest, copying attributes from dest, returns true on success
    it uses os.rename to ensure this as it is an atomic operation, rest of the function is
    to work around limitations, corner cases and ensure selinux context is saved if possible'''
    context = None
    dest_stat = None
    b_src = to_bytes(src, errors='surrogate_or_strict')
    b_dest = to_bytes(dest, errors='surrogate_or_strict')
    if os.path.exists(b_dest):
        try:
            dest_stat = os.stat(b_dest)

            # copy mode and ownership
            os.chmod(b_src, dest_stat.st_mode & PERM_BITS)
            os.chown(b_src, dest_stat.st_uid, dest_stat.st_gid)

            # try to copy flags if possible
            if hasattr(os, 'chflags') and hasattr(dest_stat, 'st_flags'):
                try:
                    os.chflags(b_src, dest_stat.st_flags)
                except OSError as e:
                    # ignore "flags not supported" on this filesystem
                    for err in 'EOPNOTSUPP', 'ENOTSUP':
                        if hasattr(errno, err) and e.errno == getattr(errno, err):
                            break
                    else:
                        raise
        except OSError as e:
            if e.errno != errno.EPERM:
                raise
        if self.selinux_enabled():
            context = self.selinux_context(dest)
    else:
        if self.selinux_enabled():
            context = self.selinux_default_context(dest)

    creating = not os.path.exists(b_dest)

    try:
        # Optimistically try a rename, solves some corner cases and can avoid useless work, throws exception if not atomic.
        os.rename(b_src, b_dest)
    except (IOError, OSError) as e:
        if e.errno not in [errno.EPERM, errno.EXDEV, errno.EACCES, errno.ETXTBSY, errno.EBUSY]:
            # only try workarounds for errno 18 (cross device), 1 (not permitted),  13 (permission denied)
            # and 26 (text file busy) which happens on vagrant synced folders and other 'exotic' non posix file systems
            self.fail_json(msg='Could not replace file: %s to %s: %s' % (src, dest, to_native(e)),
                           exception=traceback.format_exc())
        else:
            # Fall back to copying via a temp file in the destination dir,
            # then renaming that temp file over dest (atomic on same fs).
            b_dest_dir = os.path.dirname(b_dest)

            # Use bytes here.  In the shippable CI, this fails with
            # a UnicodeError with surrogateescape'd strings for an unknown
            # reason (doesn't happen in a local Ubuntu16.04 VM)
            native_dest_dir = b_dest_dir
            native_suffix = os.path.basename(b_dest)
            native_prefix = b('.ansible_tmp')
            error_msg = None
            tmp_dest_name = None
            try:
                tmp_dest_fd, tmp_dest_name = tempfile.mkstemp(prefix=native_prefix, dir=native_dest_dir, suffix=native_suffix)
            except (OSError, IOError) as e:
                error_msg = 'The destination directory (%s) is not writable by the current user. Error was: %s' % (os.path.dirname(dest), to_native(e))
            except TypeError:
                # We expect that this is happening because python3.4.x and
                # below can't handle byte strings in mkstemp().  Traceback
                # would end in something like:
                #     file = _os.path.join(dir, pre + name + suf)
                # TypeError: can't concat bytes to str
                error_msg = ('Failed creating temp file for atomic move.  This usually happens when using Python3 less than Python3.5. '
                             'Please use Python2.x or Python3.5 or greater.')
            finally:
                if error_msg:
                    if unsafe_writes:
                        self._unsafe_writes(b_src, b_dest)
                    else:
                        self.fail_json(msg=error_msg, exception=traceback.format_exc())

            if tmp_dest_name:
                b_tmp_dest_name = to_bytes(tmp_dest_name, errors='surrogate_or_strict')

                try:
                    try:
                        # close tmp file handle before file operations to prevent text file busy errors on vboxfs synced folders (windows host)
                        os.close(tmp_dest_fd)
                        # leaves tmp file behind when sudo and not root
                        try:
                            shutil.move(b_src, b_tmp_dest_name)
                        except OSError:
                            # cleanup will happen by 'rm' of tempdir
                            # copy2 will preserve some metadata
                            shutil.copy2(b_src, b_tmp_dest_name)

                        if self.selinux_enabled():
                            self.set_context_if_different(
                                b_tmp_dest_name, context, False)
                        try:
                            tmp_stat = os.stat(b_tmp_dest_name)
                            if dest_stat and (tmp_stat.st_uid != dest_stat.st_uid or tmp_stat.st_gid != dest_stat.st_gid):
                                os.chown(b_tmp_dest_name, dest_stat.st_uid, dest_stat.st_gid)
                        except OSError as e:
                            if e.errno != errno.EPERM:
                                raise
                        try:
                            os.rename(b_tmp_dest_name, b_dest)
                        except (shutil.Error, OSError, IOError) as e:
                            if unsafe_writes and e.errno == errno.EBUSY:
                                self._unsafe_writes(b_tmp_dest_name, b_dest)
                            else:
                                self.fail_json(msg='Unable to rename file: %s to %s: %s' % (src, dest, to_native(e)),
                                               exception=traceback.format_exc())
                    except (shutil.Error, OSError, IOError) as e:
                        self.fail_json(msg='Failed to replace file: %s to %s: %s' % (src, dest, to_native(e)),
                                       exception=traceback.format_exc())
                finally:
                    # always remove the temp file, even on failure
                    self.cleanup(b_tmp_dest_name)

    if creating:
        # make sure the file has the correct permissions
        # based on the current value of umask
        umask = os.umask(0)
        os.umask(umask)
        os.chmod(b_dest, DEFAULT_PERM & ~umask)
        try:
            os.chown(b_dest, os.geteuid(), os.getegid())
        except OSError:
            # We're okay with trying our best here.  If the user is not
            # root (or old Unices) they won't be able to chown.
            pass

    if self.selinux_enabled():
        # rename might not preserve context
        self.set_context_if_different(dest, context, False)
def _unsafe_writes(self, src, dest):
# sadly there are some situations where we cannot ensure atomicity, but only if
# the user insists and we get the appropriate error we update the file unsafely
try:
try:
out_dest = open(dest, 'wb')
in_src = open(src, 'rb')
shutil.copyfileobj(in_src, out_dest)
finally: # assuring closed files in 2.4 compatible way
if out_dest:
out_dest.close()
if in_src:
in_src.close()
except (shutil.Error, OSError, IOError) as e:
self.fail_json(msg='Could not write data to file (%s) from (%s): %s' % (dest, src, to_native(e)),
exception=traceback.format_exc())
def _read_from_pipes(self, rpipes, rfds, file_descriptor):
    '''Read up to 9000 bytes from ``file_descriptor`` if select() marked it
    readable; drop it from ``rpipes`` on EOF.  Returns the bytes read.'''
    if file_descriptor not in rfds:
        return b('')
    chunk = os.read(file_descriptor.fileno(), 9000)
    if chunk == b(''):
        # EOF: stop polling this pipe.
        rpipes.remove(file_descriptor)
    return chunk
def run_command(self, args, check_rc=False, close_fds=True, executable=None, data=None, binary_data=False, path_prefix=None, cwd=None,
                use_unsafe_shell=False, prompt_regex=None, environ_update=None, umask=None, encoding='utf-8', errors='surrogate_or_strict'):
    '''
    Execute a command, returns rc, stdout, and stderr.

    :arg args: is the command to run
        * If args is a list, the command will be run with shell=False.
        * If args is a string and use_unsafe_shell=False it will split args to a list and run with shell=False
        * If args is a string and use_unsafe_shell=True it runs with shell=True.
    :kw check_rc: Whether to call fail_json in case of non zero RC.
        Default False
    :kw close_fds: See documentation for subprocess.Popen(). Default True
    :kw executable: See documentation for subprocess.Popen(). Default None
    :kw data: If given, information to write to the stdin of the command
    :kw binary_data: If False, append a newline to the data.  Default False
    :kw path_prefix: If given, additional path to find the command in.
        This adds to the PATH environment vairable so helper commands in
        the same directory can also be found
    :kw cwd: If given, working directory to run the command inside
    :kw use_unsafe_shell: See `args` parameter.  Default False
    :kw prompt_regex: Regex string (not a compiled regex) which can be
        used to detect prompts in the stdout which would otherwise cause
        the execution to hang (especially if no input data is specified)
    :kw environ_update: dictionary to *update* os.environ with
    :kw umask: Umask to be used when running the command. Default None
    :kw encoding: Since we return native strings, on python3 we need to
        know the encoding to use to transform from bytes to text.  If you
        want to always get bytes back, use encoding=None.  The default is
        "utf-8".  This does not affect transformation of strings given as
        args.
    :kw errors: Since we return native strings, on python3 we need to
        transform stdout and stderr from bytes to text.  If the bytes are
        undecodable in the ``encoding`` specified, then use this error
        handler to deal with them.  The default is ``surrogate_or_strict``
        which means that the bytes will be decoded using the
        surrogateescape error handler if available (available on all
        python3 versions we support) otherwise a UnicodeError traceback
        will be raised.  This does not affect transformations of strings
        given as args.
    :returns: A 3-tuple of return code (integer), stdout (native string),
        and stderr (native string).  On python2, stdout and stderr are both
        byte strings.  On python3, stdout and stderr are text strings converted
        according to the encoding and errors parameters.  If you want byte
        strings on python3, use encoding=None to turn decoding to text off.
    '''
    # --- normalize args into either a list (shell=False) or string ---
    if isinstance(args, list):
        if use_unsafe_shell:
            args = " ".join([shlex_quote(x) for x in args])
            shell = True
    elif isinstance(args, (binary_type, text_type)):
        if use_unsafe_shell:
            shell = True
        else:
            # On python2.6 and below, shlex has problems with text type
            # On python3, shlex needs a text type.
            if PY2:
                args = to_bytes(args, errors='surrogate_or_strict')
            elif PY3:
                args = to_text(args, errors='surrogateescape')
            args = shlex.split(args)
    else:
        msg = "Argument 'args' to run_command must be list or string"
        self.fail_json(rc=257, cmd=args, msg=msg)

    # shell is re-derived here; when unsafe, prefer running the command
    # string through an explicit shell executable rather than shell=True.
    shell = False
    if use_unsafe_shell:
        if executable is None:
            executable = os.environ.get('SHELL')
        if executable:
            args = [executable, '-c', args]
        else:
            shell = True

    prompt_re = None
    if prompt_regex:
        if isinstance(prompt_regex, text_type):
            if PY3:
                prompt_regex = to_bytes(prompt_regex, errors='surrogateescape')
            elif PY2:
                prompt_regex = to_bytes(prompt_regex, errors='surrogate_or_strict')
        try:
            prompt_re = re.compile(prompt_regex, re.MULTILINE)
        except re.error:
            self.fail_json(msg="invalid prompt regular expression given to run_command")

    # expand things like $HOME and ~
    if not shell:
        args = [os.path.expanduser(os.path.expandvars(x)) for x in args if x is not None]

    rc = 0
    msg = None
    st_in = None

    # Manipulate the environ we'll send to the new process
    old_env_vals = {}
    # We can set this from both an attribute and per call
    for key, val in self.run_command_environ_update.items():
        old_env_vals[key] = os.environ.get(key, None)
        os.environ[key] = val
    if environ_update:
        for key, val in environ_update.items():
            old_env_vals[key] = os.environ.get(key, None)
            os.environ[key] = val
    if path_prefix:
        old_env_vals['PATH'] = os.environ['PATH']
        os.environ['PATH'] = "%s:%s" % (path_prefix, os.environ['PATH'])

    # If using test-module and explode, the remote lib path will resemble ...
    #   /tmp/test_module_scratch/debug_dir/ansible/module_utils/basic.py
    # If using ansible or ansible-playbook with a remote system ...
    #   /tmp/ansible_vmweLQ/ansible_modlib.zip/ansible/module_utils/basic.py

    # Clean out python paths set by ansiballz
    if 'PYTHONPATH' in os.environ:
        pypaths = os.environ['PYTHONPATH'].split(':')
        pypaths = [x for x in pypaths
                   if not x.endswith('/ansible_modlib.zip') and
                   not x.endswith('/debug_dir')]
        os.environ['PYTHONPATH'] = ':'.join(pypaths)
        if not os.environ['PYTHONPATH']:
            del os.environ['PYTHONPATH']

    # create a printable version of the command for use
    # in reporting later, which strips out things like
    # passwords from the args list
    to_clean_args = args
    if PY2:
        if isinstance(args, text_type):
            to_clean_args = to_bytes(args)
    else:
        if isinstance(args, binary_type):
            to_clean_args = to_text(args)
    if isinstance(args, (text_type, binary_type)):
        to_clean_args = shlex.split(to_clean_args)

    clean_args = []
    is_passwd = False
    for arg in (to_native(a) for a in to_clean_args):
        if is_passwd:
            is_passwd = False
            clean_args.append('********')
            continue
        if PASSWD_ARG_RE.match(arg):
            sep_idx = arg.find('=')
            if sep_idx > -1:
                clean_args.append('%s=********' % arg[:sep_idx])
                continue
            else:
                # value is the *next* arg; censor it on the next iteration
                is_passwd = True
        arg = heuristic_log_sanitize(arg, self.no_log_values)
        clean_args.append(arg)
    clean_args = ' '.join(shlex_quote(arg) for arg in clean_args)

    if data:
        st_in = subprocess.PIPE

    kwargs = dict(
        executable=executable,
        shell=shell,
        close_fds=close_fds,
        stdin=st_in,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )

    # store the pwd
    prev_dir = os.getcwd()

    # make sure we're in the right working directory
    if cwd and os.path.isdir(cwd):
        cwd = os.path.abspath(os.path.expanduser(cwd))
        kwargs['cwd'] = cwd
        try:
            os.chdir(cwd)
        except (OSError, IOError) as e:
            self.fail_json(rc=e.errno, msg="Could not open %s, %s" % (cwd, to_native(e)),
                           exception=traceback.format_exc())

    old_umask = None
    if umask:
        old_umask = os.umask(umask)

    try:
        if self._debug:
            self.log('Executing: ' + clean_args)
        cmd = subprocess.Popen(args, **kwargs)

        # the communication logic here is essentially taken from that
        # of the _communicate() function in ssh.py

        stdout = b('')
        stderr = b('')
        rpipes = [cmd.stdout, cmd.stderr]

        if data:
            if not binary_data:
                data += '\n'
            if isinstance(data, text_type):
                data = to_bytes(data)
            cmd.stdin.write(data)
            cmd.stdin.close()

        while True:
            rfds, wfds, efds = select.select(rpipes, [], rpipes, 1)
            stdout += self._read_from_pipes(rpipes, rfds, cmd.stdout)
            stderr += self._read_from_pipes(rpipes, rfds, cmd.stderr)
            # if we're checking for prompts, do it now
            if prompt_re:
                if prompt_re.search(stdout) and not data:
                    if encoding:
                        stdout = to_native(stdout, encoding=encoding, errors=errors)
                    else:
                        stdout = stdout
                    return (257, stdout, "A prompt was encountered while running a command, but no input data was specified")
            # only break out if no pipes are left to read or
            # the pipes are completely read and
            # the process is terminated
            if (not rpipes or not rfds) and cmd.poll() is not None:
                break
            # No pipes are left to read but process is not yet terminated
            # Only then it is safe to wait for the process to be finished
            # NOTE: Actually cmd.poll() is always None here if rpipes is empty
            elif not rpipes and cmd.poll() is None:
                cmd.wait()
                # The process is terminated. Since no pipes to read from are
                # left, there is no need to call select() again.
                break

        cmd.stdout.close()
        cmd.stderr.close()

        rc = cmd.returncode
    except (OSError, IOError) as e:
        self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(e)))
        self.fail_json(rc=e.errno, msg=to_native(e), cmd=clean_args)
    except Exception as e:
        self.log("Error Executing CMD:%s Exception:%s" % (clean_args, to_native(traceback.format_exc())))
        self.fail_json(rc=257, msg=to_native(e), exception=traceback.format_exc(), cmd=clean_args)

    # Restore env settings
    for key, val in old_env_vals.items():
        if val is None:
            del os.environ[key]
        else:
            os.environ[key] = val

    if old_umask:
        os.umask(old_umask)

    if rc != 0 and check_rc:
        msg = heuristic_log_sanitize(stderr.rstrip(), self.no_log_values)
        self.fail_json(cmd=clean_args, rc=rc, stdout=stdout, stderr=stderr, msg=msg)

    # reset the pwd
    os.chdir(prev_dir)

    if encoding is not None:
        return (rc, to_native(stdout, encoding=encoding, errors=errors),
                to_native(stderr, encoding=encoding, errors=errors))
    return (rc, stdout, stderr)
def append_to_file(self, filename, str):
    '''Append the text ``str`` to ``filename``, expanding ~ and env vars.

    NOTE: the parameter name ``str`` shadows the builtin; it is kept
    unchanged for backward compatibility with keyword callers.
    '''
    filename = os.path.expandvars(os.path.expanduser(filename))
    # Context manager guarantees the handle is closed even if write() raises
    # (the previous open/write/close sequence leaked on error).
    with open(filename, 'a') as fh:
        fh.write(str)
def bytes_to_human(self, size):
    '''Delegate to the module-level bytes_to_human() helper.'''
    return bytes_to_human(size)

# for backwards compatibility
pretty_bytes = bytes_to_human
def human_to_bytes(self, number, isbits=False):
    '''Delegate to the module-level human_to_bytes() helper.'''
    return human_to_bytes(number, isbits)

#
# Backwards compat
#

# In 2.0, moved from inside the module to the toplevel
is_executable = is_executable
def get_module_path():
    '''Return the directory containing this file, with symlinks resolved.'''
    return os.path.dirname(os.path.realpath(__file__))
| SteveHNH/ansible | lib/ansible/module_utils/basic.py | Python | gpl-3.0 | 112,311 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
"""
core.sshd_guard.guard
~~~~~~~~~~~~~~~~~~~~~
Watch SSHD's event and make honeypots.
:author: lightless <root@lightless.me>
:homepage: https://github.com/LiGhT1EsS/kokkuri
:license: GPL-3.0, see LICENSE for more details.
:copyright: Copyright (c) 2017 lightless. All rights reserved
"""
import time
import queue
import datetime
import threading
from models import Session, KokkuriSSHEvent, KokkuriSSHPot
from config import server_config, mmap
from utils import logger
class SSHDGuard(object):
    """Watch sshd login failures and spin up one honeypot per attacking IP.

    Two worker threads are started on construction:

    * the guard thread polls the database for recent failed SSH logins and
      queues source IPs that failed five or more times;
    * the pot-maker thread consumes that queue and creates a docker
      honeypot container for each attacker not already bound to one.

    Call :meth:`stop` to ask both threads to exit.
    """

    def __init__(self):
        super(SSHDGuard, self).__init__()
        self._exit_flag = False
        self.evil_task_queue = queue.Queue()
        self.docker_pot = mmap.docker_pot

        self.make_pots_thread = threading.Thread(target=self.__make_pots_thread, name="MakePotsThread")
        self.make_pots_thread.start()

        self.guard_thread = threading.Thread(target=self.__guard_thread, name="SSHDGuardThread")
        self.guard_thread.start()

    def stop(self):
        """Ask both worker threads to exit after their current iteration."""
        self._exit_flag = True

    def __guard_thread(self):
        """Poll recent failed logins and queue IPs with >= 5 failures."""
        logger.info("SSHD Guard start.")
        session = Session()
        while not self._exit_flag:
            logger.debug("Checking...")

            # Fetch failed-login events from the last two minutes.
            current_time = datetime.datetime.now()
            last_time = datetime.datetime.now() - datetime.timedelta(minutes=2)
            logger.debug("current_time")
            logger.debug(current_time)
            logger.debug("last_time")
            logger.debug(last_time)
            ssh_event_qs = session.query(KokkuriSSHEvent).filter(
                KokkuriSSHEvent.is_deleted == 0, KokkuriSSHEvent.created_time < current_time,
                KokkuriSSHEvent.created_time > last_time, KokkuriSSHEvent.result == 0
            ).all()
            session.commit()

            # Count failures per source IP.
            analyze_dict = dict()
            logger.debug("ssh event qs")
            logger.debug(ssh_event_qs)
            for qs in ssh_event_qs:
                source_ip = qs.source_ip
                if source_ip not in analyze_dict:
                    analyze_dict[source_ip] = 0
                analyze_dict[source_ip] += 1
            logger.debug(analyze_dict)

            # todo: make the failure threshold configurable
            # todo: skip loopback addresses
            # Queue IPs with five or more failures for the pot-maker thread.
            evil_ip = [ip for ip, fail_count in analyze_dict.items() if fail_count >= 5]
            if evil_ip:
                self.evil_task_queue.put(evil_ip)

            time.sleep(5)

    def __make_pots_thread(self):
        """Consume queued attacker IPs and create a honeypot container each."""
        db_session = Session()
        while not self._exit_flag:
            if self.evil_task_queue.empty():
                time.sleep(5)
                # BUG FIX: previously this fell through to a blocking
                # Queue.get(), which made the sleep pointless and kept the
                # thread from ever re-checking _exit_flag (so stop() could
                # not terminate it while the queue was empty).
                continue
            evil_ip = self.evil_task_queue.get()
            logger.info("Get An Evil IP: {0}".format(evil_ip))
            for ip in evil_ip:
                # Skip attacker IPs that are already bound to a honeypot.
                qs = db_session.query(KokkuriSSHPot).filter(KokkuriSSHPot.attacker_ip == ip).first()
                if qs:
                    continue
                container_name = "{0}_honeypot".format(ip.replace(".", "_"))
                container_id, ssh_port = self.docker_pot.create_container(container_name)
                qs = KokkuriSSHPot(
                    container_name=container_name, container_id=container_id,
                    pot_ip=server_config.HONEYPOT_IP, attacker_ip=ip, ssh_port=ssh_port, status=1
                )
                db_session.add(qs)
                db_session.commit()
| LiGhT1EsS/kokkuri | Kokkuri-server/core/sshd_guard/guard.py | Python | gpl-3.0 | 3,844 |
# -*- coding: utf-8 -*-
from flask import Flask
from flask import render_template, jsonify
from random import randint
# Connects the board
from teensyserial import TeensySerial
teensy_board = TeensySerial(115200)
app = Flask(__name__)
@app.route('/data', methods=['GET'])
def get_temp():
    """Read the current temperature from the sensor and serve it as JSON
    at */data."""
    reading = teensy_board.recv()
    return jsonify(temperature=float(reading))
@app.route('/')
def termo_flask():
    """Render the main page, passing along the Teensy board's port info."""
    port = teensy_board.get_port()
    return render_template('termo_flask.html', teensy_port=port)
if __name__ == '__main__':
    try:
        app.run()
    finally:
        # BUG FIX: finish() was previously unreachable when app.run() was
        # interrupted (e.g. Ctrl-C raises KeyboardInterrupt), leaving the
        # serial connection open.  try/finally guarantees cleanup.
        teensy_board.finish()
| fazedores/termo-flask | termoflask.py | Python | mit | 709 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.