text stringlengths 4 1.02M | meta dict |
|---|---|
"""
Tests for L{game.network} (Network support for Game).
"""
import numpy
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet.task import Clock
from game.test.util import (
ArrayMixin, PlayerCreationMixin, PlayerVisibilityObserver)
from game.environment import Environment
from game.network import (Direction, Introduce, SetDirectionOf,
NetworkController, NewPlayer, SetMyDirection,
RemovePlayer, GetTerrain, SetTerrain, Terrain)
from game.direction import FORWARD, BACKWARD, LEFT, RIGHT
from game.terrain import (
WATER, GRASS, DESERT, MOUNTAIN, loadTerrainFromString)
from game.vector import Vector
class DirectionArgumentTests(TestCase):
    """
    Tests for L{Direction}, the AMP argument type used to send movement
    directions over the wire.
    """
    def _roundTrip(self, argument, direction):
        """
        Serialize C{direction} with C{argument}, assert that the wire form
        is a C{str}, and return the value parsed back from it.
        """
        wire = argument.toString(direction)
        self.assertIdentical(type(wire), str)
        return argument.fromString(wire)

    def test_cardinalConversion(self):
        """
        The cardinal directions should round trip through the L{Direction}
        argument.
        """
        argument = Direction()
        for cardinal in (FORWARD, BACKWARD, LEFT, RIGHT):
            self.assertEqual(self._roundTrip(argument, cardinal), cardinal)

    def test_intercardinalConversion(self):
        """
        The intercardinal directions should round trip through the
        L{Direction} argument.
        """
        argument = Direction()
        for latSign in (FORWARD, BACKWARD):
            for lonSign in (LEFT, RIGHT):
                combined = latSign + lonSign
                self.assertEqual(
                    self._roundTrip(argument, combined), combined)

    def test_stationality(self):
        """
        Direction should be able to deal with C{None} as a direction,
        which means "not moving".
        """
        argument = Direction()
        serialized = argument.toString(None)
        self.assertIdentical(argument.fromString(serialized), None)
class CommandTestMixin(object):
    """
    Mixin defining serialization round-trip tests for AMP L{Command}s.

    Subclasses supply the command and the expected object/wire forms:

    @cvar command: L{Command} subclass to test.

    @type argumentObjects: L{dict}
    @cvar argumentObjects: The unserialized forms of arguments matching the
        argument schema of your Command.

    @type argumentStrings: L{dict}
    @cvar argumentStrings: The serialized forms of arguments matching the
        argument schema of your Command.

    @type responseObjects: L{dict}
    @cvar responseObjects: The unserialized forms of responses matching the
        response schema of your Command.

    @type responseStrings: L{dict}
    @cvar responseStrings: The serialized forms of responses matching the
        response schema of your Command.
    """
    def test_makeResponse(self):
        """
        C{self.responseObjects} should serialize to C{self.responseStrings}.
        """
        serialized = self.command.makeResponse(self.responseObjects, None)
        self.assertEqual(serialized, self.responseStrings)

    def test_parseResponse(self):
        """
        C{self.responseStrings} should parse to C{self.responseObjects}.
        """
        # _stringsToObjects is a private AMP helper; imported lazily to keep
        # the dependency local to the tests that use it.
        from twisted.protocols.amp import _stringsToObjects
        parsed = _stringsToObjects(
            self.responseStrings, self.command.response, None)
        self.assertEqual(parsed, self.responseObjects)

    def test_makeArguments(self):
        """
        C{self.argumentObjects} should serialize to C{self.argumentStrings}.
        """
        from twisted.protocols.amp import _objectsToStrings
        serialized = _objectsToStrings(
            self.argumentObjects, self.command.arguments, {}, None)
        self.assertEqual(serialized, self.argumentStrings)

    def test_parseArguments(self):
        """
        Parsing C{self.argumentStrings} should result in
        C{self.argumentObjects}.
        """
        from twisted.protocols.amp import _stringsToObjects
        parsed = _stringsToObjects(
            self.argumentStrings, self.command.arguments, None)
        self.assertEqual(parsed, self.argumentObjects)
def stringifyDictValues(schema):
    """
    Return a dict like C{schema} but whose values have been str()ed.
    """
    return dict((key, str(value)) for key, value in schema.items())
class IntroduceCommandTests(CommandTestMixin, TestCase):
    """
    Tests for L{Introduce}.
    """
    command = Introduce
    # The server's response: an assigned identifier, simulation parameters,
    # and the new player's starting coordinates.
    responseObjects = {
        'identifier': 123,
        'granularity': 20,
        'speed': 12,
        'x': -3.5,
        'y': 2.5,
        'z': 0.5}
    # Every response value serializes as its str() form.
    responseStrings = stringifyDictValues(responseObjects)
    # Introduce carries no arguments.
    argumentObjects = argumentStrings = {}
class NewPlayerCommandTests(CommandTestMixin, TestCase):
    """
    Tests for L{NewPlayer}.
    """
    command = NewPlayer
    # Arguments announcing another player's identifier, position and speed.
    argumentObjects = {
        'identifier': 123,
        'x': 505.5,
        'y': 23489.5,
        'z': -10.5,
        'speed': 3999}
    # Every argument value serializes as its str() form.
    argumentStrings = stringifyDictValues(argumentObjects)
    # NewPlayer expects no response payload.
    responseObjects = responseStrings = {}
class TerrainArgumentTests(TestCase):
    """
    Tests for L{Terrain}, an AMP argument serializer for 3d numpy arrays.
    """
    # 24 bytes, values 0..23, shaped (2, 3, 4).  NOTE: this is Python 2
    # code - ''.join(map(chr, ...)) builds a byte string and
    # numpy.fromstring reads it as int8.
    data = ''.join(map(chr, range(2 * 3 * 4)))
    array = numpy.fromstring(data, 'b').reshape((2, 3, 4))
    # Expected wire form: per-dimension sizes, dtype name, and raw bytes.
    serialized = {
        "voxels-dx": "2",
        "voxels-dy": "3",
        "voxels-dz": "4",
        "voxels-type": "int8",
        "voxels-data": data}
    # Remove the helper from the class namespace so it is not mistaken for
    # test data (the serialized dict already holds a reference to it).
    del data
    def test_toBox(self):
        """
        L{Terrain.toBox} serializes the numpy array from C{objects} associated
        with the key C{name}.  It adds C{I{name}-dx}, C{I{name}-dy}, and
        C{I{name}-dz} keys to the C{strings} dict, with values representing the
        size of the array in each of those dimensions.  It adds a
        C{I{name}-type} key with the numpy array element type as a value.  It
        adds a C{I{name}-data} key with the raw array data as a value.
        """
        objects = {"voxels": self.array}
        strings = {}
        argument = Terrain()
        argument.toBox("voxels", strings, objects, None)
        self.assertEquals(strings, self.serialized)
    def test_fromBox(self):
        """
        L{Terrain.fromBox} reads the C{name}-prefixed I{dx}, I{dy}, I{dz},
        I{type}, and I{data} keys from the strings dict passed to it and
        constructs a numpy array with the indicated shape and type from the
        data.  The array is put into the objects dict associated with the
        C{name} key.
        """
        objects = {}
        argument = Terrain()
        argument.fromBox("voxels", self.serialized, objects, None)
        # Can't compare dicts directly, because numpy arrays are weird.
        self.assertEquals(objects.keys(), ["voxels"])
        self.assertTrue((objects["voxels"] == self.array).all())
class GetTerrainCommandTests(CommandTestMixin, TestCase):
    """
    Tests for L{GetTerrain}.
    """
    command = GetTerrain
    # The client asks for terrain near a voxel coordinate; the server
    # replies out-of-band with SetTerrain, so the response is empty.
    argumentObjects = {'x': 2, 'y': 7, 'z': 13}
    argumentStrings = {'x': '2', 'y': '7', 'z': '13'}
    responseObjects = responseStrings = {}
class SetTerrainCommandTests(CommandTestMixin, TestCase):
    """
    Tests for L{SetTerrain}.
    """
    command = SetTerrain
    # A (1, 2, 10) int8 voxel block whose bytes are 0..9 then 16..25.
    # NOTE: Python 2 only - range() returns a list here, so + concatenates.
    _shape = (1, 2, 10)
    _data = range(10) + range(16, 26)
    argumentObjects = {
        'x': 393,
        'y': 292,
        'z': 12,
        'voxels': numpy.fromstring(
            ''.join(map(chr, _data)), 'b').reshape(_shape)
        }
    argumentStrings = {
        'x': '393',
        'y': '292',
        'z': '12',
        'voxels-dx': '1',
        'voxels-dy': '2',
        'voxels-dz': '10',
        'voxels-type': 'int8',
        'voxels-data': (
            '\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09'
            '\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19'),
        }
    responseObjects = responseStrings = {}
    def test_parseArguments(self):
        """
        Parsing C{self.argumentStrings} should result in
        C{self.argumentObjects}.  Duplicated here to implement correct
        numpy.array comparison, since we cannot just use C{==}.
        """
        from twisted.protocols.amp import _stringsToObjects
        box = _stringsToObjects(self.argumentStrings,
                                self.command.arguments, None)
        # Compare the array separately from the scalar arguments: == on
        # arrays is elementwise, so dict equality would not work.
        voxels = box.pop("voxels")
        argumentObjects = self.argumentObjects.copy()
        expected = argumentObjects.pop("voxels")
        self.assertEquals(box, argumentObjects)
        self.assertTrue((voxels == expected).all())
class RemovePlayerCommandTests(CommandTestMixin, TestCase):
    """
    Tests for L{RemovePlayer}.
    """
    command = RemovePlayer
    # Only the departing player's identifier is sent; no response payload.
    responseObjects = responseStrings = {}
    argumentObjects = {'identifier': 123}
    argumentStrings = stringifyDictValues(argumentObjects)
class SetMyDirectionTests(CommandTestMixin, TestCase):
    """
    Tests for L{SetMyDirection}.
    """
    command = SetMyDirection
    # 'direction' uses the custom Direction serializer; 'y' here is the
    # orientation angle, while the response carries a corrected position.
    argumentObjects = {'direction': RIGHT, 'y': 1.5}
    argumentStrings = {
        'direction': Direction().toString(RIGHT),
        'y': '1.5'}
    responseObjects = {'x': 32.5, 'y': 939.5, 'z': 5.5}
    responseStrings = stringifyDictValues(responseObjects)
class SetDirectionOfTests(CommandTestMixin, TestCase):
    """
    Tests for L{SetDirectionOf}.
    """
    command = SetDirectionOf
    argumentObjects = {
        'identifier': 595,
        'direction': RIGHT,
        'x': 939.5,
        'y': -93999.5,
        'z': 10.5,
        'orientation': 2.25}
    # All values str()-serialize except 'direction', which uses the custom
    # Direction serializer and is patched in afterwards.
    argumentStrings = stringifyDictValues(argumentObjects)
    argumentStrings['direction'] = Direction().toString(RIGHT)
    responseObjects = responseStrings = {}
class ControllerTests(TestCase, PlayerCreationMixin, ArrayMixin):
    """
    L{NetworkController} takes network input and makes local changes to model
    objects.

    @ivar calls: A list of three-tuples consisting of a Deferred, a command
        subclass, and a dictionary of keyword arguments representing attempted
        Command invocations.
    """
    def setUp(self):
        """
        Create a L{NetworkController} whose C{callRemote} is replaced by a
        recorder (see L{callRemote}) so no real network traffic occurs.
        """
        self.calls = []
        self.identifier = 123
        self.player = self.makePlayer(Vector(1, 2, 3))
        self.clock = Clock()
        self.controller = NetworkController(self.clock)
        # Intercept outgoing AMP commands for inspection by the tests.
        self.controller.callRemote = self.callRemote
    def callRemote(self, commandType, **kw):
        """
        Record an attempt to invoke a remote command.
        """
        result = Deferred()
        self.calls.append((result, commandType, kw))
        return result
    def test_addModelObject(self):
        """
        L{NetworkController.addModelObject} should add a new item to the model
        objects mapping.
        """
        self.controller.addModelObject(self.identifier, self.player)
        self.assertEqual(
            self.controller.modelObjects, {self.identifier: self.player})
    def test_objectByIdentifier(self):
        """
        L{NetworkController.objectByIdentifier} should locate the correct
        object based on the mapping it maintains between objects and object
        identifiers.
        """
        self.controller.addModelObject(self.identifier, self.player)
        self.assertIdentical(
            self.controller.objectByIdentifier(self.identifier),
            self.player)
    def test_unknownObjectIdentifier(self):
        """
        L{NetworkController.objectByIdentifier} should raise L{KeyError} when
        given an identifier which corresponds to no model object.
        """
        self.assertRaises(
            KeyError,
            self.controller.objectByIdentifier, self.identifier)
    def test_identifierByObject(self):
        """
        L{NetworkController.identifierByObject} should locate the correct
        network identifier based on the reverse mapping it maintains between
        objects and object identifiers.
        """
        self.controller.addModelObject(self.identifier, self.player)
        self.controller.addModelObject(321, self.makePlayer(Vector(3, 2, 1)))
        self.assertEquals(
            self.controller.identifierByObject(self.player),
            self.identifier)
    def test_unknownObject(self):
        """
        L{NetworkController.identifierByObject} should raise L{ValueError} when
        given an object with no associated network identifier.
        """
        self.assertRaises(
            ValueError,
            self.controller.identifierByObject, self.player)
    def test_setDirectionOf(self):
        """
        When L{SetDirectionOf} is issued, the L{Player}'s direction, position,
        and orientation should be set.
        """
        self.controller.addModelObject(self.identifier, self.player)
        # Responders receive AMP wire-form strings, hence all the str()s.
        responder = self.controller.lookupFunction(SetDirectionOf.commandName)
        direction = Direction().toString(FORWARD)
        x, y, z = (234.5, 5985.5, 12.5)
        orientation = 3.5
        d = responder({
            'identifier': str(self.identifier),
            'direction': direction,
            'x': str(x),
            'y': str(y),
            'z': str(z),
            'orientation': str(orientation)})
        def gotDirectionSetting(ign):
            self.assertEquals(self.player.direction, FORWARD)
            self.assertEquals(self.player.getPosition(), Vector(x, y, z))
            self.assertEquals(self.player.orientation.y, orientation)
        d.addCallback(gotDirectionSetting)
        return d
    def _assertThingsAboutPlayerCreation(self, environment, position, speed):
        # Shared assertions: the controller's own player was created with the
        # given position/speed and registered as the environment's initial
        # player.
        player = self.controller.modelObjects[self.identifier]
        self.assertEqual(player.getPosition(), position)
        self.assertEqual(player.speed, speed)
        self.assertEqual(player.seconds, environment.seconds)
        self.assertIdentical(environment.initialPlayer, player)
    def test_createInitialPlayer(self):
        """
        L{NetworkController._createInitialPlayer} should create the player
        object for this client.
        """
        x, y, z = (3, 2, 12)
        speed = 40
        granularity = 22
        environment = Environment(granularity, self.clock)
        observer = PlayerVisibilityObserver()
        environment.addObserver(observer)
        self.controller.createInitialPlayer(
            environment, self.identifier, Vector(x, y, z), speed)
        self.assertEqual(len(observer.createdPlayers), 1)
        self._assertThingsAboutPlayerCreation(
            environment, Vector(x, y, z), speed)
    def test_greetServer(self):
        """
        L{NetworkController.introduce} should send an L{Introduce} command to
        the server and handle the result by populating its model mapping with a
        new entry.
        """
        self.controller.modelObjects.clear()
        x, y, z = (3, 2, -5)
        speed = 40
        granularity = 22
        introduced = self.controller.introduce()
        self.assertEqual(len(self.calls), 1)
        result, command, kw = self.calls.pop()
        self.assertIdentical(command, Introduce)
        self.assertEqual(kw, {})
        # Nothing happens until the server answers.
        self.assertEqual(self.controller.modelObjects, {})
        self.assertIdentical(self.controller.environment, None)
        # Simulate the server's Introduce response.
        result.callback({'identifier': self.identifier,
                         'granularity': granularity,
                         'speed': speed,
                         'x': x,
                         'y': y,
                         'z': z})
        self._assertThingsAboutPlayerCreation(
            self.controller.environment, Vector(x, y, z), speed)
        self.assertIsInstance(self.controller.environment, Environment)
        self.assertEquals(self.controller.environment.granularity, granularity)
        self.assertEquals(self.controller.environment.platformClock, self.clock)
        self.assertIdentical(
            self.controller.environment.network, self.controller)
        introduced.addCallback(
            self.assertIdentical, self.controller.environment)
        return introduced
    def test_movementDirectionChanged(self):
        """
        Change of direction of movement by model objects should be translated
        into a network call by L{NetworkController}.
        """
        self.controller.addModelObject(self.identifier, self.player)
        self.player.orientation.y = 2.0
        self.player.setDirection(FORWARD)
        self.assertEqual(len(self.calls), 1)
        result, command, kw = self.calls.pop(0)
        self.assertIdentical(command, SetMyDirection)
        self.assertEqual(kw, {"direction": FORWARD, "y": 2.0})
    def test_orientationDirectionChanged(self):
        """
        Change of direction of orientation by model objects should be translated
        into a network call by L{NetworkController}.
        """
        self.controller.addModelObject(self.identifier, self.player)
        self.player.turn(0.0, 1.5)
        self.assertEqual(len(self.calls), 1)
        result, command, kw = self.calls.pop(0)
        self.assertIdentical(command, SetMyDirection)
        self.assertEqual(kw, {"direction": None, "y": 1.5})
    def test_directionChangedResponse(self):
        """
        When the server responds to a L{SetMyDirection} command with new a
        position, the L{NetworkController} should update the L{Player}'s
        position.
        """
        self.controller.directionChanged(self.player)
        self.assertEquals(len(self.calls), 1)
        x, y, z = (123, 5398, 10.5)
        self.calls[0][0].callback({"x": x, "y": y, "z": z})
        self.assertEqual(self.player.getPosition(), Vector(x, y, z))
    def test_newPlayer(self):
        """
        L{NetworkController} should respond to L{NewPlayer} commands
        by introducing a new L{Player} object to the L{Environment}
        and registering the L{Player}'s identifier.
        """
        observer = PlayerVisibilityObserver()
        self.controller.environment = Environment(10, self.clock)
        self.controller.environment.addObserver(observer)
        responder = self.controller.lookupFunction(NewPlayer.commandName)
        x, y, z = (3, 500, 5)
        speed = 999
        d = responder({
            "identifier": "123", "x": str(x), "y": str(y), "z": str(z),
            "speed": str(speed)})
        def gotResult(ign):
            self.assertEqual(len(observer.createdPlayers), 1)
            player = observer.createdPlayers[0]
            self.assertEqual(player.getPosition(), Vector(x, y, z))
            self.assertEqual(player.speed, speed)
            obj = self.controller.objectByIdentifier(
                self.controller.identifierByObject(player))
            self.assertIdentical(obj, player)
        d.addCallback(gotResult)
        return d
    # XXX Fix test name and clarify docstring.
    def test_newPlayer2(self):
        """
        The L{NewPlayer} responder should not cause the
        L{NetworkController} to observe the new player.
        """
        self.controller.environment = Environment(10, self.clock)
        responder = self.controller.lookupFunction(NewPlayer.commandName)
        identifier = 123
        d = responder({"identifier": str(identifier),
                       "x": "1", "y": "2", "z": "3", "speed": "99"})
        def gotResult(ign):
            player = self.controller.objectByIdentifier(identifier)
            player.setDirection(RIGHT)
            # A remote player's direction change must not trigger an
            # outgoing SetMyDirection.
            self.assertEqual(self.calls, [])
        d.addCallback(gotResult)
        return d
    def test_removePlayer(self):
        """
        L{NetworkController} should respond to L{RemovePlayer}
        commands by removing the identified L{Player} object from the
        L{Environment} and forgetting the L{Player}'s identifier.
        """
        environment = Environment(10, self.clock)
        self.controller.environment = environment
        observer = PlayerVisibilityObserver()
        environment.addObserver(observer)
        identifier = 123
        self.controller.newPlayer(identifier, 23, 32, 13, 939)
        responder = self.controller.lookupFunction(RemovePlayer.commandName)
        d = responder({"identifier": str(identifier)})
        def gotResult(ignored):
            self.assertEqual(observer.removedPlayers, observer.createdPlayers)
            self.assertRaises(
                KeyError,
                self.controller.objectByIdentifier, identifier)
        d.addCallback(gotResult)
        return d
    def test_setTerrainAtOrigin(self):
        """
        L{NetworkController} should respond to the L{SetTerrain}
        command by updating its terrain model with the received data.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        terrainData = loadTerrainFromString('G')
        terrainObjects = dict(x=0, y=0, z=0, voxels=terrainData)
        # Serialize through the real command to exercise the full
        # round-trip, then hand the wire form to the responder.
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(), {(0, 0, 0): GRASS})
        d.addCallback(gotResult)
        return d
    def test_overwriteTerrain(self):
        """
        When L{NetworkController} receives a L{SetTerrain} which overlaps with
        existing terrain data, the existing data is overwritten.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        environment.terrain.set(0, 0, 0, loadTerrainFromString('G'))
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        terrainObjects = dict(x=0, y=0, z=0, voxels=loadTerrainFromString('M'))
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(), {(0, 0, 0): MOUNTAIN})
        d.addCallback(gotResult)
        return d
    def test_extendX(self):
        """
        When L{NetworkController} receives terrain in a L{SetTerrain} command
        which extends beyond the current maximum X coordinate, the terrain array
        is extended in the X direction to contain it.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        environment.terrain.set(0, 0, 0, loadTerrainFromString('DG'))
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        terrainObjects = dict(x=3, y=0, z=0, voxels=loadTerrainFromString('W'))
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(),
                {(0, 0, 0): DESERT, (1, 0, 0): GRASS, (3, 0, 0): WATER})
        d.addCallback(gotResult)
        return d
    def test_extendY(self):
        """
        When L{NetworkController} receives terrain in a L{SetTerrain} command
        which extends beyond the current maximum Y coordinate, the terrain array
        is extended in the Y direction to contain it.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        environment.terrain.set(0, 0, 0, loadTerrainFromString('D\n\nG'))
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        terrainObjects = dict(x=0, y=3, z=0, voxels=loadTerrainFromString('W'))
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(),
                {(0, 0, 0): GRASS, (0, 1, 0): DESERT, (0, 3, 0): WATER})
        d.addCallback(gotResult)
        return d
    def test_extendZ(self):
        """
        When L{NetworkController} receives terrain in a L{SetTerrain} command
        which extends beyond the current maximum Z coordinate, the terrain array
        is extended in the Z direction to contain it.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        environment.terrain.set(0, 0, 0, loadTerrainFromString('D\nG'))
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        terrainObjects = dict(x=0, y=0, z=3, voxels=loadTerrainFromString('W'))
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(),
                {(0, 0, 0): DESERT, (0, 0, 1): GRASS, (0, 0, 3): WATER})
        d.addCallback(gotResult)
        return d
    def test_preserveX(self):
        """
        When L{NetworkController} receives terrain in a L{SetTerrain} command
        which doesn't extend as far in the X direction as the existing terrain
        data, the existing terrain data beyond the new terrain data in the X
        direction is preserved.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        environment.terrain.set(0, 0, 0, loadTerrainFromString('DG\n\nMW'))
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        # XXX Would be better if this terrain type were unique to ensure the
        # right data ends up in the right place.  But we're out of terrain types
        # for now.
        terrainObjects = dict(x=0, y=2, z=0, voxels=loadTerrainFromString('D'))
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(),
                {(0, 0, 0): MOUNTAIN, (1, 0, 0): WATER,
                 (0, 1, 0): DESERT, (1, 1, 0): GRASS,
                 (0, 2, 0): DESERT})
        d.addCallback(gotResult)
        return d
    def test_preserveY(self):
        """
        When L{NetworkController} receives terrain in a L{SetTerrain} command
        which doesn't extend as far in the Y direction as the existing terrain
        data, the existing terrain data beyond the new terrain data in the Y
        direction is preserved.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        environment.terrain.set(0, 0, 0, loadTerrainFromString('DG\n\nMW'))
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        # XXX Unique terrain type
        terrainObjects = dict(x=2, y=0, z=0, voxels=loadTerrainFromString('M'))
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(),
                {(0, 0, 0): MOUNTAIN, (1, 0, 0): WATER,
                 (0, 1, 0): DESERT, (1, 1, 0): GRASS,
                 (2, 0, 0): MOUNTAIN})
        d.addCallback(gotResult)
        return d
    def test_preserveZ(self):
        """
        When L{NetworkController} receives terrain in a L{SetTerrain} command
        which doesn't extend as far in the Z direction as the existing terrain
        data, the existing terrain data beyond the new terrain data in the Z
        direction is preserved.
        """
        environment = self.controller.environment = Environment(10, self.clock)
        environment.terrain.set(0, 0, 0, loadTerrainFromString('DG\nMW'))
        responder = self.controller.lookupFunction(SetTerrain.commandName)
        # XXX Unique terrain type
        terrainObjects = dict(x=2, y=0, z=0, voxels=loadTerrainFromString('G'))
        terrainStrings = SetTerrain.makeArguments(terrainObjects, None)
        d = responder(terrainStrings)
        def gotResult(ignored):
            self.assertEquals(
                environment.terrain.dict(),
                {(0, 0, 0): DESERT, (1, 0, 0): GRASS,
                 (0, 0, 1): MOUNTAIN, (1, 0, 1): WATER,
                 (2, 0, 0): GRASS})
        d.addCallback(gotResult)
        return d
| {
"content_hash": "b79393077da79d11b59254908d9d0d23",
"timestamp": "",
"source": "github",
"line_count": 788,
"max_line_length": 80,
"avg_line_length": 35.81979695431472,
"alnum_prop": 0.6233968681357613,
"repo_name": "eriknelson/gam3",
"id": "f8982ec9ec2d6c07cc3a4b9389c2200ee6506689",
"size": "28226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/test/test_network.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "215796"
}
],
"symlink_target": ""
} |
"""CodeShare URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.conf.urls import include
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
# Route application URLs to their per-app URLconfs; /admin/ goes to the
# built-in admin site.
urlpatterns = [
    url(r'^music/', include('music.urls')),
    url(r'^CodeShareIo/', include('CodeShareIo.urls')),
    url(r'^admin/', admin.site.urls),
]
# Serve static files from Django itself only in development; in production
# the web server is expected to serve STATIC_ROOT directly.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | {
"content_hash": "26999a01ef09727ccdfb1a2b6acc7a2a",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 82,
"avg_line_length": 38.733333333333334,
"alnum_prop": 0.7185886402753873,
"repo_name": "xSash/SocialMediaProject",
"id": "791e6f13ebecc19d6461ae2a7799f93d3562a89f",
"size": "1162",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CodeShare/CodeShare/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2913756"
},
{
"name": "HTML",
"bytes": "288627"
},
{
"name": "JavaScript",
"bytes": "2512273"
},
{
"name": "Python",
"bytes": "25536"
}
],
"symlink_target": ""
} |
import SimpleHTTPServer
import threading
import SocketServer
import datetime
import urllib
import json
from core.term_test import TermTestManage
from core.flow import Flow
#import chronic
#from pprint import pprint
#import hadoop.hadoopget
# strftime format used to key Flow data by day.
DATE_FORMAT = '%Y-%m-%d'
# Input files: correction term combinations and base dimensions.
TERM_FILE = 'data/term_list.txt'
DIM_FILE = 'data/dim_list.txt'
DB_NAME = 'RTBApp'
# Global estimation state, (re)built by load(): the term-test manager and
# the per-day Flow.  Guarded by `lock` during reloads.
manage=None
f=None
lock = threading.Lock()
def parse_param(s):
    """
    Parse a query-string-style payload ("key=value&key2=value2") into a dict.

    Only the first '=' in each pair separates key from value, so values may
    themselves contain '=' characters (the original split('=') raised
    ValueError on such input).  Empty segments (e.g. from an empty string or
    a trailing '&') are skipped instead of crashing.
    """
    param = {}
    for text in s.split('&'):
        if not text:
            continue
        # Debug trace of each raw pair; parenthesized so it behaves the
        # same under Python 2 and 3.
        print(text)
        key, value = text.split('=', 1)
        param[key] = value
    return param
def exec_result(length, data):
    """
    Produce `length` comma-separated estimates, one per future step.

    Starts from the current total flow, then repeatedly estimates with the
    given term map and advances the flow one step with f.future().
    Assumes the module-level globals `f` and `manage` have been initialized
    by load() -- TODO confirm callers always trigger load() first.
    """
    value_list = []
    value = f.total_current()
    # Profiling hook, disabled.
    #with chronic.Timer('exec_result'):
    for i in range(length):
        value_list.append(str(manage.estimate(term_map=data, total=value)))
        value = f.future(value, i)
    # Measure single-run execution time (profiling output, disabled).
    #pprint(chronic.timings)
    #print ','.join(value_list)
    return ','.join(value_list)
def load():
    """
    (Re)build the global estimation state: a fresh TermTestManage fed from
    DIM_FILE and TERM_FILE, and a Flow for today's date.

    The whole rebuild happens under `lock` so concurrent request threads
    never observe half-initialized state.  The lock release and file closes
    are in finally blocks: the original code leaked the lock (deadlocking
    every later reload) and the open file handles if any step raised.
    """
    global manage, f
    # Serialize reloads against request handlers.
    lock.acquire()
    try:
        manage = TermTestManage()
        now = datetime.datetime.now()
        day = now.strftime(DATE_FORMAT)
        f = Flow(day, DB_NAME)
        # Load base dimensions.
        fo = open(DIM_FILE, 'r')
        try:
            lines = fo.readlines()
            manage.load_dim(lines)
        finally:
            fo.close()
        # Load correction term combinations.
        fo = open(TERM_FILE, 'r')
        try:
            lines = fo.readlines()
            manage.load(lines)
        finally:
            fo.close()
    finally:
        # Always release, even if a file is missing or parsing fails.
        lock.release()
class HTTPHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    # Request handler for the prediction service (Python 2).
    # POST bodies are JSON: {"reload": ...} triggers a state reload,
    # {"data": ..., "length": N} returns N comma-separated estimates.
    def do_GET(self):
        # GET is deliberately disabled (no directory listing / file serving).
        return
    def do_POST(self):
        print "do_POST: thread",threading.current_thread().getName()
        # Read exactly content-length bytes and URL-decode them before
        # JSON parsing.
        length = int(self.headers.getheader('content-length'))
        content = urllib.unquote(self.rfile.read(length))
        # param = parse_param(content)
        param = json.loads(content)
        #param['data'] = json.loads(param['data'])
        print param
        if 'reload' in param:
            # Rebuild globals; no response body is written for reloads.
            load()
            return
        if 'data' not in param:
            # Nothing to estimate; empty response.
            return
        elif 'length' in param:
            self.wfile.write(exec_result(param['length'], param['data']))
            #self.wfile.write("response")
            return
        else:
            # Default to a single estimate when no length is given.
            self.wfile.write(exec_result(1, param['data']))
            return
if __name__ == '__main__':
    # Bind the prediction service, warm up the global state once, then
    # serve requests forever (single-threaded TCPServer).
    PORT = 9112
    httpd = SocketServer.TCPServer(("", PORT), HTTPHandler)
    print "serving at port", PORT
    load()
    print "==========load finish=========="
    print "MAIN: thread",threading.current_thread().getName()
    httpd.serve_forever()
"content_hash": "5e0de194794ee95daa79c1bc82db3701",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 75,
"avg_line_length": 22.693693693693692,
"alnum_prop": 0.6010321556173085,
"repo_name": "gjgj821/traffic_prediction",
"id": "19968493eeda49a228daf042eadf3ade6b753d02",
"size": "2600",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "12854"
},
{
"name": "Python",
"bytes": "73229"
}
],
"symlink_target": ""
} |
"""
Fast cryptographic hash of Python objects, with a special case for fast
hashing of numpy arrays.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Copyright (c) 2009 Gael Varoquaux
# License: BSD Style, 3 clauses.
import warnings
import pickle
import hashlib
import sys
import types
import struct
import io
# Use the pure-Python pickler: this module subclasses it and copies/extends
# its `dispatch` table, which the C-accelerated pickler does not expose.
# On Python 3 the pure-Python implementation lives at pickle._Pickler.
if sys.version_info[0] < 3:
    Pickler = pickle.Pickler
else:
    Pickler = pickle._Pickler
class _ConsistentSet(object):
    """ Class used to ensure the hash of Sets is preserved
        whatever the order of its items.
    """
    def __init__(self, set_sequence):
        # Sorting makes the pickled stream independent of set iteration
        # order.  NOTE(review): assumes the elements are mutually orderable;
        # mixed-type sets would raise TypeError here.
        self._sequence = sorted(set_sequence)
class _MyHash(object):
    """ Class used to hash objects that won't normaly pickle """
    def __init__(self, *args):
        # The args tuple stands in for the unpicklable object; it is what
        # actually gets pickled and hashed.
        self.args = args
class Hasher(Pickler):
    """ A subclass of pickler, to do cryptographic hashing, rather than
        pickling.  The pickle byte stream of an object is fed into a
        hashlib digest, so equal objects produce equal digests.
    """
    def __init__(self, hash_name='md5'):
        # Pickle into an in-memory buffer; protocol 2 keeps the stream
        # stable across interpreter runs.
        self.stream = io.BytesIO()
        Pickler.__init__(self, self.stream, protocol=2)
        # Initialise the hash obj
        self._hash = hashlib.new(hash_name)
    def hash(self, obj, return_digest=True):
        # Pickle obj and fold the resulting bytes into the digest.  A
        # PicklingError is reported as a warning rather than raised, so an
        # unpicklable object still yields a (partial-stream) digest.
        try:
            self.dump(obj)
        except pickle.PicklingError as e:
            warnings.warn('PicklingError while hashing %r: %r' % (obj, e))
        dumps = self.stream.getvalue()
        self._hash.update(dumps)
        if return_digest:
            return self._hash.hexdigest()
    def save(self, obj):
        if isinstance(obj, (types.MethodType, type({}.pop))):
            # the Pickler cannot pickle instance methods; here we decompose
            # them into components that make them uniquely identifiable
            if hasattr(obj, '__func__'):
                func_name = obj.__func__.__name__
            else:
                func_name = obj.__name__
            inst = obj.__self__
            if type(inst) == type(pickle):
                # Bound to a module: identify by function and module name.
                obj = _MyHash(func_name, inst.__name__)
            elif inst is None:
                # type(None) or type(module) do not pickle
                obj = _MyHash(func_name, inst)
            else:
                cls = obj.__self__.__class__
                obj = _MyHash(func_name, inst, cls)
        Pickler.save(self, obj)
    # The dispatch table of the pickler is not accessible in Python
    # 3, as these lines are only bugware for IPython, we skip them.
    def save_global(self, obj, name=None, pack=struct.pack):
        # We have to override this method in order to deal with objects
        # defined interactively in IPython that are not injected in
        # __main__
        try:
            Pickler.save_global(self, obj, name=name, pack=pack)
        except pickle.PicklingError:
            # Inject the interactively-defined name into __main__ so the
            # retry below can find it there.
            Pickler.save_global(self, obj, name=name, pack=pack)
            module = getattr(obj, "__module__", None)
            if module == '__main__':
                my_name = name
                if my_name is None:
                    my_name = obj.__name__
                mod = sys.modules[module]
                if not hasattr(mod, my_name):
                    # IPython doesn't inject the variables define
                    # interactively in __main__
                    setattr(mod, my_name, obj)
    # Route builtins, types, old-style classes and functions through our
    # save_global so interactively-defined objects hash consistently.
    dispatch = Pickler.dispatch.copy()
    # builtin
    dispatch[type(len)] = save_global
    # type
    dispatch[type(object)] = save_global
    # classobj
    dispatch[type(Pickler)] = save_global
    # function
    dispatch[type(pickle.dump)] = save_global
    def _batch_setitems(self, items):
        # forces order of keys in dict to ensure consistent hash
        Pickler._batch_setitems(self, iter(sorted(items)))
    def save_set(self, set_items):
        # forces order of items in Set to ensure consistent hash
        Pickler.save(self, _ConsistentSet(set_items))
    dispatch[type(set())] = save_set
class NumpyHasher(Hasher):
    """ Special case the hasher for when numpy is loaded: large arrays are
        hashed from their raw buffer instead of being pickled.
    """
    def __init__(self, hash_name='md5', coerce_mmap=False):
        """
            Parameters
            ----------
            hash_name: string
                The hash algorithm to be used
            coerce_mmap: boolean
                Make no difference between np.memmap and np.ndarray
                objects.
        """
        self.coerce_mmap = coerce_mmap
        Hasher.__init__(self, hash_name=hash_name)
        # delayed import of numpy, to avoid tight coupling
        import numpy as np
        self.np = np
        # np.getbuffer existed on Python 2 numpy only; memoryview is the
        # modern zero-copy equivalent.
        if hasattr(np, 'getbuffer'):
            self._getbuffer = np.getbuffer
        else:
            self._getbuffer = memoryview
    def save(self, obj):
        """ Subclass the save method, to hash ndarray subclass, rather
            than pickling them. Off course, this is a total abuse of
            the Pickler class.
        """
        if isinstance(obj, self.np.ndarray) and not obj.dtype.hasobject:
            # Compute a hash of the object:
            try:
                self._hash.update(self._getbuffer(obj))
            except (TypeError, BufferError):
                # Cater for non-single-segment arrays: this creates a
                # copy, and thus aleviates this issue.
                # XXX: There might be a more efficient way of doing this
                self._hash.update(self._getbuffer(obj.flatten()))
            # We store the class, to be able to distinguish between
            # Objects with the same binary content, but different
            # classes.
            if self.coerce_mmap and isinstance(obj, self.np.memmap):
                # We don't make the difference between memmap and
                # normal ndarrays, to be able to reload previously
                # computed results with memmap.
                klass = self.np.ndarray
            else:
                klass = obj.__class__
            # We also return the dtype and the shape, to distinguish
            # different views on the same data with different dtypes.
            # The object will be pickled by the pickler hashed at the end.
            obj = (klass, ('HASHED', obj.dtype, obj.shape, obj.strides))
        Hasher.save(self, obj)
def hash(obj, hash_name='md5', coerce_mmap=False):
    """ Quick calculation of a hash to identify uniquely Python objects
        containing numpy arrays.

        Parameters
        -----------
        hash_name: 'md5' or 'sha1'
            Hashing algorithm used. sha1 is supposedly safer, but md5 is
            faster.
        coerce_mmap: boolean
            Make no difference between np.memmap and np.ndarray
    """
    # Checking sys.modules only picks the numpy-aware hasher when numpy
    # has already been imported; it never imports numpy itself.
    numpy_loaded = 'numpy' in sys.modules
    if numpy_loaded:
        hasher = NumpyHasher(hash_name=hash_name, coerce_mmap=coerce_mmap)
    else:
        hasher = Hasher(hash_name=hash_name)
    return hasher.hash(obj)
| {
"content_hash": "19ea99c6c0abb4c5a44fe1040dd8adf4",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 75,
"avg_line_length": 34.66326530612245,
"alnum_prop": 0.5787459523108626,
"repo_name": "jmargeta/scikit-learn",
"id": "01e90aea4718e0385d3b9c66f83eb324ca17d151",
"size": "6794",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sklearn/externals/joblib/hashing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
""" Resource PDF Tools
@see: U{B{I{S3XRC}} <http://eden.sahanafoundation.org/wiki/S3XRC>}
@requires: U{B{I{ReportLab}} <http://www.reportlab.com>}
######################################################################
DEPRECATION WARNING
This class is being replaced by the S3RL_PDF codec
Initially the reporting features will be replaced, with the OCR
process being removed at a later stage.
######################################################################
@copyright: 2011-12 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3PDF"]
import re
import os
import sys
import math
import subprocess
import unicodedata
from copy import deepcopy
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from datetime import datetime, timedelta, date
# Not using soupparser's unescape for now as it adds BeautifulSoup module
# to the dependency list for just one utility
#from lxml.html.soupparser import unescape
from htmlentitydefs import name2codepoint
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import *
from gluon.storage import Storage
from gluon.contenttype import contenttype
from gluon.languages import lazyT
try:
from lxml import etree
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
from s3method import S3Method
from s3utils import S3DateTime
import s3codec
try:
from PIL import Image
from PIL import ImageOps
from PIL import ImageStat
PILImported = True
except(ImportError):
try:
import Image
import ImageOps
import ImageStat
PILImported = True
except(ImportError):
print >> sys.stderr, "S3 Debug: S3PDF: Python Image Library not installed"
PILImported = False
try:
from reportlab.lib.enums import TA_CENTER, TA_RIGHT
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfgen import canvas
from reportlab.lib.fonts import tt2ps
from reportlab.rl_config import canvas_basefontname as _baseFontName
from reportlab.platypus import BaseDocTemplate, SimpleDocTemplate, PageTemplate
from reportlab.platypus.frames import Frame
from reportlab.platypus import Spacer, PageBreak, Paragraph
from reportlab.platypus import Table, TableStyle
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib.units import inch
from reportlab.lib.units import cm
from reportlab.lib import colors
from reportlab.lib.colors import Color
from reportlab.lib.pagesizes import A4, LETTER, landscape, portrait
from reportlab.platypus.flowables import Flowable
reportLabImported = True
except ImportError:
print >> sys.stderr, "S3 Debug: S3PDF: Reportlab not installed"
reportLabImported = False
# Module-level debug switch: when True, _debug() traces to stderr;
# when False, _debug() is a no-op lambda (Python 2 print syntax).
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3PDF: DEBUG MODE"
def _debug(m):
print >> sys.stderr, "S3PDF: %s" % m
else:
_debug = lambda m: None
# maximum number of options a field can have
MAX_FORM_OPTIONS_LIMIT = 20
# will be loaded with values during S3PDF apply_method
ERROR = Storage()
# *****************************************************************************
def checkDependencies(r):
    """
    Fail the request with HTTP 501 if ReportLab or PIL is not installed.

    @param r: the S3Request used to raise the error response
    """
    T = current.T
    # Local name renamed from ERROR to avoid shadowing the module-level
    # ERROR Storage; behavior is unchanged.
    messages = Storage(
        PIL_ERROR=T("PIL (Python Image Library) not installed"),
        REPORTLAB_ERROR=T("ReportLab not installed"),
    )
    # Check that the necessary ReportLab classes were imported
    if not reportLabImported:
        r.error(501, messages.REPORTLAB_ERROR)
    if not PILImported:
        r.error(501, messages.PIL_ERROR)
    # redirect() is not available in this scope
    #current.session.error = self.ERROR.REPORTLAB_ERROR
    #redirect(URL(extension=""))
if reportLabImported:

    class ChangePageTitle(Flowable):
        """Zero-size flowable whose only effect is to retitle the document
        at the point in the story where it is drawn."""

        def __init__(self, doc, newTitle):
            Flowable.__init__(self)
            self.doc = doc
            self.title = newTitle

        def draw(self):
            # Side effect only -- nothing is rendered on the canvas.
            self.doc.title = self.title

    class Overlay(Flowable):
        """Flowable that delegates its drawing to a caller-supplied
        callback, invoked as callback(canvas, data)."""

        def __init__(self, callback, data):
            Flowable.__init__(self)
            self.function = callback
            self.data = data

        def draw(self):
            # self.canv is provided by the platypus framework at draw time.
            self.function(self.canv, self.data)
class EdenDocTemplate(BaseDocTemplate):
    """
    The standard document template for eden reports

    It allows for the following page templates:
    1) First Page
    2) Even Page
    3) Odd Page
    4) Landscape Page
    """

    def setPageTemplates(self,
                         first,
                         firstEnd,
                         even = None,
                         odd = None,
                         landscape = None,
                         ):
        """
        Store the page-drawing callbacks; any template that is not
        supplied falls back to the callback used for the first page.
        """
        self.onfirst = first
        self.onfirstEnd = firstEnd
        self.oneven = even if even else first
        self.onodd = odd if odd else first
        self.onlandscape = landscape if landscape else first
        self.needLandscape = False

    def handle_pageBegin(self):
        """
        Select the page template for the page that is about to begin.
        """
        self._handle_pageBegin()
        if self.needLandscape:
            self._handle_nextPageTemplate("landscape")
        elif self.page % 2 == 1:
            self._handle_nextPageTemplate("odd")
        else:
            self._handle_nextPageTemplate("even")

    def build(self, flowables, canvasmaker=canvas.Canvas):
        """
        Build the document using the flowables.

        Registers the four page templates (first/even/odd/landscape) on a
        single body frame, then delegates to BaseDocTemplate.build().
        """
        self._calc()    # in case we changed margins sizes etc
        show_boundary = 0    # for debugging set to 1
        body_frame = Frame(self.leftMargin,
                           self.bottomMargin,
                           self.width,
                           self.height,
                           id="body",
                           showBoundary=show_boundary)
        templates = [
            PageTemplate(id="first",
                         frames=body_frame,
                         onPage=self.onfirst,
                         onPageEnd=self.onfirstEnd,
                         pagesize=self.pagesize),
            PageTemplate(id="even",
                         frames=body_frame,
                         onPage=self.oneven,
                         onPageEnd=self.onfirstEnd,
                         pagesize=self.pagesize),
            PageTemplate(id="odd",
                         frames=body_frame,
                         onPage=self.onodd,
                         onPageEnd=self.onfirstEnd,
                         pagesize=self.pagesize),
            PageTemplate(id="landscape",
                         frames=body_frame,
                         onPage=self.onlandscape,
                         pagesize=self.pagesize),
        ]
        self.addPageTemplates(templates)
        BaseDocTemplate.build(self, flowables, canvasmaker=canvasmaker)
class S3PDF(S3Method):
"""
Class to help generate PDF documents.

DEPRECATED: per the module header, this class is being replaced by
the S3RL_PDF codec (reporting features first, OCR later).

A typical implementation would be as follows:
exporter = s3base.S3PDF()
return exporter(xrequest, **attr)
Currently this class supports two types of reports:
A List: Typically called from the icon shown in a search
For example inv/warehouse
A Header plus List: Typically called from a button on a form
For example ???
Add additional generic forms to the apply_method() function
For specialist forms a S3PDF() object will need to be created.
See the apply_method() for ideas on how to create a form,
but as a minimum the following structure is required:
pdf = S3PDF()
pdf.newDocument(pdf.defaultTitle(resource))
# Add specific pages here
return pdf.buildDoc()
"""
def apply_method(self, r, **attr):
"""
Apply CRUD methods
@param r: the S3Request
@param attr: dictionary of parameters for the method handler
The attributes that it knows about are:
* componentname
* formname
* list_fields
* report_groupby
* report_hide_comments
@returns: output object to send to the view
"""
import uuid
def getParam(key):
"""
Nested helper to fetch a configuration parameter for apply_method.

Looks the key up in the attr dict passed to apply_method first and
falls back to self._config() when it is absent.

@todo find out if this has been done better elsewhere! :(
"""
value = attr.get(key)
# NOTE(review): `!= None` should idiomatically be `is not None`; code
# left untouched here (documentation-only change).
if value != None: return value
return self._config(key)
T = current.T
self.ERROR = ERROR = Storage(
NO_RECORDS=T("No records in this resource. Add one more records manually and then retry."),
TESSERACT_ERROR=T("%(app)s not installed. Ask the Server Administrator to install on Server.") % dict(app="Tesseract 3.01"),
EMPTY_OCR_FORM=T("Selected OCR Form has no pages. Use another revision of create a new revision by downloading a new Form."),
INVALID_IMAGE_TYPE=T("Uploaded file(s) are not Image(s). Supported image formats are '.png', '.jpg', '.bmp', '.gif'."),
OCR_DISABLED=T("OCR module is disabled. Ask the Server Administrator to enable it."),
IMAGE_MAGICK_ERROR=T("%(app)s not installed. Ask the Server Administrator to install on Server.") % dict(app="ImageMagick"),
NOT_PDF_FILE=T("Uploaded file is not a PDF file. Provide a Form in valid PDF Format."),
INVALID_PDF=T("Uploaded PDF file has more/less number of page(s) than required. Check if you have provided appropriate revision for your Form as well as check the Form contains appropriate number of pages."),
NO_UTC_OFFSET=T("No UTC offset found. Please set UTC offset in your 'User Profile' details. Example: UTC+0530"),
INVALID_JOBID=T("The provided 'jobuuid' is invalid. The session of Form upload is invalid. You should retry uploading."),
INVALID_FORMID=T("The provided 'formuuid' is invalid. You have selected a Form revision which does not exist on this server."),
UNRECOVERABLE_ERROR=T("The uploaded Form is unreadable, please do manual data entry."),
JOB_COMPLETE=T("This job has already been finished successfully."),
)
self.r = r
checkDependencies(r)
settings = current.deployment_settings
request = current.request
response = current.response
session = current.session
db = current.db
if DEBUG:
content_disposition = "inline"
else:
content_disposition = "attachment"
if settings.get_paper_size() == "Letter":
self.paper_size = LETTER
else:
self.paper_size = A4
try:
self.logo = os.path.join(request.folder,
settings.get_pdf_logo())
except:
self.logo = None
self.headerBanner = None
method = self.method
statustablename = "ocr_form_status"
callback = getParam("callback")
if callback != None:
title = getParam("formname")
if title == None:
title = self.defaultTitle(self.resource)
header = getParam("header")
if header == None:
header = self.pageHeader
footer = getParam("footer")
if footer == None:
footer = self.pageFooter
filename = getParam("filename")
if filename == None:
filename = title
self.newDocument(title,
header=header,
footer=footer,
filename = filename)
try:
id = r.component_id
if id == None:
id = r.id
except:
try:
id = r.id
except:
id = None
callback(self, id=id)
# Build the document
doc = self.buildDoc()
# Set content type and disposition headers
if response:
response.headers["Content-Type"] = contenttype(".pdf")
response.headers["Content-disposition"] = \
"%s; filename=\"%s\"" % (content_disposition,
self.filename)
# Return the stream
return doc
elif r.http == "GET":
if self.method in ("read", "list"):
# Normal PDF output
# Get the configuration parameters
componentname = getParam("componentname")
title = getParam("formname")
list_fields = getParam("list_fields")
report_groupby = getParam("report_groupby")
report_hide_comments = getParam("report_hide_comments")
filename = getParam("filename")
if filename == None:
filename = title
# Create the document shell
if title == None:
title = self.defaultTitle(self.resource)
self.newDocument(title,
header=self.pageHeader,
footer=self.pageFooter,
filename = filename)
if "report_landscape" in attr:
self.setLandscape()
# get the header details, if appropriate
if "rheader" in attr and attr["rheader"]:
self.extractrHeader(attr["rheader"])
self.addSpacer(3)
elif componentname:
self.addrHeader(self.resource,
list_fields,
report_hide_comments=report_hide_comments)
self.addSpacer(3)
# Add details to the document
if componentname == None:
# Document that only has a resource list
self.addTable(self.resource,
list_fields=list_fields,
report_groupby=report_groupby,
report_hide_comments=report_hide_comments)
else:
# Document that has a resource header and component list
# Get the raw data for the component
ptable = self.resource.table
ctable = db[componentname]
raw_data = []
linkfield = None
for link in ptable._referenced_by:
if link[0] == componentname:
linkfield = link[1]
break
if linkfield != None:
query = ctable[linkfield] == self.record_id
records = db(query).select()
find_fields = []
for component in self.resource.components.values():
find_fields += component.readable_fields()
fields = []
if list_fields:
for lf in list_fields:
for field in find_fields:
if field.name == lf:
fields.append(field)
break
else:
for field in find_fields:
if field.type == "id":
continue
if report_hide_comments and field.name == "comments":
continue
fields.append(field)
if not fields:
fields = [table.id]
label_fields = [f.label for f in fields]
represent = current.manager.represent
for record in records:
data = []
for field in fields:
value = record[field.name]
text = represent(field,
value=value,
strip_markup=True,
non_xml_output=True,
extended_comments=True
)
data.append(text)
raw_data.append(data)
self.addTable(raw_data = raw_data,
list_fields=label_fields)
if "report_footer" in attr:
self.addSpacer(3)
self.extractrHeader(attr["report_footer"])
# Build the document
doc = self.buildDoc()
# Set content type and disposition headers
if response:
response.headers["Content-Type"] = contenttype(".pdf")
response.headers["Content-disposition"] = \
"%s; filename=\"%s\"" % (content_disposition,
self.filename)
# Return the stream
return doc
elif method == "create":
# Create an OCR PDF form
if not current.deployment_settings.has_module("ocr"):
r.error(501, self.ERROR.OCR_DISABLED)
current.s3db.table("ocr_meta")
formUUID = uuid.uuid1()
self.newOCRForm(formUUID)
# Put values
self.OCRPDFManager()
# Build the document
doc = self.buildDoc()
numPages = self.doc.numPages
layoutXML = self.__getOCRLayout()
self.__update_dbmeta(formUUID, layoutXML, numPages)
# Set content type and disposition headers
if response:
response.headers["Content-Type"] = contenttype(".pdf")
response.headers["Content-disposition"] = \
"%s; filename=\"%s\"" % (content_disposition,
self.filename)
# Return the stream
return doc
elif method == "import":
# Render a review UI
if not current.deployment_settings.has_module("ocr"):
r.error(501, self.ERROR.OCR_DISABLED)
authorised = self._permitted(method="create")
if not authorised:
r.unauthorised()
try:
if r.component:
trigger = r.args[3]
else:
trigger = r.args[1]
except(IndexError):
trigger = None
if trigger == "review":
try:
jobuuid = r.vars["jobuuid"]
except(KeyError):
r.error(501, current.manager.ERROR.BAD_REQUEST)
# Check if operation is valid on the given job_uuid
current.s3db.table("ocr_meta")
statustable = db[statustablename]
query = (statustable.job_uuid == jobuuid)
row = db(query).select().first()
if not row:
# No such job
r.error(501, self.ERROR.INVALID_JOBID)
if row.review_status == 1:
# Job has already been reviewed
r.error(501, self.ERROR.JOB_COMPLETE)
# Retrieve meta data
if row.job_has_errors == 1:
#print "error", "1"
job_has_errors = True
else:
#print "error", "0"
job_has_errors = False
self.setuuid = row.image_set_uuid
# Retrieve s3ocrxml
formuuid = row.form_uuid
metatable = db.ocr_meta
row = db(metatable.form_uuid == formuuid).select().first()
if not row:
r.error(501, self.ERROR.INVALID_FORMID)
s3ocrxml_filename = row.s3ocrxml_file
f = open(os.path.join(r.folder,
"uploads",
"ocr_meta",
s3ocrxml_filename),
"rb")
s3ocrxml = f.read()
f.close()
# print etree.tostring(etree.fromstring(s3ocrxml), pretty_print=True)
s3ocrdict = self.__s3ocrxml2dict(s3ocrxml)
# Retrieve the job
import_job = self.resource.import_tree(None, None,
job_id=jobuuid,
commit_job=False,
ignore_errors=True)
s3import_enabled = True
if s3import_enabled:
s3ocrdata = self.__importjob2data(import_job)
else:
# Retrive s3ocr data xml
table = db.ocr_data_xml
query = (table.image_set_uuid == self.setuuid)
row = db(query).select().first()
if not row:
r.error(501, current.manager.ERROR.BAD_RECORD)
s3ocrdataxml_filename = row.data_file
f = open(os.path.join(r.folder,
"uploads",
"ocr_payload",
s3ocrdataxml_filename),
"rb")
s3ocrdataxml = f.read()
f.close()
s3ocrdata = self.__temp_ocrdataxml_parser(s3ocrdataxml)
reviewform = self.__create_review_form(s3ocrdict, s3ocrdata)
return response.render("_ocr_review.html",
dict(reviewform=reviewform)
)
elif trigger == "image":
# Do import job
try:
setuuid = r.vars["setuuid"]
resource_table = r.vars["resource_table"]
field_name = r.vars["field_name"]
except(KeyError):
r.error(501, current.manager.ERROR.BAD_REQUEST)
try:
value = r.vars["value"]
except(KeyError):
value = None
try:
sequence = r.vars["sequence"]
except(KeyError):
r.error(501, current.manager.ERROR.BAD_REQUEST)
# Load ocr tables
current.s3db.table("ocr_meta")
table = db.ocr_field_crops
if value:
query = (table.image_set_uuid == setuuid) & \
(table.resource_table == resource_table) & \
(table.field_name == field_name) & \
(table.value == value)
row = db(query).select().first()
else:
query = (table.image_set_uuid == setuuid) & \
(table.resource_table == resource_table) & \
(table.field_name == field_name) & \
(table.sequence == sequence)
row = db(query).select().first()
if not row:
r.error(501, current.manager.ERROR.BAD_RECORD)
format = row.image_file[-4:]
image_file = open(os.path.join(r.folder,
"uploads",
"ocr_payload",
row.image_file))
image_file_content = image_file.read()
image_file.close()
# Set content type and disposition headers
if response:
response.headers["Content-Type"] = contenttype(format)
response.headers["Content-disposition"] = \
"%s; filename=\"%s\"" % ("inline",
"tempimage%s" % format)
# Return the stream
return image_file_content
elif trigger == "import":
# Do import job
try:
setuuid = r.vars["setuuid"]
except(KeyError):
r.error(501, current.manager.ERROR.BAD_REQUEST)
# Check if operation is valid on the given set_uuid
current.s3db.table("ocr_meta")
statustable = db[statustablename]
query = (statustable.image_set_uuid == setuuid)
row = db(query).select().first()
if row:
# This set of images has already been imported
jobuuid = row.job_uuid
if r.component:
# If component
request_args = request.get("args", ["", ""])
record_id = request_args[0]
component_name = request_args[1]
urlprefix = "%s/%s/%s" % (request.function,
record_id,
component_name)
else:
# Not a component
urlprefix = request.function
redirect(URL(request.controller,
"%s/upload.pdf" % urlprefix,
args="review",
vars={"jobuuid":jobuuid}))
table = db.ocr_data_xml
row = db(table.image_set_uuid == setuuid).select().first()
if not row:
r.error(501, current.manager.ERROR.BAD_RECORD)
data_file = open(os.path.join(r.folder,
"uploads",
"ocr_payload",
row.data_file))
formuuid = row.form_uuid
datafile_content = data_file.read()
data_file.close()
metatable = db.ocr_meta
row = db(metatable.form_uuid == formuuid).select().first()
if not row:
r.error(501, self.ERROR.INVALID_FORMID)
s3ocrxml_filename = row.s3ocrxml_file
f = open(os.path.join(r.folder,
"uploads",
"ocr_meta",
s3ocrxml_filename),
"rb")
s3ocrxml = f.read()
f.close()
s3ocrdict = self.__s3ocrxml2dict(s3ocrxml)
crosslimit_options = {}
for eachresource in s3ocrdict["$resource_seq"]:
resource = s3ocrdict[eachresource]
for eachfield in resource["$field_seq"]:
field = resource[eachfield]
if field.has_options:
if field.options and\
field.options.count > MAX_FORM_OPTIONS_LIMIT:
if not crosslimit_options.has_key(eachresource):
crosslimit_options[eachresource] = [eachfield]
else:
crosslimit_options[eachresource].append(eachfield)
if len(crosslimit_options) != 0:
s3xml_root = etree.fromstring(datafile_content)
resource_element = s3xml_root.getchildren()[0]
resourcename = resource_element.attrib.get("name")
for eachfield in resource_element:
if eachfield.tag == "data":
if crosslimit_options.has_key(resourcename):
fieldname = eachfield.attrib.get("field")
if fieldname in crosslimit_options[resourcename]:
match_status = {}
value = eachfield.text.encode("utf-8").lower()
for eachoption in s3ocrdict[resourcename][fieldname].options.list:
try:
fieldtext = eachoption.label.lower()
except:
fieldtext = ""
match_status[eachoption.value] =\
self.dameraulevenshtein(cast2ascii(fieldtext),
cast2ascii(value))
#print value, fieldtext, match_status[eachoption.value]
closematch_value = 1000000000
closematch = []
for eachmatch in match_status.keys():
if match_status[eachmatch] < closematch_value:
closematch = [eachmatch]
closematch_value = match_status[eachmatch]
elif match_status[eachmatch] == closematch_value:
closematch.append(eachmatch)
if len(closematch) > 0:
value = closematch[0]
else:
value = ""
eachfield.text = value
eachfield.attrib["value"] = value
elif eachfield.tag == "resource":
resourcename = eachfield.attrib.get("name")
for eachsubfield in eachfield:
if eachsubfield.tag == "data":
fieldname = eachsubfield.attrib.get("field")
if resourcename in crosslimit_options.keys() and\
fieldname in crosslimit_options[resourcename]:
match_status = {}
value = eachsubfield.text.encode("utf-8").lower()
for eachoption in s3ocrdict[resourcename][fieldname].options.list:
try:
fieldtext = eachoption.label.lower()
except:
fieldtext = ""
match_status[eachoption.value] =\
self.dameraulevenshtein(cast2ascii(fieldtext),
cast2ascii(value))
#print value, fieldtext, match_status[eachoption.value]
closematch_value = 1000000000
closematch = []
for eachmatch in match_status.keys():
if match_status[eachmatch] < closematch_value:
closematch = [eachmatch]
closematch_value = match_status[eachmatch]
elif match_status[eachmatch] == closematch_value:
closematch.append(eachmatch)
if len(closematch) > 0:
value = closematch[0]
else:
value = ""
eachsubfield.text = value
eachsubfield.attrib["value"] = value
datafile_content = etree.tostring(s3xml_root)
#print datafile_content
# import_xml routine
outputjson = self.resource.import_xml(StringIO(datafile_content),
commit_job=False,
ignore_errors=True)
#print etree.tostring(etree.fromstring(datafile_content), pretty_print=True)
# Get metadata for review
jobuuid = self.resource.job.job_id
json2dict = json.loads(outputjson, strict=False)
if json2dict.has_key("message"):
jobhaserrors = 1
else:
jobhaserrors = 0
# Check status code
if json2dict.get("statuscode") != "200":
r.error(501, self.ERROR.UNRECOVERABLE_ERROR)
# Store metadata for review
db[statustablename].insert(image_set_uuid=setuuid,
form_uuid=formuuid,
job_uuid=jobuuid,
job_has_errors=jobhaserrors)
if r.component:
request_args = request.get("args", ["", ""])
record_id = request_args[0]
component_name = request_args[1]
urlprefix = "%s/%s/%s" % (request.function,
record_id,
component_name)
else:
# Not a component
urlprefix = request.function
redirect(URL(request.controller,
"%s/upload.pdf" % urlprefix,
args="review",
vars={"jobuuid":jobuuid}))
else:
# Render upload UI
# Check if user has UTC offset in his profile
auth = current.auth
if auth.user:
utc_offset = auth.user.utc_offset
else:
r.error(501, self.ERROR.NO_UTC_OFFSET)
# Load OCR tables
current.s3db.table("ocr_meta")
# Create an html image upload form for user
formuuid = r.vars.get("formuuid", None)
uploadformat = r.vars.get("uploadformat", None)
requesturl = request.env.path_info
createurl = "%s/create.pdf" %\
requesturl[0:requesturl.rfind("/")]
if not (formuuid and uploadformat):
availForms = self.__getResourceForms()
return response.render("_ocr_upload.html",
dict(availForms=availForms,
createurl=createurl))
else:
try:
numpages = self.__getNumPages(formuuid)
except:
r.error(501, self.resource.ERROR.BAD_RECORD)
if not numpages:
r.error(501, self.ERROR.EMPTY_OCR_FORM)
return response.render("_ocr_page_upload.html",
dict(numpages=numpages,
posturl=createurl,
formuuid=formuuid,
uploadformat=uploadformat))
numpages = self.__getNumPages(formuuid)
if not numpages:
r.error(501, self.ERROR.EMPTY_OCR_FORM)
return response.render("_ocr_page_upload.html",
dict(numpages=numpages,
posturl=createurl,
formuuid=formuuid,
uploadformat=uploadformat))
else:
r.error(405, current.manager.ERROR.BAD_METHOD)
elif r.http == "POST":
if method == "create":
# Upload scanned OCR images
if not current.deployment_settings.has_module("ocr"):
r.error(501, self.ERROR.OCR_DISABLED)
# Form meta vars
formuuid = r.vars.formuuid
numpages = int(r.vars.numpages)
uploadformat = r.vars.uploadformat
# Set id for given form
setuuid = uuid.uuid1()
current.s3db.table("ocr_meta")
# Check for upload format
if uploadformat == "image":
# store each page into db/disk
payloadtable = db.ocr_payload
for eachpage in xrange(1, numpages+1):
varname = "page%s" % eachpage
fileholder = r.vars[varname]
pagenumber = eachpage
# server side file validation
imgfilename = fileholder.filename
extension = lambda m: m[m.rfind(".") + 1:]
imageformats = ["jpg", "png", "gif", "bmp"]
if extension(imgfilename) not in imageformats:
r.error(501, self.ERROR.INVALID_IMAGE_TYPE)
# store page
payloadtable.insert(
image_set_uuid=setuuid,
image_file=payloadtable["image_file"].store(\
fileholder.file,
fileholder.filename),
page_number=pagenumber)
elif uploadformat == "pdf":
fileholder = r.vars["pdffile"]
# server side file validation
filename = fileholder.filename
extension = lambda m: m[m.rfind(".")+1:]
if extension(filename) != "pdf":
r.error(501, self.ERROR.NOT_PDF_FILE)
# create temp dir to extract the images
uniqueuuid = setuuid # to make it thread safe
inputfilename = "%s_%s" % (uniqueuuid, fileholder.filename)
outputfilename = "%s_%s.png" % (uniqueuuid,
fileholder.filename[:-4])
ocr_temp_dir = os.path.join(self.r.folder,
"uploads", "ocr_temp")
try:
os.mkdir(ocr_temp_dir)
except(OSError):
pass
f = open(os.path.join(ocr_temp_dir, inputfilename), "w")
f.write(fileholder.file.read())
f.close()
success =\
subprocess.call(["convert",
os.path.join(ocr_temp_dir,
inputfilename),
os.path.join(ocr_temp_dir,
outputfilename)])
if success != 0:
self.r.error(501, self.ERROR.IMAGE_MAGICK_ERROR)
# store each page into db/disk
payloadtable = db.ocr_payload
if numpages == 1:
imagefilename = outputfilename
imgfilepath = os.path.join(ocr_temp_dir, imagefilename)
try:
imgfile = open(imgfilepath)
except(IOError):
self.r.error(501, self.ERROR.INVALID_PDF)
pagenumber = 1
# Store page
payloadtable.insert(
image_set_uuid=setuuid,
image_file=payloadtable["image_file"].store(\
imgfile,
imagefilename),
page_number=pagenumber)
imgfile.close()
os.remove(imgfilepath)
else:
for eachpage in xrange(0, numpages):
imagefilename = "%s-%s.png" % (outputfilename[:-4],
eachpage)
imgfilepath = os.path.join(ocr_temp_dir,
imagefilename)
try:
imgfile = open(imgfilepath, "r")
except(IOError):
self.r.error(501, self.ERROR.INVALID_PDF)
pagenumber = eachpage + 1
# Store page
payloadtable.insert(
image_set_uuid=setuuid,
image_file=payloadtable["image_file"].store(\
imgfile,
imagefilename),
page_number=pagenumber)
imgfile.close()
os.remove(imgfilepath)
os.remove(os.path.join(ocr_temp_dir, inputfilename))
try:
os.rmdir(ocr_temp_dir)
except(OSError):
import shutil
shutil.rmtree(ocr_temp_dir)
else:
r.error(501, self.ERROR.INVALID_IMAGE_TYPE)
# OCR it
s3ocrimageparser = S3OCRImageParser(self, r)
output = s3ocrimageparser.parse(formuuid, setuuid)
table = db.ocr_data_xml
table.insert(image_set_uuid=setuuid,
data_file=table["data_file"].store(
StringIO(output),
"%s-data.xml" % setuuid),
form_uuid=formuuid,
)
if r.component:
request_args = current.request.get("args", ["", ""])
record_id = request_args[0]
component_name = request_args[1]
urlprefix = "%s/%s/%s" % (request.function,
record_id,
component_name)
else:
# Not a component
urlprefix = request.function
redirect(URL(request.controller,
"%s/import.pdf" % urlprefix,
args="import",
vars={"setuuid":setuuid}))
elif method == "import":
if not current.deployment_settings.has_module("ocr"):
r.error(501, self.ERROR.OCR_DISABLED)
authorised = self._permitted(method="create")
if not authorised:
r.unauthorised()
try:
if r.component:
trigger = r.args[3]
else:
trigger = r.args[1]
except(IndexError):
trigger = None
if trigger == "review":
# Review UI post
jobuuid = r.vars.pop("jobuuid")
# Check if operation is valid on the given job_uuid
current.s3db.table("ocr_meta")
statustable = db["ocr_form_status"]
query = (statustable.job_uuid == jobuuid)
row = db(query).select().first()
if not row:
r.error(501, self.ERROR.INVALID_JOBID)
if row.review_status == 1:
# Job has already been reviewed
r.error(501, self.ERROR.JOB_COMPLETE)
try:
r.vars.pop("_utc_offset")
except:
pass
try:
ignore_fields = r.vars.pop("ignore-fields-list")
except:
ignore_fields = ""
if (ignore_fields == "") or (not ignore_fields):
ignore_fields = []
else:
try:
ignore_fields = ignore_fields.split("|")
except:
ignore_fields = [ignore_fields]
datadict = Storage()
for eachfield in r.vars.keys():
resourcetable, fieldname = eachfield.split("-")
if not datadict.has_key(resourcetable):
datadict[resourcetable] = Storage()
datadict[resourcetable][fieldname] = r.vars[eachfield]
for eachfield in ignore_fields:
resourcetable, fieldname = eachfield.split("-")
datadict[resourcetable].pop(fieldname)
if len(datadict[resourcetable]) == 0:
datadict.pop(resourcetable)
s3xml_etree_dict = Storage()
for eachresource in datadict.keys():
s3xml_root = etree.Element("s3xml")
resource_element = etree.SubElement(s3xml_root, "resource")
resource_element.attrib["name"] = eachresource
for eachfield in datadict[eachresource].keys():
fieldvalue = datadict[eachresource][eachfield]
fieldvalue = str(fieldvalue) if fieldvalue else ""
fieldtype = db[eachresource][eachfield].type
if fieldtype.startswith("reference "):
reference_resource_name = fieldtype[len("reference "):]
# reference element
reference_element =\
etree.SubElement(resource_element, "reference")
reference_element.attrib["field"] = eachfield
reference_element.attrib["resource"] = reference_resource_name
# resource element
ref_res_element =\
etree.SubElement(reference_element, "resource")
ref_res_element.attrib["name"] = reference_resource_name
# data element
ref_res_data_element =\
etree.SubElement(ref_res_element, "data")
ref_res_data_element.attrib["field"] = "name"
try:
ref_res_data_element.text = cast2ascii(fieldvalue)
except(ValueError):
ref_res_data_element.text = ""
else:
field_element = etree.SubElement(resource_element, "data")
field_element.attrib["field"] = eachfield
try:
field_element.attrib["value"] = cast2ascii(fieldvalue)
except(ValueError):
field_element.attrib["value"] = ""
try:
field_element.text = cast2ascii(fieldvalue)
except(ValueError):
field_element.text = ""
s3xml_etree_dict[eachresource] = s3xml_root
#print etree.tostring(s3xml_root, pretty_print=True)
errordict = {}
_record = current.xml.record
validate = current.manager.validate
s3record_dict = Storage()
for eachtable in s3xml_etree_dict.keys():
record = _record(db[eachtable],
s3xml_etree_dict[eachtable].getchildren()[0])
s3record_dict[eachtable] = record
import_job = r.resource.import_tree(None, None, job_id=jobuuid,
ignore_errors=False,
commit_job=False)
response.headers["Content-Type"] = contenttype(".json")
for eachtable in s3record_dict.keys():
record = s3record_dict[eachtable]
possible_items = []
our_item = None
for eachitem in import_job.items.keys():
item = import_job.items[eachitem]
if item.table == eachtable:
if item.data and (len(item.data) > 0):
our_item = item
else:
if item.data and (len(item.data) == 0):
possible_items.append(item)
if our_item:
our_item.update(record)
elif len(possible_items) > 0:
possible_items[0].update(record)
else:
import_job.add_item(s3xml_etree_dict[eachtable].getchildren()[0])
for eachresource in datadict.keys():
for eachfield in datadict[eachresource].keys():
if not db[eachresource][eachfield].type.startswith("reference "):
value, error =\
validate(db[eachresource],
None, eachfield,
datadict[eachresource][eachfield])
if error:
errordict["%s-%s" %\
(eachresource, eachfield)] = str(error)
if not import_job.error_tree:
store_success = import_job.store()
if store_success:
if import_job.error_tree:
#print etree.tostring(import_job.error_tree, pretty_print=True)
errordict = self.__parse_job_error_tree(import_job.error_tree)
success = False
else:
# Revalidate data
for eachresource in datadict.keys():
for eachfield in datadict[eachresource].keys():
if not db[eachresource][eachfield].type.startswith("reference "):
value, error =\
validate(db[eachresource],
None, eachfield,
datadict[eachresource][eachfield])
if error:
errordict["%s-%s" %\
(eachresource, eachfield)] = str(error)
if len(errordict) > 0:
success = False
else:
success = True
import_job.commit()
else:
#print etree.tostring(import_job.error_tree, pretty_print=True)
errordict = self.__parse_job_error_tree(import_job.error_tree)
success = False
else:
#print etree.tostring(import_job.error_tree, pretty_print=True)
errordict = self.__parse_job_error_tree(import_job.error_tree)
success = False
if success:
session.confirmation =\
T("OCR review data has been stored into the database successfully.")
# Perform cleanup
statustable = db["ocr_form_status"]
query = (statustable.job_uuid == jobuuid)
row = db(query).select(statustable.image_set_uuid).first()
image_set_uuid = row.image_set_uuid
# Set review status = true
db(query).update(review_status=1)
# Remove cropped images from the database
cropstable = db["ocr_field_crops"]
query = (cropstable.image_set_uuid == image_set_uuid)
# Delete uploaded files
rows = db(query).select()
for eachrow in rows:
filename = eachrow.image_file
filepath = os.path.join(self.r.folder,
"uploads",
"ocr_payload",
filename)
os.remove(filepath)
# Delete records
db(query).delete()
return json.dumps({"success": success,
"error": errordict})
else:
r.error(405, current.manager.ERROR.BAD_METHOD)
else:
r.error(501, current.manager.ERROR.BAD_REQUEST)
# End of apply_method()
def __parse_job_error_tree(self, tree):
"""
create a dictionary of fields with errors
@param tree: S3ImportJob.error_tree
@return: errordict
"""
errordict = {}
for eachresource in tree:
resourcename = eachresource.attrib.get("name")
for eachfield in eachresource:
fieldname = eachfield.attrib.get("field")
error = eachfield.attrib.get("error")
if error:
#print resourcename, fieldname
errordict["%s-%s" % (resourcename, fieldname)] = error
return errordict
def dameraulevenshtein(self, seq1, seq2):
"""
Calculate the Damerau-Levenshtein distance between sequences.
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> dameraulevenshtein('ba', 'abc')
2
>>> dameraulevenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2
"""
# codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F
# Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.
# However, only the current and two previous rows are needed at once,
# so we only store those.
oneago = None
thisrow = range(1, len(seq2) + 1) + [0]
for x in xrange(len(seq1)):
# Python lists wrap around for negative indices, so put the
# leftmost column at the *end* of the list. This matches with
# the zero-indexed strings and saves extra calculation.
twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]
for y in xrange(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
# This block deals with transpositions
if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]
and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):
thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)
return thisrow[len(seq2) - 1]
    def __temp_ocrdataxml_parser(self, s3ocrdataxml):
        """
        Convert data generated from the ocr parser to a dictionary.

        The incoming s3xml document is first re-rooted under an "s3ocr"
        element: for a component request the single resource element is
        taken as-is; for a main resource its direct data/reference
        children are copied onto a fresh "resource" element and any
        nested component resources are appended after it.  The resulting
        flat list of resources is then read into a nested Storage.

        @param s3ocrdataxml: output of S3OCRImageParser, as xml text
        @return: python dictionary equivalent to the input xml, keyed
                 by resource name, then field name
        """
        s3ocrdataxml_etree = etree.fromstring(s3ocrdataxml)
        s3ocrdatadict = Storage()
        s3xml_root = s3ocrdataxml_etree
        resource_element = s3xml_root.getchildren()[0]
        s3ocr_root = etree.Element("s3ocr")
        if self.r.component:     # if it is a component
            s3ocr_root.append(resource_element)
        else: # if it is main resource
            componentetrees = []
            # mres is main resource etree
            mres = etree.Element("resource")
            for attr in resource_element.attrib.keys():
                mres.set(attr, resource_element.attrib.get(attr))
            # NOTE(review): etree.append() *moves* an element to its new
            # parent, so this loop mutates resource_element while
            # iterating it — relies on lxml's iteration behaviour;
            # confirm before restructuring.
            for field_element in resource_element:
                if field_element.tag in ["data", "reference"]:       # main resource fields
                    mres.append(field_element)
                elif field_element.tag == "resource":    # component resource
                    componentetrees.append(field_element)
            serialised_component_etrees = componentetrees
            # create s3ocr tree
            s3ocr_root.append(mres)
            for res in serialised_component_etrees:
                s3ocr_root.append(res)
        for eachresource in s3ocr_root:
            resourcename = eachresource.attrib.get("name")
            s3ocrdatadict[resourcename] = Storage()
            for eachfield in eachresource:
                if eachfield.tag == "reference":
                    # reference value lives in the nested
                    # resource/data element's text
                    fieldname = eachfield.attrib.get("field")
                    ref_res_field = eachfield.getchildren()[0]
                    datafield = ref_res_field.getchildren()[0]
                    value = datafield.text
                else:
                    # plain data element: prefer the value attribute,
                    # fall back to the element text
                    fieldname = eachfield.attrib.get("field")
                    value = eachfield.attrib.get("value")
                    text = eachfield.text
                    if not value:
                        value = text
                s3ocrdatadict[resourcename][fieldname] = value
        return s3ocrdatadict
def __importjob2data(self, importjob):
"""
convert data from import job into a dictionary
@param importjob: S3ImportJob instance
@return: data of S3ImportJob into a dictionary
"""
s3ocrdata = Storage()
#print len(importjob.items)
#print importjob.items
#print importjob.error_tree
import_item_dict = importjob.items
for eachitem in import_item_dict.keys():
import_item = import_item_dict[eachitem]
if import_item.data and len(import_item.data) > 0:
s3ocrdata[str(import_item.table)] = import_item.data
return s3ocrdata
    def __create_review_form(self, s3ocrdict, s3ocrdata):
        """
        Create a html review form using the available data.

        For every field the form shows the cropped image snippet(s) of
        the scanned sheet (served via the upload.pdf "image" endpoint)
        next to an input widget pre-filled with the OCR-ed value, plus
        per-field clear/ignore buttons.

        @param s3ocrdict: output of self.__s3ocrxml2dict()
        @param s3ocrdata: output of self.__importjob2data()

        @return: html review form (web2py FORM)
        """
        ptablecontent = []
        fieldnum = 1
        request = current.request
        T = current.T
        r = self.r
        setuuid = self.setuuid
        # Image URLs must include the component path for component requests
        if r.component:
            request_args = request.get("args",["",""])
            record_id = request_args[0]
            component_name = request_args[1]
            urlprefix = "%s/%s/%s" % (request.function,
                                      record_id,
                                      component_name)
        else:
            # Not a component
            urlprefix = request.function
        for eachresource in s3ocrdict["$resource_seq"]:
            # resource title
            resource = s3ocrdict[eachresource]
            ptablecontent.append(TR(TD(DIV(eachresource, _class="resource_name"),
                                       _colspan="4"),
                                    _class="titletr")
                                 )
            ctablecontent = []
            for eachfield in resource["$field_seq"]:
                field = resource[eachfield]
                comment = field.comment if field.comment else ""
                # Normalise the OCR-ed value for this field to a plain string
                try:
                    if s3ocrdata[eachresource][eachfield]:
                        condition = (isinstance(s3ocrdata[eachresource][eachfield], str) or\
                                         isinstance(s3ocrdata[eachresource][eachfield], int))
                        if condition:
                            value = str(s3ocrdata[eachresource][eachfield])
                        elif isinstance(s3ocrdata[eachresource][eachfield], date):
                            value = date.strftime(s3ocrdata[eachresource][eachfield], "%Y-%m-%d")
                        elif isinstance(s3ocrdata[eachresource][eachfield], datetime):
                            value = datetime.strftime(s3ocrdata[eachresource][eachfield], "%Y-%m-%d %H:%M:%S")
                        else:
                            # Anything else: ascii-fold a unicode value
                            value = unicodedata.normalize("NFKD",
                                                          s3ocrdata[eachresource][eachfield]).encode("ascii",
                                                                                                     "ignore")
                    else:
                        value = ""
                except(KeyError):
                    # No OCR data was captured for this field
                    value=""
                name = "%s-%s" % (eachresource, eachfield)
                if field.has_options:
                    # Option fields: per-option crop image + checkbox/radio
                    if field.type == "multiselect":
                        if field.options.count <= MAX_FORM_OPTIONS_LIMIT:
                            options = []
                            optct = 1
                            # multiselect values are pipe-delimited: |a|b|
                            try:
                                value = value.split("|")[1:-1]
                            except:
                                value = [str(value)]
                            chk = lambda m,n: "on" if str(m) in n else None
                            for eachoption in field.options.list:
                                options.append(TR(TD(IMG(_src=URL(request.application,
                                                                  r.prefix,
                                                                  "%s/upload.pdf" % urlprefix,
                                                                  args="image",
                                                                  vars={"setuuid": setuuid,
                                                                        "resource_table": eachresource,
                                                                        "field_name": eachfield,
                                                                        "value": eachoption.value
                                                                        }
                                                                  ),
                                                         _style="border: solid #333 1px;"),
                                                     _style="text-align:center;"),
                                                  TD(INPUT(_id="%s-%s" %\
                                                               (name, optct),
                                                           _value=eachoption.value,
                                                           _type="checkbox",
                                                           _class="field-%s" %\
                                                               fieldnum,
                                                           _name=name,
                                                           value=chk(eachoption.value,
                                                                     value))),
                                                  TD(LABEL(eachoption.label,
                                                           _for="%s-%s" %\
                                                               (name, optct)))))
                                optct+=1
                            input_area = TABLE(options,
                                               _class="field-%s" % fieldnum)
                        else:
                            # Too many options for checkboxes on the sheet:
                            # show the free-text crop lines instead
                            for eachline in xrange(1, 3):
                                ctablecontent.append(TR(TD(IMG(_src=URL(request.application,
                                                                        r.prefix,
                                                                        "%s/upload.pdf" % urlprefix,
                                                                        args="image",
                                                                        vars={"setuuid": setuuid,
                                                                              "resource_table": eachresource,
                                                                              "field_name": eachfield,
                                                                              "sequence": eachline
                                                                              }
                                                                        ),
                                                               _style="border: solid #333 1px;"),
                                                           _style="text-align:center; padding:5px;",
                                                           _colspan="4")))
                            options = []
                            optct = 1
                            chk = lambda m,n: "on" if str(m) in n else None
                            for eachoption in field.options.list:
                                options.append(TR(TD(INPUT(_id="%s-%s" %\
                                                               (name, optct),
                                                           _value=eachoption.value,
                                                           _type="checkbox",
                                                           _class="field-%s" %\
                                                               fieldnum,
                                                           _name=name,
                                                           value=chk(eachoption.value,
                                                                     value)
                                                           )),
                                                  TD(LABEL(eachoption.label,
                                                           _for="%s-%s" %\
                                                               (name, optct)))))
                                optct+=1
                            input_area = TABLE(options,
                                               _class="field-%s" % fieldnum)
                    elif field.type == "boolean":
                        # Booleans are rendered as fixed yes/no radios
                        options = []
                        optct = 1
                        chk = lambda m,n: m if str(m) == str(n) else None
                        for eachoption in [Storage({"value": "yes",
                                                    "label": T("Yes")}),
                                           Storage({"value": "no",
                                                    "label": T("No")})]:
                            options.append(TR(TD(IMG(_src=URL(request.application,
                                                              r.prefix,
                                                              "%s/upload.pdf" % urlprefix,
                                                              args="image",
                                                              vars={"setuuid": setuuid,
                                                                    "resource_table": eachresource,
                                                                    "field_name": eachfield,
                                                                    "value": eachoption.value
                                                                    }
                                                              ),
                                                     _style="border: solid #333 1px;"),
                                                 _style="text-align:center;"),
                                              TD(INPUT(_id="%s-%s" %\
                                                           (name, optct),
                                                       _value=eachoption.value,
                                                       _type="radio",
                                                       _class="field-%s" %\
                                                           fieldnum,
                                                       _name=name,
                                                       value=chk(eachoption.value,
                                                                 value))),
                                              TD(LABEL(eachoption.label,
                                                       _for="%s-%s" %\
                                                           (name, optct)))))
                            optct+=1
                        input_area = TABLE(options,
                                           _class="field-%s" % fieldnum)
                    else:
                        # Single-select option field
                        if field.options.count <= MAX_FORM_OPTIONS_LIMIT:
                            options = []
                            optct = 1
                            chk = lambda m,n: m if str(m) == str(n) else None
                            for eachoption in field.options.list:
                                options.append(TR(TD(IMG(_src=URL(request.application,
                                                                  r.prefix,
                                                                  "%s/upload.pdf" % urlprefix,
                                                                  args="image",
                                                                  vars={"setuuid": setuuid,
                                                                        "resource_table": eachresource,
                                                                        "field_name": eachfield,
                                                                        "value": eachoption.value
                                                                        }
                                                                  ),
                                                         _style="border: solid #333 1px;"),
                                                     _style="text-align:center;"),
                                                  TD(INPUT(_id="%s-%s" %\
                                                               (name, optct),
                                                           _value=eachoption.value,
                                                           _type="radio",
                                                           _class="field-%s" %\
                                                               fieldnum,
                                                           _name=name,
                                                           value=chk(eachoption.value,
                                                                     value))),
                                                  TD(LABEL(eachoption.label,
                                                           _for="%s-%s" %\
                                                               (name, optct)))))
                                optct+=1
                            input_area = TABLE(options,
                                               _class="field-%s" % fieldnum)
                        else:
                            # Too many options: free-text crop lines + radios
                            for eachline in xrange(1, 3):
                                ctablecontent.append(TR(TD(IMG(_src=URL(request.application,
                                                                        r.prefix,
                                                                        "%s/upload.pdf" % urlprefix,
                                                                        args="image",
                                                                        vars={"setuuid": setuuid,
                                                                              "resource_table": eachresource,
                                                                              "field_name": eachfield,
                                                                              "sequence": eachline
                                                                              }
                                                                        ),
                                                               _style="border: solid #333 1px;"),
                                                           _style="text-align:center; padding:5px;",
                                                           _colspan="4")))
                            options = []
                            optct = 1
                            chk = lambda m,n: m if str(m) == str(n) else None
                            for eachoption in field.options.list:
                                options.append(TR(TD(INPUT(_id="%s-%s" %\
                                                               (name, optct),
                                                           _value=eachoption.value,
                                                           _type="radio",
                                                           _class="field-%s" %\
                                                               fieldnum,
                                                           _name=name,
                                                           value=chk(eachoption.value,
                                                                     value)
                                                           )),
                                                  TD(LABEL(eachoption.label,
                                                           _for="%s-%s" %\
                                                               (name, optct)))))
                                optct+=1
                            input_area = TABLE(options,
                                               _class="field-%s" % fieldnum)
                else:
                    # Free-form fields: one crop image per handwriting line
                    # followed by an editable input
                    if field.type in ["string", "integer", "double"]:
                        for eachline in xrange(1, field.lines+1):
                            ctablecontent.append(TR(TD(IMG(_src=URL(request.application,
                                                                    r.prefix,
                                                                    "%s/upload.pdf" % urlprefix,
                                                                    args="image",
                                                                    vars={"setuuid": setuuid,
                                                                          "resource_table": eachresource,
                                                                          "field_name": eachfield,
                                                                          "sequence": eachline
                                                                          }
                                                                    ),
                                                           _style="border: solid #333 1px;"),
                                                       _style="text-align:center; padding:5px;",
                                                       _colspan="4")))
                        input_area = INPUT(_id="%s-id" % name.replace("-", "_"),
                                           _class="field-%s" % fieldnum,
                                           _value=value, _name=name)
                    elif field.type == "date":
                        # Date crops are stored as separate Y/M/D sub-images
                        subsec = {"DD":1,
                                  "MO":2,
                                  "YYYY":3}
                        imglist = []
                        for eachsec in ["YYYY", "MO", "DD"]:
                            imglist.append(IMG(_src=URL(request.application,
                                                        r.prefix,
                                                        "%s/upload.pdf" % urlprefix,
                                                        args="image",
                                                        vars={"setuuid": setuuid,
                                                              "resource_table": eachresource,
                                                              "field_name": eachfield,
                                                              "sequence": subsec[eachsec]}
                                                        ),
                                               _style="border: solid #333 1px;"))
                        ctablecontent.append(TR(TD(imglist,
                                                   _style="text-align:center; padding:5px;",
                                                   _colspan="4")))
                        # Coerce value into ISO date text for the input box
                        try:
                            value = value.strftime("%Y-%m-%d")
                        except(AttributeError):
                            try:
                                value = datetime.strptime(value, "%Y-%m-%d")
                                value = value.strftime("%Y-%m-%d")
                            except(ValueError):
                                value = ""
                        input_area = INPUT(_id="%s-id" % name.replace("-", "_"),
                                           _class="field-%s date" % fieldnum,
                                           _value=value, _name=name)
                    elif field.type == "datetime":
                        # Datetime crops: separate Y/M/D/H/M sub-images
                        subsec = {"HH":1,
                                  "MM":2,
                                  "DD":3,
                                  "MO":4,
                                  "YYYY":5}
                        imglist = []
                        for eachsec in ["YYYY", "MO", "DD", "HH", "MM"]:
                            imglist.append(IMG(_src=URL(request.application,
                                                        r.prefix,
                                                        "%s/upload.pdf" % urlprefix,
                                                        args="image",
                                                        vars={"setuuid": setuuid,
                                                              "resource_table": eachresource,
                                                              "field_name": eachfield,
                                                              "sequence": subsec[eachsec],
                                                              }
                                                        ),
                                               _style="border: solid #333 1px;"))
                        ctablecontent.append(TR(TD(imglist,
                                                   _style="text-align:center; padding:5px;",
                                                   _colspan="4")))
                        # Coerce value into ISO datetime text for the input box
                        try:
                            value = value.strftime("%Y-%m-%d %H:%M:%S")
                        except(AttributeError):
                            try:
                                value = datetime.strptime(value,"%Y-%m-%d %H:%M:%S")
                                value = value.strftime("%Y-%m-%d %H:%M:%S")
                            except(ValueError):
                                value = ""
                        input_area = INPUT(_id="%s-id" % name.replace("-", "_"),
                                           _class="field-%s datetime" % fieldnum,
                                           _value=value, _name=name)
                    elif field.type == "textbox":
                        for eachline in xrange(1, field.lines+1):
                            ctablecontent.append(TR(TD(IMG(_src=URL(request.application,
                                                                    r.prefix,
                                                                    "%s/upload.pdf" % urlprefix,
                                                                    args="image",
                                                                    vars={"setuuid": setuuid,
                                                                          "resource_table": eachresource,
                                                                          "field_name": eachfield,
                                                                          "sequence": eachline
                                                                          }
                                                                    ),
                                                           _style="border: solid #333 1px;"),
                                                       _style="text-align:center; padding:5px;",
                                                       _colspan="4")))
                        input_area = TEXTAREA(value,
                                              _class="field-%s" % fieldnum,
                                              _name=name)
                    else:
                        # Unknown field type: render nothing editable
                        input_area = SPAN()
                # Assemble the 4-column row: label | input | comment | buttons
                ctablecontent.append(TR(TD(TABLE(TR(TD(field.label)),
                                                 TR(TD(SPAN(_id="%s-error" % name,
                                                            _style="font-size: 12px; font-weight:bold; color: red;",
                                                            _class="error-span")))),
                                           _class="label", _style="vertical-align: top;"),
                                        TD(input_area, _class="infield"),
                                        TD(comment, _class="comment", _style="vertical-align: top;"),
                                        TD(TAG["BUTTON"](T("clear"),
                                                         _name="button-%s" % fieldnum,
                                                         _class="clrbutton"
                                                         ),
                                           TAG["BUTTON"](T("ignore"),
                                                         _name="ignore-%s" % name,
                                                         _class="ignore-button"),
                                           _class="clear", _style="vertical-align: top;"),
                                        _class="fieldtr"))
                # Separator row between fields
                ctablecontent.append(TR(TD(_colspan="4",
                                           _style="border: solid #999 3px;")))
                fieldnum+=1
            ptablecontent.extend(ctablecontent)
        # Submit button
        ptablecontent.append(TR(TD(TAG["button"](T("Submit"),
                                                 _class="submit-button",
                                                 _style="width: 70px; height: 20px;"),
                                   _colspan="4",
                                   _style="text-align:center; padding: 5px;")))
        output = FORM(TABLE(ptablecontent, _class="ptable"),
                      _id="ocr-review-form")
        return output
def __s3ocrxml2dict(self, s3ocrxml):
"""
convert s3ocrxml to dictionary so that it can be used in templates
@param s3ocrxml: content of a s3ocrxml file, in text
@return: equivalent dictionary for s3ocrxml file
"""
s3ocr_etree = etree.fromstring(s3ocrxml)
s3ocrdict = Storage()
resource_seq = []
for eachresource in s3ocr_etree:
resourcename = eachresource.attrib.get("name")
s3ocrdict[resourcename] = Storage()
resource_seq.append(resourcename)
field_seq = []
for eachfield in eachresource:
fieldname = eachfield.attrib.get("name")
if eachfield.attrib.get("readable") == "True"\
and eachfield.attrib.get("writable") == "True":
field_seq.append(fieldname)
fieldlabel = eachfield.attrib.get("label")
fieldtype = eachfield.attrib.get("type")
numlines = eachfield.attrib.get("lines", "1")
fieldreference =\
True if eachfield.attrib.get("reference") == "1" else False
fieldresource = eachfield.attrib.get("resource")
fieldhasoptions =\
True if eachfield.attrib.get("has_options") == "True" else False
# get html comment
fieldcomment = current.db[resourcename][fieldname].comment
if fieldhasoptions:
try:
s3ocrselect = eachfield.getchildren()[0]
options_found = True
except(IndexError):
fieldoptions = None
options_found = False
if options_found:
numoptions = len(s3ocrselect.getchildren())
optionlist = []
for eachoption in s3ocrselect:
optionlabel = eachoption.text
optionvalue = eachoption.attrib.get("value")
optionlist.append(Storage({"label": optionlabel,
"value": optionvalue}))
fieldoptions = Storage({"count":numoptions,
"list":optionlist})
else:
fieldoptions = None
else:
fieldoptions = None
s3ocrdict[resourcename][fieldname] =\
Storage({"label": fieldlabel,
"type": fieldtype,
"comment": fieldcomment,
"reference": fieldreference,
"resource": fieldresource,
"has_options": fieldhasoptions,
"options": fieldoptions,
"lines": int(numlines)})
s3ocrdict[resourcename]["$field_seq"] = field_seq
s3ocrdict["$resource_seq"] = resource_seq
return s3ocrdict
def newDocument(self,
title,
header,
footer,
filename = None,
heading=None,
):
"""
This will create a new empty PDF document.
Data then needs to be added to this document.
@param title: The title that will appear at the top of the document
and in the filename
@return: An empty pdf document
"""
# Get the document variables
now = self.request.now.isoformat()[:19].replace("T", " ")
docTitle = "%s %s" % (title, now)
if filename == None:
self.filename = "%s_%s.pdf" % (title, now)
else:
self.filename = "%s_%s.pdf" % (filename, now)
self.output = StringIO()
self.doc = EdenDocTemplate(self.output, title=docTitle)
self.doc.setPageTemplates(header,footer)
self.content = []
if heading == None:
heading = title
self.title = heading
self.prevtitle = heading
self.setPortrait()
self.leftMargin = inch
self.rightMargin = inch
self.topMargin = 0.5*inch + inch
self.bottomMargin = 0.5*inch + .5*inch
self.MINIMUM_MARGIN_SIZE = 0.3 * inch
self.setMargins()
def newOCRForm(self,
formUUID,
pdfname="ocrform.pdf",
top=65,
left=50,
bottom=None,
right=None,
**args):
self.content = []
self.output = StringIO()
self.layoutEtree = etree.Element("s3ocrlayout")
try:
pdfTitle = current.response.s3.crud_strings[self.tablename].title_list.decode("utf-8")
except:
pdfTitle = self.resource.tablename
formResourceName = self.resource.tablename
formRevision = self.__book_revision(formUUID, formResourceName)
self.filename = "%s_rev%s.pdf" % (formResourceName, formRevision)
self.doc = self.S3PDFOCRForm(self.output,
formUUID=formUUID,
pdfTitle = pdfTitle,
formRevision=formRevision,
formResourceName=formResourceName)
def __getResourceForms(self):
"""
Get all form UUIDs/Revs available for a given resource
@return: a list of all available forms for the given
resource, the list will contain tuples such
that the first value is form-uuid and the
second value is form-revision
"""
db = current.db
tablename = "ocr_meta"
table = db[tablename]
availForms = []
formResourceName = self.resource.tablename
query = (table.resource_name == formResourceName)
rows = db(query).select(orderby=~table.revision)
for eachrow in rows:
availForms.append({
"uuid" : eachrow.form_uuid,
"revision": eachrow.revision,
})
return availForms
def __getNumPages(self, formuuid):
"""
Gets Number of pages for given form UUID
@param formuuid: uuid of the form, for which
number of pages is required
@return: number of pages in a form identified
by uuid
"""
db = current.db
tablename = "ocr_meta"
table = db[tablename]
formResourceName = self.resource.tablename
formUUID = formuuid
rows = db(table.form_uuid == formUUID).select(table.pages)
row = rows[0]
return int(row.pages)
    def __s3OCREtree(self):
        """
        Optimise & modify the s3xml etree to produce the s3ocr etree.

        The resource struct is re-rooted under an "s3ocr" element,
        excluded components are dropped, database field types are
        mapped to OCR field types, and per-field line/box counts and
        reference/default attributes are set.

        @return: s3ocr etree
        """
        s3xml_etree = self.resource.struct(options=True,
                                           references=True,
                                           stylesheet=None,
                                           as_json=False,
                                           as_tree=True)
        # xml tags
        ITEXT = "label"
        HINT = "comment"
        TYPE = "type"
        HASOPTIONS = "has_options"
        LINES = "lines"
        BOXES = "boxes"
        REFERENCE = "reference"
        RESOURCE = "resource"
        # Components Localised Text added to the etree
        # Converting s3xml to s3ocr_xml (nicer to traverse)
        s3xml_root = s3xml_etree.getroot()
        resource_element = s3xml_root.getchildren()[0]
        s3ocr_root = etree.Element("s3ocr")
        # store components which have to be excluded
        settings = current.deployment_settings
        self.exclude_component_list =\
            settings.get_pdf_excluded_fields("%s_%s" % \
                                                 (self.r.prefix,
                                                  self.r.resource.name))
        if self.r.component: # if it is a component
            s3ocr_root.append(resource_element)
        else: # if it is main resource
            componentetrees = []
            # mres is main resource etree
            mres = etree.Element("resource")
            for attr in resource_element.attrib.keys():
                mres.set(attr, resource_element.attrib.get(attr))
            # NOTE(review): etree.append() *moves* elements, so this
            # mutates resource_element while iterating it — relies on
            # lxml iteration behaviour; confirm before restructuring.
            for field_element in resource_element:
                if field_element.tag == "field": # main resource fields
                    mres.append(field_element)
                elif field_element.tag == "resource": # component resource
                    componentetrees.append(field_element)
            serialised_component_etrees = componentetrees
            # create s3ocr tree
            s3ocr_root.append(mres)
            for res in serialised_component_etrees:
                s3ocr_root.append(res)
        # database fieldtype to ocr fieldtype mapping
        self.generic_ocr_field_type = {
            "string": "string",
            "text": "textbox",
            "boolean" : "boolean",
            "double": "double",
            "date": "date",
            "datetime": "datetime",
            "integer": "integer",
            "list:integer": "multiselect",
            "list:string": "multiselect",
            "list:double": "multiselect",
            "list:text": "multiselect",
            }
        # remove fields which are not required
        # loading user defined configurations
        FIELD_TYPE_LINES = { # mapping types with number of lines
            "string": 2,
            "textbox": 4,
            "integer": 1,
            "double": 1,
            "date": 1,
            "datetime": 1,
            }
        FIELD_TYPE_BOXES = { # mapping type with numboxes
            "integer": 9,
            "double": 16,
            }
        # NOTE(review): several branches below remove elements from the
        # tree *during* iterchildren() iteration — this is a known lxml
        # pitfall (siblings of a removed element can be skipped);
        # confirm intended before touching the loop structure.
        for eachresource in s3ocr_root.iterchildren():
            resourcetablename = eachresource.attrib.get("name")
            # exclude components
            if not self.r.component:
                if eachresource.attrib.get("name") in self.exclude_component_list:
                    # excluded components are removed
                    s3ocr_root.remove(eachresource)
                    continue
            for eachfield in eachresource.iterchildren():
                fieldname = eachfield.attrib.get("name")
                # fields which have to be displayed
                fieldtype = eachfield.attrib.get(TYPE)
                # mark reference fields and record the referenced resource
                if fieldtype.startswith("reference "):
                    eachfield.set(RESOURCE,
                                  fieldtype.split("reference ")[1])
                    eachfield.set(REFERENCE, "1")
                else:
                    eachfield.set(REFERENCE, "0")
                # loading ocr specific fieldtypes
                ocrfieldtype = self.generic_ocr_field_type.get(fieldtype,
                                                               None)
                if ocrfieldtype != None:
                    eachfield.set(TYPE, ocrfieldtype)
                    # refresh fieldtypes after update
                    fieldtype = eachfield.attrib.get(TYPE)
                # set num boxes and lines
                fieldhasoptions = eachfield.attrib.get(HASOPTIONS)
                if fieldhasoptions == "False":
                    eachfield.set(LINES,
                                  str(FIELD_TYPE_LINES.get(fieldtype,
                                                           1)))
                    if fieldtype in FIELD_TYPE_BOXES.keys():
                        eachfield.set(BOXES,
                                      str(FIELD_TYPE_BOXES.get(fieldtype)))
                # if field is readable but not writable set default value
                if eachfield.attrib.get("readable", "False") == "True" and \
                   eachfield.attrib.get("writable", "False") == "False":
                    try:
                        fieldresourcename = \
                            eachresource.attrib.get("name").split("%s_" %\
                                                                      self.prefix)[1]
                    except:
                        fieldresourcename = \
                            eachresource.attrib.get("name").split("_")[1]
                    fieldresource = \
                        self.resource.components.get(fieldresourcename, None)
                    if not fieldresource:
                        fieldresource = self.resource
                    fieldname = eachfield.attrib.get("name")
                    try:
                        fielddefault = self.r.resource.table[fieldname].default
                    except(KeyError):
                        fielddefault = "None"
                    eachfield.set("default",
                                  str(fielddefault))
                # for unknown field types
                if fieldtype not in self.generic_ocr_field_type.values():
                    eachfield.set(TYPE, "string")
                    eachfield.set(HASOPTIONS, "False")
                    eachfield.set(LINES, "2")
                    # refresh fieldtypes after update
                    fieldtype = eachfield.attrib.get(TYPE)
                # in ocr boolean fields should be shown as options
                if fieldtype == "boolean":
                    eachfield.set(HASOPTIONS, "True")
                # fields removed which need not be displayed
                if eachfield.attrib.get("readable", "False") == "False" and \
                   eachfield.attrib.get("writable", "False") == "False":
                    eachresource.remove(eachfield)
                    continue
                # drop empty option entries from select fields
                if eachfield.attrib.get(HASOPTIONS, "False") == "True" and \
                   eachfield.attrib.get(TYPE) != "boolean":
                    s3ocrselect = eachfield.getchildren()[0]
                    for eachoption in s3ocrselect.iterchildren():
                        if eachoption.text == "" or eachoption.text == None:
                            s3ocrselect.remove(eachoption)
                            continue
        return s3ocr_root
    def OCRPDFManager(self):
        """
        Produce OCR-compatible PDF forms.

        Walks the s3ocr etree produced by self.__s3OCREtree(), appends
        the printable flowables (section headers, questions, option
        boxes, input boxes) to self.content, and mirrors the layout
        into self.layoutEtree for later OCR processing.
        """
        T = current.T
        manager = current.manager
        s3ocr_root = self.__s3OCREtree() # get element s3xml
        self.s3ocrxml = etree.tostring(s3ocr_root, pretty_print=DEBUG)
        self.content = []
        s3ocr_layout_etree = self.layoutEtree
        # Define font size
        titlefontsize = 18
        sectionfontsize = 15
        regularfontsize = 13
        hintfontsize = 10
        # etree labels
        ITEXT = "label"
        HINT = "comment"
        TYPE = "type"
        HASOPTIONS = "has_options"
        LINES = "lines"
        BOXES = "boxes"
        REFERENCE = "reference"
        RESOURCE = "resource"
        # l10n
        l10n = {
            "datetime_hint": {
                "date": T("fill in order: day(2) month(2) year(4)"),
                "datetime": T("fill in order: hour(2) min(2) day(2) month(2) year(4)"),
                },
            "boolean": {
                "yes": T("Yes"),
                "no": T("No"),
                },
            "select": {
                "multiselect": T("Select one or more option(s) that apply"),
                "singleselect": T("Select any one option that apply"),
                },
            }
        # Print the etree
        for eachresource in s3ocr_root:
            # Create resource element of ocr layout xml
            s3ocr_layout_resource_etree =\
                etree.SubElement(s3ocr_layout_etree,
                                 "resource", name=eachresource.attrib.get("name"))
            styleSheet = getStyleSheet()
            # Section header: horizontal rules around the resource label
            self.content.append(DrawHrLine(0.5))
            self.content.append(Paragraph(html_unescape_and_strip(eachresource.attrib.get(ITEXT,
                                                                                          eachresource.attrib.get("name"))),
                                          styleSheet["Section"]))
            self.content.append(DrawHrLine(0.5))
            for eachfield in eachresource.iterchildren():
                # Create field element of ocr layout xml
                s3ocr_layout_field_etree =\
                    etree.SubElement(s3ocr_layout_resource_etree,
                                     "field",
                                     name=eachfield.attrib.get("name"),
                                     type=eachfield.attrib.get("type"))
                if eachfield.attrib.get(REFERENCE) == "1":
                    s3ocr_layout_field_etree.set(REFERENCE,
                                                 "1")
                    s3ocr_layout_field_etree.set(RESOURCE,
                                                 eachfield.attrib.get(RESOURCE))
                fieldlabel = eachfield.attrib.get(ITEXT)
                spacing = " " * 5
                fieldhint = self.__trim(eachfield.attrib.get(HINT))
                # Question text, with the hint in parentheses if present
                if fieldhint != "" and fieldhint != None:
                    self.content.append(Paragraph(html_unescape_and_strip("%s%s( %s )" % \
                                                                              (fieldlabel,
                                                                               spacing,
                                                                               fieldhint)),
                                                  styleSheet["Question"]))
                else:
                    self.content.append(Paragraph(html_unescape_and_strip(fieldlabel),
                                                  styleSheet["Question"]))
                if eachfield.attrib.get("readable", "False") == "True" and \
                   eachfield.attrib.get("writable", "False") == "False":
                    # Read-only field: print its default value only
                    self.content.append(Paragraph(html_unescape_and_strip(eachfield.attrib.get("default",
                                                                                               "No default Value")),
                                                  styleSheet["DefaultAnswer"]))
                    # Remove the layout component of empty fields
                    s3ocr_layout_resource_etree.remove(s3ocr_layout_field_etree)
                elif eachfield.attrib.get(HASOPTIONS) == "True":
                    fieldtype = eachfield.attrib.get(TYPE)
                    # if the field has to be shown with options
                    if fieldtype == "boolean":
                        bool_text = l10n.get("boolean")
                        self.content.append(DrawOptionBox(bool_text.get("yes").\
                                                              decode("utf-8"),
                                                          s3ocr_layout_field_etree,
                                                          "yes"))
                        self.content.append(DrawOptionBox(bool_text.get("no").\
                                                              decode("utf-8"),
                                                          s3ocr_layout_field_etree,
                                                          "no"))
                    else:
                        if fieldtype == "multiselect":
                            option_hint = l10n.get("select").get("multiselect")
                        else:
                            option_hint = l10n.get("select").get("singleselect")
                        s3ocrselect = eachfield.getchildren()[0]
                        numoptions = len(s3ocrselect.getchildren())
                        if numoptions <= MAX_FORM_OPTIONS_LIMIT:
                            # NOTE(review): "limitcrossed" is set when the
                            # option count is *within* the limit — the name
                            # suggests the opposite condition; confirm the
                            # intended semantics with the OCR consumer.
                            s3ocr_layout_field_etree.attrib["limitcrossed"] = "1"
                            self.content.append(DrawHintBox(option_hint.\
                                                                decode("utf-8")))
                            for eachoption in s3ocrselect.iterchildren():
                                self.content.append(DrawOptionBox(eachoption.text,
                                                                  s3ocr_layout_field_etree,
                                                                  eachoption.attrib.get("value")))
                        else:
                            # Too many options: fall back to free-text boxes
                            self.content.append(DrawHintBox(T("Enter a value carefully without spelling mistakes, this field will be crosschecked.").decode("utf-8")))
                            for eachtextbox in xrange(2):
                                self.content.append(StringInputBoxes(numBoxes=None,
                                                                     etreeElem=s3ocr_layout_field_etree))
                else:
                    # It is a text field
                    fieldtype = eachfield.attrib.get(TYPE)
                    BOXES_TYPES = ["string", "textbox", "integer",
                                   "double", "date", "datetime",]
                    if fieldtype in BOXES_TYPES:
                        if fieldtype in ["string", "textbox"]:
                            #form.linespace(3)
                            num_lines = int(eachfield.attrib.get("lines",
                                                                 1))
                            for eachline in xrange(num_lines):
                                self.content.append(StringInputBoxes(numBoxes=None,
                                                                     etreeElem=s3ocr_layout_field_etree))
                        elif fieldtype in ["integer", "double"]:
                            num_boxes = int(eachfield.attrib.get("boxes",
                                                                 9))
                            self.content.append(StringInputBoxes(numBoxes=num_boxes,
                                                                 etreeElem=s3ocr_layout_field_etree))
                        elif fieldtype in ["date", "datetime"]:
                            # Print hint
                            hinttext = \
                                l10n.get("datetime_hint").get(fieldtype).decode("utf-8")
                            self.content.append(DrawHintBox(hinttext))
                            if fieldtype == "datetime":
                                self.content.append(DateTimeBoxes(s3ocr_layout_field_etree))
                            elif fieldtype == "date":
                                self.content.append(DateBoxes(s3ocr_layout_field_etree))
                    else:
                        self.r.error(501, current.manager.PARSE_ERROR)
                        # NOTE(review): sys.stderr is not callable — if this
                        # line is ever reached it raises TypeError; probably
                        # meant `print >> sys.stderr, ...` (it may be
                        # unreachable if r.error() raises/redirects; confirm).
                        print sys.stderr("%s :invalid field type: %s" %\
                                             (eachfield.attrib.get("name"),
                                              fieldtype))
        return
def __getOCRLayout(self):
"""
return layout file
@return: layout xml for the generated OCR form
"""
prettyprint = True if DEBUG else False
#print etree.tostring(self.layoutEtree, pretty_print=prettyprint)
return etree.tostring(self.layoutEtree, pretty_print=prettyprint)
def __trim(self, text):
"""
Helper to trim off any enclosing paranthesis
@param text: text which need to be trimmed
@return: text with front and rear paranthesis stripped
"""
if isinstance(text, str) and \
text[0] == "(" and \
text[-1] == ")":
text = text[1:-1]
return text
def __update_dbmeta(self, formUUID, layoutXML, numPages):
"""
Store the PDF layout information into the database/disk.
@param formUUID: uuid of the generated form
@param layoutXML: layout xml of the generated form
@param numPages: number of pages in the generated form
"""
layout_file_stream = StringIO(layoutXML)
layout_file_name = "%s_xml" % formUUID
s3ocrxml_file_stream = StringIO(self.s3ocrxml)
s3ocrxml_file_name = "%s_ocrxml" % formUUID
db = current.db
tablename = "ocr_meta"
rows = db(db[tablename]["form_uuid"] == formUUID).select()
row = rows[0]
row.update_record(layout_file=db[tablename]["layout_file"].store(\
layout_file_stream,
layout_file_name),
s3ocrxml_file=db[tablename]["s3ocrxml_file"].store(\
s3ocrxml_file_stream,
s3ocrxml_file_name),
pages=numPages)
    def __book_revision(self, formUUID, formResourceName):
        """
        Book a revision number for the current operation in ocr_meta.

        @param formUUID: uuid of the generated form
        @param formResourceName: name of the eden resource
        @return: the booked revision identifier (6 uppercase hex digits)
        """
        db = current.db
        tablename = "ocr_meta"
        # determining revision
        #selector = db[tablename]["revision"].max()
        #rows = db(db[tablename]["resource_name"]==formResourceName).select(selector)
        #row = rows.first()
        #revision = 0 if (row[selector] == None) else (row[selector] + 1)
        # to make the table migratable the revision is derived from a
        # name-based uuid instead of a db-side max()+1 counter
        import uuid
        # NOTE(review): uuid.uuid5() requires a uuid.UUID instance as
        # its namespace argument - this assumes formUUID is a UUID
        # object, not a plain string; confirm against the caller
        revision = uuid.uuid5(formUUID, formResourceName).hex.upper()[:6]
        db[tablename].insert(form_uuid=formUUID,
                             resource_name=formResourceName,
                             revision=revision)
        return revision
def defaultTitle(self, resource):
"""
Method to extract a generic title from the resource using the
crud strings
@param: resource: a S3Resource object
@return: the title as a String
"""
try:
return current.response.s3.crud_strings.get(resource.table._tablename).get("title_list")
except:
# No CRUD Strings for this resource
return current.T(resource.name.replace("_", " ")).decode("utf-8")
def setMargins(self, left=None, right=None, top=None, bottom=None):
"""
Method to set the margins of the document
@param left: the size of the left margin, default None
@param right: the size of the right margin, default None
@param top: the size of the top margin, default None
@param bottom: the size of the bottom margin, default None
The margin is only changed if a value is provided, otherwise the
last value that was set will be used. The original values are set
up to be an inch - in newDocument()
@todo: make this for a page rather than the document
"""
if left != None:
self.doc.leftMargin = left
self.leftMargin = left
else:
self.doc.leftMargin = self.leftMargin
if right != None:
self.doc.rightMargin = right
self.rightMargin = right
else:
self.doc.rightMargin = self.rightMargin
if top != None:
self.doc.topMargin = top
self.topMargin = top
else:
self.doc.topMargin = self.topMargin
if bottom != None:
self.doc.bottomMargin = bottom
self.bottomMargin = bottom
else:
self.doc.bottomMargin = self.bottomMargin
    def getPageWidth(self):
        """Return the printable width of the document frame (points)."""
        return self.doc.width
    def getPageHeight(self):
        """Return the printable height of the document frame (points)."""
        return self.doc.height
    def setPortrait(self):
        """
        Set the orientation of the document to be portrait.

        @todo: make this for a page rather than the document
        @todo: change the hardcoded page size
        """
        self.doc.pagesize = portrait(self.paper_size)
    def setLandscape(self):
        """
        Set the orientation of the document to be landscape.

        @todo: make this for a page rather than the document
        @todo: change the hardcoded page size
        """
        self.doc.pagesize = landscape(self.paper_size)
def addTable(self,
resource = None,
raw_data = None,
list_fields=None,
report_groupby=None,
report_hide_comments=False
):
"""
Method to create a table that will be inserted into the document
@param resource: A S3Resource object
@param list_Fields: A list of field names
@param report_groupby: A field name that is to be used as a sub-group
All the records that share the same report_groupby value will
be clustered together
@param report_hide_comments: Any comment field will be hidden
This uses the class S3PDFTable to build and properly format the table.
The table is then built and stored in the document flow ready for
generating the pdf.
If the table is too wide for the page then it will automatically
adjust the margin, font or page orientation. If it is still too
wide then the table will be split across multiple pages.
"""
table = S3PDFTable(document=self,
resource=resource,
raw_data=raw_data,
list_fields=list_fields,
groupby=report_groupby,
hide_comments=report_hide_comments
)
result = table.build()
if result != None:
self.content += result
def extractrHeader(self,
rHeader
):
"""
Method to convert the HTML generated for a rHeader into PDF
"""
# let's assume that it's a callable rHeader
try:
# switch the representation to html so the rHeader doesn't barf
repr = self.r.representation
self.r.representation = "html"
html = rHeader(self.r)
self.r.representation = repr
except:
# okay so maybe it wasn't ... it could be an HTML object
html = rHeader
parser = S3html2pdf(pageWidth = self.getPageWidth(),
exclude_class_list=["tabs"])
result = parser.parse(html)
if result != None:
self.content += result
def addrHeader(self,
resource = None,
raw_data = None,
list_fields=None,
report_hide_comments=False
):
"""
Method to create a rHeader table that is inserted into the document
@param resource: A S3Resource object
@param list_Fields: A list of field names
@param report_hide_comments: Any comment field will be hidden
This uses the class S3PDFTable to build and properly format the table.
The table is then built and stored in the document flow ready for
generating the pdf.
"""
rHeader = S3PDFRHeader(self,
resource,
raw_data,
list_fields,
report_hide_comments
)
result = rHeader.build()
if result != None:
self.content += result
def addPlainTable(self, text, style=None, append=True):
table = Table(text, style=style)
if append:
self.content.append(table)
return table
def addParagraph(self, text, style=None, append=True):
"""
Method to create a paragraph that may be inserted into the document
@param text: The text for the paragraph
@param append: If True then the paragraph will be stored in the
document flow ready for generating the pdf.
@return The paragraph
This method can return the paragraph rather than inserting into the
document. This is useful if the paragraph needs to be first
inserted in another flowable, before being added to the document.
An example of when this is useful is when large amounts of text
(such as a comment) are added to a cell of a table.
"""
if text != "":
if style == None:
styleSheet = getSampleStyleSheet()
style = styleSheet["Normal"]
para = Paragraph(text, style)
if append:
self.content.append(para)
return para
return ""
def addSpacer(self, height, append=True):
"""
Add a spacer to the story
"""
spacer = Spacer(1,height)
if append:
self.content.append(spacer)
return spacer
    def addOverlay(self, callback, data):
        """
        Add an overlay to the page.

        @param callback: function called at draw time with the canvas
        @param data: opaque payload passed through to the callback
        """
        self.content.append(Overlay(callback, data))
def addBoxes(self, cnt, append=True):
"""
Add a square text boxes for text entry to the story
"""
boxes = StringInputBoxes(cnt,etree.Element("dummy"))
if append:
self.content.append(boxes)
return boxes
    def throwPageBreak(self):
        """
        Force a page break in the report.
        """
        self.content.append(PageBreak())
    def changePageTitle(self, newTitle):
        """
        Change the title used by the page header from the next page on.
        (The original docstring was copy-pasted from throwPageBreak.)
        """
        self.content.append(ChangePageTitle(self, newTitle))
def getStyledTable(self, table, colWidths=None, rowHeights = None, style=[]):
"""
Method to create a simple table
"""
(list,style) = self.addCellStyling(table, style)
return Table(list,
colWidths=colWidths,
rowHeights=rowHeights,
style=style,
)
    def getTableMeasurements(self, tempTable):
        """
        Calculate the dimensions of the table.

        Builds the table into a throw-away in-memory document so that
        reportlab computes the column widths and row heights.

        @param tempTable: the Table flowable to measure
        @return: tuple (colWidths, rowHeights) as computed by reportlab
        """
        tempDoc = EdenDocTemplate(StringIO())
        # no-op header/footer callbacks - we only need the layout pass
        tempDoc.setPageTemplates(lambda x, y: None, lambda x, y: None)
        tempDoc.pagesize = portrait(self.paper_size)
        tempDoc.build([tempTable], canvasmaker=canvas.Canvas)
        # _colWidths/_rowHeights are filled in by the build pass
        return (tempTable._colWidths, tempTable._rowHeights)
def cellStyle(self, style, cell):
"""
Add special styles to the text in a cell
"""
if style == "*GREY":
return [("TEXTCOLOR",cell, cell, colors.lightgrey)]
elif style == "*RED":
return [("TEXTCOLOR",cell, cell, colors.red)]
return []
def addCellStyling(self, table, style):
"""
Add special styles to the text in a table
"""
row = 0
for line in table:
col = 0
for cell in line:
try:
if cell.startswith("*"):
(instruction,sep,text) = cell.partition(" ")
style += self.cellStyle(instruction, (col, row))
table[row][col] = text
except:
pass
col += 1
row += 1
return (table, style)
    def setHeaderBanner(self, image):
        """
        Add a banner to a page; used by pageHeader.

        @param image: banner image path, relative to the request folder
        """
        self.headerBanner = os.path.join(current.request.folder, image)
    def pageHeader(self, canvas, doc):
        """
        Generate the basic look of a page header: optional logo,
        optional banner, centred title and a UTC timestamp.
        It is a callback method and will not be called directly.

        @param canvas: the reportlab canvas being drawn on
        @param doc: the document template (provides pagesize)
        """
        canvas.saveState()
        if self.logo and os.path.exists(self.logo):
            # Scale the logo to 1 inch high, preserving aspect ratio
            im = Image.open(self.logo)
            (iwidth, iheight) = im.size
            height = 1.0 * inch
            width = iwidth * (height / iheight)
            canvas.drawImage(self.logo,
                             inch,
                             doc.pagesize[1] - 1.2 * inch,
                             width=width,
                             height=height)
        if self.headerBanner and os.path.exists(self.headerBanner):
            # Banner is drawn to the right of the logo, 0.75 inch high
            im = Image.open(self.headerBanner)
            (iwidth, iheight) = im.size
            height = 0.75 * inch
            width = iwidth * (height / iheight)
            canvas.drawImage(self.headerBanner,
                             3 * inch,
                             doc.pagesize[1] - 0.95 * inch,
                             width=width,
                             height=height)
        canvas.setFont("Helvetica-Bold", 16)
        canvas.drawCentredString(doc.pagesize[0] / 2.0,
                                 doc.pagesize[1] - 1.3 * inch, self.title
                                 )
        canvas.setFont("Helvetica-Bold", 9)
        # Timestamp in the top-right corner, rendered in UTC
        now = S3DateTime.datetime_represent(datetime.utcnow(), utc=True)
        canvas.drawCentredString(doc.pagesize[0] - 1.5 * inch,
                                 doc.pagesize[1] - 1.3 * inch, now
                                 )
        canvas.restoreState()
    def pageFooter(self, canvas, doc):
        """
        Generate the basic look of a page footer (page number plus the
        title of the previous page).
        It is a callback method and will not be called directly.

        @param canvas: the reportlab canvas being drawn on
        @param doc: the document template (provides the page number)
        """
        canvas.saveState()
        canvas.setFont("Helvetica", 9)
        # The footer shows the title that was current when the page
        # started, so print the previously remembered title ...
        canvas.drawString(inch, 0.75 * inch,
                          "Page %d %s" % (doc.page,
                                          self.prevtitle
                                          )
                          )
        # ... and remember the current title for the next footer
        self.prevtitle = self.title
        canvas.restoreState()
def buildDoc(self):
"""
This method will build the pdf document.
The response headers are set up for a pdf document and the document
is then sent
@return the document as a stream of characters
@todo add a proper template class so that the doc.build is more generic
"""
styleSheet = getSampleStyleSheet()
self.doc.build(self.content,
canvasmaker=canvas.Canvas)
self.output.seek(0)
return self.output.read()
# Nested classes that extended external libraries
# If the external library failed to be imported then we get a stacktrace
    if reportLabImported:
        class S3PDFOCRForm(BaseDocTemplate):
            """
            Extended class of the BaseDocTemplate to be used with OCR
            Forms. The form has a standard page template that draws
            handles on the page in the four corners, the middle of the
            side and bottom edges.

            @author: Shiv Deepak
            """

            # Page templates are installed by build(); forbid callers
            # from passing their own
            _invalidInitArgs = ("pageTemplates",)

            def __init__(self, filename, **attr):
                """
                @param filename: output file path or file-like object
                @keyword formUUID: uuid of the generated form
                @keyword formResourceName: name of the eden resource
                @keyword formRevision: revision id booked for this form
                @keyword pdfTitle: title drawn at the top of the form
                """
                BaseDocTemplate.__init__(self, filename, **attr)
                self.formUUID = attr.get("formUUID", "")
                self.formResourceName = attr.get("formResourceName", "")
                self.formRevision = attr.get("formRevision", "")
                self.pdfTitle = attr.get("pdfTitle", "OCR Form")
                self.content = []
                # Fixed 50pt margins on all sides
                self.leftMargin = 50
                self.rightMargin = 50
                self.topMargin = 50
                self.bottomMargin = 50
                settings = current.deployment_settings
                if settings.get_paper_size() == "Letter":
                    self.paper_size = LETTER
                else:
                    self.paper_size = A4

            def handle_pageBegin(self):
                """
                Override base method to add a change of page template
                after the first page.
                """
                self._handle_pageBegin()
                self._handle_nextPageTemplate('Later')

            def build(self, content=[], canvasmaker=canvas.Canvas, **attr):
                """
                Build the document using the flowables.

                @param content: flowables appended after the title and
                    instruction paragraphs.
                    NOTE: mutable default, but it is only read
                    (extend) here, never mutated, so it is harmless.
                """
                T = current.T
                self._calc()    # in case we changed margins sizes etc
                frameT = Frame(self.leftMargin,
                               self.bottomMargin,
                               self.width,
                               self.height,
                               id='normal')
                self.addPageTemplates([PageTemplate(id='First',
                                                    frames=frameT,
                                                    onPage=self.firstPageTemplate,
                                                    pagesize=self.pagesize),
                                       PageTemplate(id='Later',
                                                    frames=frameT,
                                                    onPage=self.laterPageTemplate,
                                                    pagesize=self.pagesize)])
                # Generate PDF header
                ocrInstructions = [
                    T("1. Fill the necessary fields in BLOCK CAPITAL letters.").decode("utf-8"),
                    T("2. Always use one box per letter and leave one box space to separate words.").decode("utf-8"),
                    T("3. Fill in the circles completely.").decode("utf-8"),
                    ]
                # Put pdf title
                styleSheet = getStyleSheet()
                self.content = [Paragraph(html_unescape_and_strip(self.pdfTitle), styleSheet["Title"])]
                # Print input instructions
                for eachInstruction in ocrInstructions:
                    self.content.append(Paragraph(html_unescape_and_strip(eachInstruction),
                                                  styleSheet["Instructions"]))
                # Add content
                self.content.extend(content)
                # Build OCRable PDF form
                BaseDocTemplate.build(self, self.content, canvasmaker=canvasmaker)
                # getPageNumber() points at the page after the last one
                self.numPages = self.canv.getPageNumber() - 1

            def firstPageTemplate(self, canvas, doc):
                """
                Template for first page (identical to later pages)
                """
                self.laterPageTemplate(canvas, doc)

            def laterPageTemplate(self, canvas, doc):
                """
                Template for all pages but first
                """
                self.pageDecorate(canvas, doc)
                self.pageMeta(canvas, doc)

            def pageDecorate(self, canvas, doc):
                """
                Draw the black 20x20pt registration squares which the
                OCR reader later uses to locate and deskew the page
                """
                canvas.saveState()
                pagewidth, pageheight = self.paper_size
                canvas.rect(20, 20, 20, 20, fill=1)                              # bottom left
                canvas.rect(pagewidth - 40, 20, 20, 20, fill=1)                  # bottom right
                canvas.rect(20, pageheight - 40, 20, 20, fill=1)                 # top left
                canvas.rect(pagewidth/2 - 10, 20, 20, 20, fill=1)                # bottom middle
                canvas.rect(20, pageheight/2 - 10, 20, 20, fill=1)               # middle left
                canvas.rect(pagewidth - 40, pageheight - 40, 20, 20, fill=1)     # top right
                canvas.rect(pagewidth - 40, pageheight/2 - 10, 20, 20, fill=1)   # middle right
                canvas.restoreState()

            def pageMeta(self, canvas, doc):
                """
                Put page number and other meta info on each page
                """
                canvas.saveState()
                canvas.setFont("Helvetica", 10)
                pageNumberText = "Page %s" % self.canv.getPageNumber()
                pagewidth, pageheight = self.paper_size
                metaHeight = 27
                # right-align the page number within the bottom margin
                pageNumberWidth = pagewidth - (((len(pageNumberText)+2)*5) + 40)
                pageNumberHeight = metaHeight
                canvas.drawString(pageNumberWidth, pageNumberHeight, pageNumberText)
                uuidText = "UUID %s" % self.formUUID
                uuidWidth = 40 + 5
                uuidHeight = metaHeight
                canvas.drawString(uuidWidth, uuidHeight, uuidText)
                resourceNameText = self.formResourceName
                revisionText = self.formRevision
                otherMetaText = "Resource %s Revision %s" % (resourceNameText,
                                                             revisionText)
                otherMetaWidth = (pagewidth/2) + 20
                otherMetaHeight = metaHeight
                canvas.drawString(otherMetaWidth, otherMetaHeight, otherMetaText)
                canvas.restoreState()

        # end of class S3PDFOCRForm
# end of class S3PDF
# -----------------------------------------------------------------------------
class S3PDFDataSource:
    """
    Class to get the labels and the data from the database
    """

    def __init__(self, obj):
        """
        Create the S3PDFDataSource object.

        @param obj: object carrying resource, list_fields,
                    report_groupby and hideComments attributes
                    (e.g. S3PDFTable or S3PDFRHeader)
        """
        self.resource = obj.resource
        self.list_fields = obj.list_fields
        self.report_groupby = obj.report_groupby
        self.hideComments = obj.hideComments
        self.fields = None
        self.labels = None
        self.records = False

    def select(self):
        """
        Get the data from the database.

        If the list of fields is provided then only these will be
        returned, otherwise all readable fields on the table will be
        returned. The id field is always hidden and, if hideComments
        is true, the comments field is hidden too.
        If a groupby field is provided then this will be used as the
        sort criteria, otherwise it will sort by the first field.
        The returned records are stored in the records property.
        """
        response = current.response
        resource = self.resource
        list_fields = self.list_fields
        if not list_fields:
            # Filter into a new list instead of removing elements from
            # the list while iterating over it (the original skipped
            # the element following each removal)
            fields = [f for f in resource.readable_fields()
                        if f.type != "id" and
                           not (self.hideComments and
                                f.name == "comments")]
            if not fields:
                # Fall back to just the id column (was "table.id",
                # but table was not bound until further below)
                fields = [resource.table.id]
            list_fields = [f.name for f in fields]
        else:
            # Strip component index selectors
            indices = s3codec.S3Codec.indices
            list_fields = [f for f in list_fields if f not in indices]
        # Filter and orderby
        if response.s3.filter is not None:
            resource.add_filter(response.s3.filter)
        orderby = self.report_groupby
        # Retrieve the resource contents
        lfields, joins, left, distinct = resource.resolve_selectors(list_fields)
        fields = [f for f in lfields if f.show]
        headers = [f.label for f in lfields if f.show]
        if orderby is not None:
            # NOTE(review): this orders by the first shown field rather
            # than by the groupby field - looks suspicious, but the
            # behaviour is preserved as-is
            orderby = fields[0].field
        self.records = resource.sqltable(fields=list_fields,
                                         start=None,
                                         limit=None,
                                         orderby=orderby,
                                         no_ids=True,
                                         as_rows=True)
        # Pass to getLabels
        self.labels = headers
        # Pass to getData
        self.fields = fields
        # Better to return a PDF, even if it has no records
        #if not self.records:
        #    current.session.warning = current.manager.ERROR.NO_RECORDS
        #    redirect(URL(extension=""))

    # -------------------------------------------------------------------------
    def getLabels(self):
        """
        Get the field labels, with the report_groupby label (if any)
        removed.

        @return: list of column labels
        """
        # Collect the labels from the select() call; build a new list
        # rather than calling remove() while iterating
        labels = self.labels
        if self.report_groupby is not None:
            groupby_label = self.report_groupby.label
            labels = [label for label in labels if label != groupby_label]
        return labels

    # -------------------------------------------------------------------------
    def getData(self):
        """
        Format the data from the database.

        Extracts the data from the returned records list. If there is
        a groupby then the records will be grouped by this field: for
        each new value the groupby value is placed in a one-element
        row, followed by the rows of records that share this value.
        Without a groupby the result is a simple matrix of rows by
        fields.

        @return: tuple (subheadingList, data) where subheadingList
                 holds the row numbers of the group-header rows
        """
        represent = current.manager.represent
        # Build the data list
        data = []
        currentGroup = None
        subheadingList = []
        rowNumber = 1
        for item in self.records:
            row = []
            if self.report_groupby is not None:
                # @ToDo: non-XML output should use Field.represent
                # - this saves the extra parameter
                groupData = represent(self.report_groupby,
                                      record=item,
                                      strip_markup=True,
                                      non_xml_output=True
                                      )
                if groupData != currentGroup:
                    # New group value: emit a subheading row
                    currentGroup = groupData
                    data.append([groupData])
                    subheadingList.append(rowNumber)
                    rowNumber += 1
            for field in self.fields:
                if self.report_groupby is not None:
                    if field.label == self.report_groupby.label:
                        continue
                if field.field:
                    text = represent(field.field,
                                     record=item,
                                     strip_markup=True,
                                     non_xml_output=True,
                                     extended_comments=True
                                     )
                # NB "not field.field" must be tested first: the
                # original tested text == "" first, which raised
                # UnboundLocalError when the very first field had no
                # .field attribute set
                if not field.field or text == "":
                    # some represents replace the data with an image
                    # which will then be lost by the strip_markup, so
                    # get back what we can from the raw row
                    tname = field.tname
                    fname = field.fname
                    if fname in item:
                        text = item[fname]
                    elif tname in item and fname in item[tname]:
                        text = item[tname][fname]
                    else:
                        text = ""
                row.append(text)
            data.append(row)
            rowNumber += 1
        return (subheadingList, data)
# end of class S3PDFDataSource
# -----------------------------------------------------------------------------
class S3PDFRHeader():
    """
    Class to build a simple table that holds the details of one record,
    which can then be placed in a pdf document

    This class doesn't need to be called directly.
    Rather see S3PDF.addrHeader()
    """

    def __init__(self,
                 document,
                 resource=None,
                 raw_data=None,
                 list_fields=None,
                 hide_comments=False
                 ):
        """
        Create a rHeader object.

        @param document: A S3PDF object
        @param resource: A S3Resource object
        @param raw_data: pre-extracted data to use instead of resource
        @param list_fields: A list of field names
        @param hide_comments: Any comment field will be hidden
        """
        self.pdf = document
        self.resource = resource
        self.raw_data = raw_data
        self.list_fields = list_fields
        self.hideComments = hide_comments
        self.report_groupby = None
        self.data = []
        self.subheadingList = []
        self.labels = []
        self.fontsize = 10

    def build(self):
        """
        Build the table.

        @return: A list of Table objects (normally a single-element
                 list), or None if there is no data
        """
        data = []
        if self.resource is not None:
            ds = S3PDFDataSource(self)
            # Get records
            ds.select()
            self.labels = ds.getLabels()
            self.data.append(self.labels)
            (self.subheadingList, data) = ds.getData()
            # Was "self.data + data", which built the concatenation
            # and then discarded it - extend in place instead
            self.data += data
        if self.raw_data is not None:
            self.data = self.raw_data
        self.rheader = []
        if not self.data:
            return None
        # Pair each label with the first record's value; missing
        # values are shown as "-"
        for index in range(len(self.labels)):
            try:
                value = data[0][index]
            except (IndexError, KeyError, TypeError):
                value = "-"
            self.rheader.append([self.labels[index],
                                 value]
                                )
        content = []
        style = [("FONTSIZE", (0, 0), (-1, -1), self.fontsize),
                 ("VALIGN", (0, 0), (-1, -1), "TOP"),
                 ("FONTNAME", (0, 0), (0, -1), "Helvetica-Bold"),
                 ("FONTNAME", (1, 0), (1, -1), "Helvetica"),
                 ]
        (self.rheader, style) = self.pdf.addCellStyling(self.rheader, style)
        table = Table(self.rheader,
                      repeatRows=1,
                      style=style,
                      hAlign="LEFT",
                      )
        content.append(table)
        return content

# end of class S3PDFRHeader
# end of class S3PDFRHeader
# =============================================================================
# Custom Flowables (used by OCR)
if reportLabImported:
    class DrawHrLine(Flowable):
        """ Flowable that draws a horizontal rule across the page """

        def __init__(self, lineThickness):
            """
            @param lineThickness: height (points) reserved for the rule
            """
            Flowable.__init__(self)
            # Honour the caller's thickness; the original hard-coded
            # "self.lineThickness = 1" and silently ignored the argument
            self.lineThickness = lineThickness
            settings = current.deployment_settings
            if settings.get_paper_size() == "Letter":
                self.paper_size = LETTER
            else:
                self.paper_size = A4

        def draw(self):
            """ Draw the line 5pt below the flowable's origin """
            pagewidth, pageheight = self.paper_size
            self.canv.line(0, -5, pagewidth - 100, -5)

        def wrap(self, availWidth, availHeight):
            """ Reserve the full width and lineThickness of height """
            self._width = availWidth
            self._height = self.lineThickness
            return self._width, self._height
    class StringInputBoxes(Flowable):
        """
        Draw a row of square character-input boxes spanning the line,
        and record the box geometry in the layout etree so the OCR
        reader can locate the boxes on the scanned page.
        """

        def __init__(self, numBoxes=None, etreeElem=None):
            """
            @param numBoxes: number of boxes to draw; None means "as
                many as fit across the printable width"
            @param etreeElem: parent element of the layout etree that
                receives a <textbox> child describing the boxes
            """
            Flowable.__init__(self)
            self.spaceAfter = 2
            self.sideLength = 15    # box side length in points
            self.numBoxes = numBoxes
            self.fontsize = 14
            self.etreeElem = etreeElem
            settings = current.deployment_settings
            if settings.get_paper_size() == "Letter":
                self.paper_size = LETTER
            else:
                self.paper_size = A4

        def draw(self):
            canv = self.canv
            pagewidth, pageheight = self.paper_size
            # Default: as many boxes as fit in the printable width
            numBoxes = int((pagewidth -\
                                (100 + self.fontsize)) / self.sideLength)
            if self.numBoxes != None and\
                    isinstance(self.numBoxes, int):
                numBoxes = self.numBoxes
            canv.setLineWidth(0.90)
            canv.setStrokeGray(0.9)
            widthPointer = self.fontsize
            # values are set manually
            xpadding = 6    # default
            ypadding = 4
            margin = 50     # as set
            markerOrigin = (29, 29)    # top left marker location
            # reportlab's coordinate system uses bottom left as the
            # origin; we have to take the top left marker as origin
            # to provide input for Python Imaging
            xCord = pagewidth -\
                    (self.layoutCords[0] + xpadding + margin) -\
                    markerOrigin[0] +\
                    self.fontsize
            yCord = pageheight -\
                    (self.layoutCords[1] + ypadding + margin) -\
                    markerOrigin[1]
            # Draw the boxes side by side
            for eachbox in xrange(numBoxes):
                self.canv.rect(widthPointer,
                               0,
                               self.sideLength,
                               self.sideLength)
                widthPointer += self.sideLength
            # Record the geometry for the OCR layout file
            StringInputBoxEtree = etree.SubElement(self.etreeElem,
                                                   "textbox",
                                                   x="%s" % xCord,
                                                   y="%s" % yCord,
                                                   side="%s" % self.sideLength,
                                                   boxes="%s" % numBoxes,
                                                   page="%s" % self.canv.getPageNumber())
            StringInputBoxEtree.text = " "

        def wrap(self, availWidth, availHeight):
            # Remember the available space: draw() derives the absolute
            # page coordinates of the boxes from it
            self.layoutCords = availWidth, availHeight
            self._width = availWidth
            self._height = self.sideLength + self.spaceAfter
            return self._width, self._height
    class DateBoxes(Flowable):
        """
        Draw DD / MO / YYYY input boxes for a date field, and record
        their geometry in the layout etree for the OCR reader.
        """

        def __init__(self, etreeElem):
            """
            @param etreeElem: parent element of the layout etree that
                receives one <textbox> child per date component
            """
            Flowable.__init__(self)
            self.spaceAfter = 2
            self.sideLength = 15    # box side length in points
            self.fontsize = 14
            self.etreeElem = etreeElem
            settings = current.deployment_settings
            if settings.get_paper_size() == "Letter":
                self.paper_size = LETTER
            else:
                self.paper_size = A4

        def draw(self):
            canv = self.canv
            pagewidth, pageheight = self.paper_size
            canv.setLineWidth(0.90)
            canv.setStrokeGray(0.9)
            widthPointer = self.fontsize
            # values are set manually
            xpadding = 6    # default
            ypadding = 4
            margin = 50     # as set
            markerOrigin = (29, 29)    # top left marker location
            # reportlab's coordinate system uses bottom left as the
            # origin; we have to take the top left marker as origin
            # to provide input for Python Imaging
            xCord = pagewidth -\
                    (self.layoutCords[0] + xpadding + margin) -\
                    markerOrigin[0] +\
                    self.fontsize
            yCord = pageheight -\
                    (self.layoutCords[1] + ypadding + margin) -\
                    markerOrigin[1]
            # Draw 8 boxes in positions 1-10, leaving gaps at 3 and 6
            # to separate DD / MO / YYYY visually
            for eachbox in xrange(1, 11):
                if eachbox not in (3, 6):
                    self.canv.rect(widthPointer,
                                   0,
                                   self.sideLength,
                                   self.sideLength)
                widthPointer += 15
            # Record the geometry of each component (DD, MO, YYYY)
            DateBoxEtree = etree.SubElement(self.etreeElem,
                                            "textbox",
                                            x="%s" % xCord,
                                            y="%s" % yCord,
                                            side="%s" % self.sideLength,
                                            boxes="2",
                                            page="%s" % self.canv.getPageNumber())
            DateBoxEtree.text = "DD"
            DateBoxEtree = etree.SubElement(self.etreeElem,
                                            "textbox",
                                            x="%s" % (xCord + (self.sideLength * 3)),
                                            y="%s" % yCord,
                                            side="%s" % self.sideLength,
                                            boxes="2",
                                            page="%s" % self.canv.getPageNumber())
            DateBoxEtree.text = "MO"
            DateBoxEtree = etree.SubElement(self.etreeElem,
                                            "textbox",
                                            x="%s" % (xCord + (self.sideLength * 6)),
                                            y="%s" % yCord,
                                            side="%s" % self.sideLength,
                                            boxes="4",
                                            page="%s" % self.canv.getPageNumber())
            DateBoxEtree.text = "YYYY"

        def wrap(self, availWidth, availHeight):
            # Remember the available space: draw() derives the absolute
            # page coordinates of the boxes from it
            self.layoutCords = availWidth, availHeight
            self._width = availWidth
            self._height = self.sideLength + self.spaceAfter
            return self._width, self._height
    class DateTimeBoxes(Flowable):
        """
        Draw HH / MM / DD / MO / YYYY input boxes for a datetime field,
        and record their geometry in the layout etree for the OCR reader.
        """

        def __init__(self, etreeElem):
            """
            @param etreeElem: parent element of the layout etree that
                receives one <textbox> child per datetime component
            """
            Flowable.__init__(self)
            self.spaceAfter = 2
            self.sideLength = 15    # box side length in points
            self.fontsize = 14
            self.etreeElem = etreeElem
            settings = current.deployment_settings
            if settings.get_paper_size() == "Letter":
                self.paper_size = LETTER
            else:
                self.paper_size = A4

        def draw(self):
            canv = self.canv
            pagewidth, pageheight = self.paper_size
            canv.setLineWidth(0.90)
            canv.setStrokeGray(0.9)
            widthPointer = self.fontsize
            # values are set manually
            xpadding = 6    # default
            ypadding = 4
            margin = 50     # as set
            markerOrigin = (29, 29)    # top left marker location
            # reportlab's coordinate system uses bottom left as the
            # origin; we have to take the top left marker as origin
            # to provide input for Python Imaging
            xCord = pagewidth -\
                    (self.layoutCords[0] + xpadding + margin) -\
                    markerOrigin[0] +\
                    self.fontsize
            yCord = pageheight -\
                    (self.layoutCords[1] + ypadding + margin) -\
                    markerOrigin[1]
            # Draw 12 boxes in positions 1-17, leaving gaps to separate
            # HH / MM / DD / MO / YYYY visually
            for eachbox in xrange(1, 18):
                if eachbox not in (3, 6, 7, 10, 13):
                    self.canv.rect(widthPointer,
                                   0,
                                   self.sideLength,
                                   self.sideLength)
                widthPointer += 15
            # Record the geometry of each component
            DateTimeBoxEtree = etree.SubElement(self.etreeElem,
                                                "textbox",
                                                x="%s" % xCord,
                                                y="%s" % yCord,
                                                side="%s" % self.sideLength,
                                                boxes="2",
                                                page="%s" % self.canv.getPageNumber())
            DateTimeBoxEtree.text = "HH"
            DateTimeBoxEtree = etree.SubElement(self.etreeElem,
                                                "textbox",
                                                x="%s" % (xCord + (self.sideLength * 3)),
                                                y="%s" % yCord,
                                                side="%s" % self.sideLength,
                                                boxes="2",
                                                page="%s" % self.canv.getPageNumber())
            DateTimeBoxEtree.text = "MM"
            DateTimeBoxEtree = etree.SubElement(self.etreeElem,
                                                "textbox",
                                                x="%s" % (xCord + (self.sideLength * 7)),
                                                y="%s" % yCord,
                                                side="%s" % self.sideLength,
                                                boxes="2",
                                                page="%s" % self.canv.getPageNumber())
            DateTimeBoxEtree.text = "DD"
            DateTimeBoxEtree = etree.SubElement(self.etreeElem,
                                                "textbox",
                                                x="%s" % (xCord + (self.sideLength * 10)),
                                                y="%s" % yCord,
                                                side="%s" % self.sideLength,
                                                boxes="2",
                                                page="%s" % self.canv.getPageNumber())
            DateTimeBoxEtree.text = "MO"
            DateTimeBoxEtree = etree.SubElement(self.etreeElem,
                                                "textbox",
                                                x="%s" % (xCord + (self.sideLength * 13)),
                                                y="%s" % yCord,
                                                side="%s" % self.sideLength,
                                                boxes="4",
                                                page="%s" % self.canv.getPageNumber())
            DateTimeBoxEtree.text = "YYYY"

        def wrap(self, availWidth, availHeight):
            # Remember the available space: draw() derives the absolute
            # page coordinates of the boxes from it
            self.layoutCords = availWidth, availHeight
            self._width = availWidth
            self._height = self.sideLength + self.spaceAfter
            return self._width, self._height
    class DrawOptionBox(Flowable):
        """
        Draw a fill-in circle followed by its option label (no text
        wrapping), and record the circle geometry in the layout etree
        for the OCR reader.
        """

        def __init__(self, text, etreeElem, elemValue):
            """
            @param text: the option label to print beside the circle
            @param etreeElem: parent element of the layout etree that
                receives an <optionbox> child
            @param elemValue: the value stored when this option is
                filled in
            """
            Flowable.__init__(self)
            self.text = text
            self.fontsize = 14
            self.spaceAfter = 2
            self.etreeElem = etreeElem
            self.elemValue = elemValue
            settings = current.deployment_settings
            if settings.get_paper_size() == "Letter":
                self.paper_size = LETTER
            else:
                self.paper_size = A4

        def draw(self):
            canv = self.canv
            pagewidth, pageheight = self.paper_size
            canv.setLineWidth(0.90)
            canv.setStrokeGray(0.9)
            radius = (self.fontsize / 2) - 1
            # self.width/self.height are the flowable-relative origin
            # provided by the reportlab Flowable base class
            circleCenter = (self.width + self.fontsize,
                            self.height + (self.fontsize / 4) + 1)
            # values are set manually
            xpadding = 6    # default
            ypadding = 8
            margin = 50     # as set
            markerOrigin = (29, 29)    # top left marker location
            # reportlab's coordinate system uses bottom left as the
            # origin; we have to take the top left marker as origin
            # to provide input for Python Imaging
            xCord = pagewidth -\
                    (self.layoutCords[0] + xpadding + margin) -\
                    markerOrigin[0] +\
                    circleCenter[0]
            # NOTE(review): this adds circleCenter[0] (the x offset);
            # the symmetry with xCord suggests circleCenter[1] was
            # intended - confirm against the OCR reader before changing
            yCord = pageheight -\
                    (self.layoutCords[1] + ypadding + margin) -\
                    markerOrigin[1] +\
                    circleCenter[0]
            self.canv.circle(circleCenter[0],
                             circleCenter[1],
                             radius,
                             fill=0)
            self.canv.drawString(self.width + (self.fontsize * 2),
                                 self.height,
                                 html_unescape_and_strip(self.text))
            # Record the circle geometry and the option value
            optionBoxEtree = etree.SubElement(self.etreeElem,
                                              "optionbox",
                                              x="%s" % xCord,
                                              y="%s" % yCord,
                                              radius="%s" % radius,
                                              boxes="1",
                                              page="%s" % self.canv.getPageNumber())
            optionBoxEtree.set("value", self.elemValue)
            optionBoxEtree.text = self.text

        def wrap(self, availWidth, availHeight):
            # Remember the available space: draw() derives the absolute
            # page coordinates of the circle from it
            self.layoutCords = availWidth, availHeight
            self._width = (self.fontsize * (len(self.text) + 8)) / 2
            self._height = self.fontsize + self.spaceAfter
            return self._width, self._height
class DrawHintBox(Flowable):
""" write text without wrap """
def __init__(self, text=""):
Flowable.__init__(self)
self.text = text
self.fontsize = 12
self.spaceAfter = 6
settings = current.deployment_settings
if settings.get_paper_size() == "Letter":
self.paper_size = LETTER
else:
self.paper_size = A4
def draw(self):
canv = self.canv
canv.setFillGray(0.4)
self.canv.drawString(self.width+(self.fontsize/2),
self.height,
html_unescape_and_strip(self.text))
def wrap(self, availWidth, availHeight):
self._width = (self.fontsize*(len(self.text)+4))/2
self._height = self.fontsize + self.spaceAfter
return self._width, self._height
# end of custom Flowables
# Custom styleSheets
# Derive bold / italic / bold-italic variants of the base font via
# reportlab's tt2ps(face, bold, italic) helper
_baseFontNameB = tt2ps(_baseFontName, 1, 0)
_baseFontNameI = tt2ps(_baseFontName, 0, 1)
_baseFontNameBI = tt2ps(_baseFontName, 1, 1)
def getStyleSheet():
    """
    Return the paragraph style sheet used by the OCR form: the sample
    style sheet extended with Instructions, Section, Question and
    DefaultAnswer styles.
    """
    styleSheet = getSampleStyleSheet()
    # (name, parent style, alias, extra options)
    specs = (
        ("Instructions", "Bullet", "Inst", dict(fontSize=13,
                                                firstLineIndent=0,
                                                spaceBefore=3)),
        ("Section", "Normal", "Sec", dict(fontSize=14,
                                          spaceBefore=5,
                                          spaceAfter=5,
                                          firstLineIndent=0)),
        ("Question", "Normal", "Quest", dict(fontSize=13,
                                             firstLineIndent=0,
                                             spaceAfter=5,
                                             spaceBefore=10)),
        ("DefaultAnswer", "Normal", "DefAns", dict(fontSize=12,
                                                   firstLineIndent=0,
                                                   spaceBefore=3)),
        )
    for name, parent, alias, options in specs:
        styleSheet.add(ParagraphStyle(name=name,
                                      parent=styleSheet[parent],
                                      fontName=_baseFontName,
                                      **options),
                       alias=alias)
    return styleSheet
# end of custom styleSheet definations
# Helper functions (used by OCR)
html_unescape_and_strip = lambda m: html_strip(html_unescape(m))
def html_unescape(text):
    """
    Replace named HTML entities (e.g. &nbsp;) in text with the
    corresponding unicode characters.
    """
    entity_pattern = "&(%s);" % "|".join(name2codepoint)
    decode_entity = lambda match: unichr(name2codepoint[match.group(1)])
    return re.sub(entity_pattern, decode_entity, text)
def html_strip(text):
    """
    Strip html markup from text.

    Removes one <...> tag per pass (a "<" only opens a tag when it is
    not followed by a space), then recurses until no tag was removed.
    """
    tag_open = False
    tag_start = 0
    stripped = False
    for idx, ch in enumerate(text):
        if ch == "<":
            # A "<" followed by a space (or at end of string) does not
            # open a tag; a later "<" re-anchors the tag start
            if idx + 1 < len(text) and text[idx + 1] != " ":
                tag_open = True
                tag_start = idx
        elif ch == ">" and tag_open:
            # Cut the tag out and restart the scan on the result
            text = "%s%s" % (text[:tag_start], text[idx + 1:])
            stripped = True
            break
    if stripped:
        text = html_strip(text)
    return text
# Convert unicode to ascii compatible strings.
# (Python 2: isinstance(m, str) is True for byte strings, which pass
# through untouched; unicode strings are NFKD-normalised and any
# remaining non-ascii characters are dropped.)
cast2ascii = lambda m: m if isinstance(m, str) else\
                       unicodedata.normalize("NFKD",
                                             m).encode("ascii",
                                                       "ignore")
# =============================================================================
# S3OCRImageParser
class S3OCRImageParser(object):
"""
Image Parsing and OCR Utility
"""
    def __init__(self, s3method, r):
        """
        Initialise with the request environment.

        @param s3method: the S3Method instance invoking the parser
            (not stored - currently unused beyond construction)
        @param r: the S3Request being processed
        """
        self.r = r
        self.request = current.request
        # Abort early if the imaging dependencies are missing
        checkDependencies(r)
def parse(self, form_uuid, set_uuid, **kwargs):
""" performs OCR on a given set of pages """
raw_images = {}
images = {}
self.set_uuid = set_uuid
db = current.db
T = current.T
request = self.request
# Get metadata of the form
metatable = "ocr_meta"
query = (db[metatable]["form_uuid"] == form_uuid)
row = db(query).select(limitby=(0, 1)).first()
revision = row["revision"]
resourcename = row["resource_name"]
layoutfilename = row["layout_file"]
pages = int(row["pages"])
is_component = True if len(self.r.resource.components) == 1 else False
# Open each page
for eachpage in xrange(1, pages+1):
payloadtable = "ocr_payload"
row =\
db((db[payloadtable]["image_set_uuid"]==set_uuid) &\
(db[payloadtable]["page_number"]==eachpage)
).select().first()
pageimagefile = row["image_file"]
raw_images[eachpage] =\
Image.open(os.path.join(self.r.folder,
"uploads",
"ocr_payload",
pageimagefile))
# Transform each image
for each_img_index in raw_images.keys():
_debug("Transforming Page %s/%s" % (each_img_index,
pages))
images[each_img_index] = {}
images[each_img_index]["image"] =\
self.__convertImage2binary(raw_images[each_img_index])
images[each_img_index]["markers"] =\
self.__getMarkers(images[each_img_index]["image"])
images[each_img_index]["orientation"] =\
self.__getOrientation(images[each_img_index]["markers"])
if images[each_img_index]["orientation"] != 0.0:
images[each_img_index]["image"] =\
images[each_img_index]["image"].rotate(images[each_img_index]["orientation"])
images[each_img_index]["markers"] =\
self.__getMarkers(images[each_img_index]["image"])
images[each_img_index]["orientation"] =\
self.__getOrientation(images[each_img_index]["markers"])
images[each_img_index]["scalefactor"] =\
self.__scaleFactor(images[each_img_index]["markers"])
# Get layout file, convert it to etree
layout_file = open(os.path.join(self.r.folder,
"uploads",
"ocr_meta",
layoutfilename),
"rb")
layout_xml = layout_file.read()
layout_file.close()
layout_etree = etree.fromstring(layout_xml)
# Data etree
s3xml_root_etree = etree.Element("s3xml")
parent_resource_exist = False
for eachresource in layout_etree:
# Create data etree
if not is_component:
if parent_resource_exist == False:
s3xml_parent_resource_etree = etree.SubElement(s3xml_root_etree,
"resource")
s3xml_resource_etree = s3xml_parent_resource_etree
parent_resource_exist = True
else:
s3xml_resource_etree = etree.SubElement(s3xml_parent_resource_etree,
"resource")
else:
s3xml_resource_etree = etree.SubElement(s3xml_root_etree,
"resource")
s3xml_resource_etree.set("name",
eachresource.attrib.get("name", None))
for eachfield in eachresource:
field_name = eachfield.attrib.get("name", None)
field_type = eachfield.attrib.get("type", None)
field_reference = eachfield.attrib.get("reference")
if field_reference == "1":
field_is_reference = True
field_resource = eachfield.attrib.get("resource")
else:
field_is_reference = False
# Create data/reference etree
if field_is_reference:
s3xml_reference_etree = etree.SubElement(s3xml_resource_etree,
"reference")
s3xml_reference_etree.set("field", field_name)
s3xml_reference_etree.set("resource", field_resource)
s3xml_sub_reference_etree = etree.SubElement(s3xml_reference_etree,
"resource")
s3xml_sub_reference_etree.set("name", field_resource)
s3xml_field_etree = etree.SubElement(s3xml_sub_reference_etree,
"data")
s3xml_field_etree.set("field", "name")
else:
s3xml_field_etree = etree.SubElement(s3xml_resource_etree,
"data")
s3xml_field_etree.set("field", field_name)
#s3xml_field_etree.set("type", field_type)
components = eachfield.getchildren()
numcomponents = len(components)
null_field = False
if numcomponents == 0:
continue
else:
component_type = components[0].tag
if component_type in ("optionbox", "textbox"):
if component_type == "optionbox":
linenum = 0
OCRText = []
OCRValue = []
for eachcomponent in components:
comp_x = float(eachcomponent.attrib.get("x"))
comp_y = float(eachcomponent.attrib.get("y"))
comp_boxes = int(eachcomponent.attrib.get("boxes"))
comp_radius = float(eachcomponent.attrib.get("radius"))
comp_page = int(eachcomponent.attrib.get("page"))
comp_value = str(eachcomponent.attrib.get("value"))
comp_text = str(eachcomponent.text)
try:
page_origin = images[comp_page]["markers"]
except(KeyError):
self.r.error(501,
T("insufficient number of pages provided"))
crop_box = (
int(page_origin[0][0]+\
(comp_x*\
images[comp_page]["scalefactor"]["x"])-\
comp_radius*images[comp_page]["scalefactor"]["x"]),
int(page_origin[0][1]+\
(comp_y*\
images[comp_page]["scalefactor"]["y"])-\
comp_radius*images[comp_page]["scalefactor"]["y"]),
int(page_origin[0][0]+\
(comp_x*\
images[comp_page]["scalefactor"]["x"])+\
comp_radius*images[comp_page]["scalefactor"]["x"]),
int(page_origin[0][1]+\
(comp_y*\
images[comp_page]["scalefactor"]["y"])+\
comp_radius*images[comp_page]["scalefactor"]["y"]),
)
temp_image = images[comp_page]["image"].crop(crop_box)
cropped_image = images[comp_page]["image"].crop(crop_box)
result = self.__ocrIt(cropped_image,
form_uuid,
resourcename,
linenum,
content_type="optionbox",
resource_table=eachresource.attrib.get("name"),
field_name=eachfield.attrib.get("name"),
field_value=comp_value)
if result:
OCRText.append(unicode.strip(comp_text.decode("utf-8")))
OCRValue.append(unicode.strip(comp_value.decode("utf-8")))
linenum+=1
# Store values into xml
if len(OCRValue) in [0, 1]:
uOCRValue = "|".join(OCRValue)
uOCRText = "|".join(OCRText)
else:
uOCRValue = "|%s|" % "|".join(OCRValue)
uOCRText = "|%s|" % "|".join(OCRText)
s3xml_field_etree.set("value", uOCRValue)
s3xml_field_etree.text = uOCRText
if len(OCRValue) == 0:
null_field = True
else:
null_field = False
elif component_type == "textbox":
linenum = 1
if field_type in ["date", "datetime"]:
# Date(Time) Text Box
OCRedValues = {}
comp_count = 1
for eachcomponent in components:
comp_x = float(eachcomponent.attrib.get("x"))
comp_y = float(eachcomponent.attrib.get("y"))
comp_boxes = int(eachcomponent.attrib.get("boxes"))
comp_side = float(eachcomponent.attrib.get("side"))
comp_page = int(eachcomponent.attrib.get("page"))
comp_meta = str(eachcomponent.text)
try:
page_origin = images[comp_page]["markers"]
except(KeyError):
self.r.error(501,
T("insufficient number of pages provided"))
crop_box = (
int(page_origin[0][0]+\
(comp_x*\
images[comp_page]["scalefactor"]["x"])),
int(page_origin[0][1]+\
(comp_y*\
images[comp_page]["scalefactor"]["y"])),
int(page_origin[0][0]+\
(comp_x*\
images[comp_page]["scalefactor"]["x"])+\
comp_side*comp_boxes*images[comp_page]["scalefactor"]["x"]),
int(page_origin[0][1]+\
(comp_y*\
images[comp_page]["scalefactor"]["y"])+\
comp_side*images[comp_page]["scalefactor"]["y"]),
)
cropped_image = images[comp_page]["image"].crop(crop_box)
output = self.__ocrIt(cropped_image,
form_uuid,
resourcename,
linenum,
resource_table=eachresource.attrib.get("name"),
field_name=eachfield.attrib.get("name"),
field_seq=comp_count)
linenum += 1
comp_count += 1
OCRedValues[comp_meta] = unicode.strip(output.decode("utf-8"))
#YYYY
yyyy = datetime.now().year
try:
if int(OCRedValues["YYYY"]) in range(1800, 2300):
yyyy = int(OCRedValues["YYYY"])
except:
pass
if yyyy % 4 == 0:
leapyear = True
else:
leapyear = False
#MO
try:
if int(OCRedValues["MO"]) in range(1, 13):
mo = int(OCRedValues["MO"])
except:
mo = 1
#DD
try:
if int(OCRedValues["DD"]) in range(1, 32):
dd = int(OCRedValues["DD"])
except:
dd = 1
if mo in [4, 6, 9, 11]:
if dd == 31:
dd = 1
elif mo == 2:
if leapyear:
if dd > 29:
dd = 1
else:
if dd > 28:
dd = 1
if field_type == "datetime":
#MM
try:
if int(OCRedValues["MM"]) in range(0, 60):
mm = int(OCRedValues["MM"])
except:
mm = 0
#MM
try:
if int(OCRedValues["HH"]) in range(0, 24):
hh = int(OCRedValues["HH"])
except:
hh = 0
if field_type == "date":
s3xml_field_etree.set("value",
"%s-%s-%s" % (yyyy, mo, dd))
s3xml_field_etree.text =\
"%s-%s-%s" % (yyyy, mo, dd)
elif field_type == "datetime":
utctime = self.__convert_utc(yyyy, mo, dd, hh, mm)
utcftime = utctime.strftime("%Y-%m-%dT%H:%M:%SZ")
s3xml_field_etree.set("value", utcftime)
s3xml_field_etree.text = utcftime
else:
# Normal Text Box
ocrText = ""
comp_count = 1
for eachcomponent in components:
comp_x = float(eachcomponent.attrib.get("x"))
comp_y = float(eachcomponent.attrib.get("y"))
comp_boxes = int(eachcomponent.attrib.get("boxes"))
comp_side = float(eachcomponent.attrib.get("side"))
comp_page = int(eachcomponent.attrib.get("page"))
comp_meta = str(eachcomponent.text)
try:
page_origin = images[comp_page]["markers"]
except(KeyError):
self.r.error(501,
T("insufficient number of pages provided"))
crop_box = (
int(page_origin[0][0]+\
(comp_x*\
images[comp_page]["scalefactor"]["x"])),
int(page_origin[0][1]+\
(comp_y*\
images[comp_page]["scalefactor"]["y"])),
int(page_origin[0][0]+\
(comp_x*\
images[comp_page]["scalefactor"]["x"])+\
comp_side*comp_boxes*images[comp_page]["scalefactor"]["x"]),
int(page_origin[0][1]+\
(comp_y*\
images[comp_page]["scalefactor"]["y"])+\
comp_side*images[comp_page]["scalefactor"]["y"]),
)
cropped_image = images[comp_page]["image"].crop(crop_box)
output = self.__ocrIt(cropped_image,
form_uuid,
resourcename,
linenum,
resource_table=eachresource.attrib.get("name"),
field_name=eachfield.attrib.get("name"),
field_seq=comp_count)
ocrText += output
linenum += 1
comp_count += 1
output = unicode.strip(ocrText.decode("utf-8"))
# Store OCRText
if field_type in ["double", "integer"]:
try:
output = int(self.__strip_spaces(output))
except:
output = 0
s3xml_field_etree.set("value",
"%s" % output)
s3xml_field_etree.text =\
"%s" % output
else:
s3xml_field_etree.text = output
if len("%s" % output) == 0:
null_field = True
else:
null_field = False
else:
continue
if null_field:
if field_is_reference:
s3xml_resource_etree.remove(s3xml_reference_etree)
else:
s3xml_resource_etree.remove(s3xml_field_etree)
output = etree.tostring(s3xml_root_etree, pretty_print=True)
return output
def __strip_spaces(self, text):
""" Remove all spaces from a string """
try:
text = "".join(text.split())
except:
pass
return text
def __convert_utc(self,
yyyy,
mo,
dd,
hh,
mm):
""" Convert local time to UTC """
timetuple = datetime.strptime("%s-%s-%s %s:%s:00" % (yyyy,
mo,
dd,
hh,
mm),
"%Y-%m-%d %H:%M:%S")
auth = current.auth
if auth.user:
utc_offset = auth.user.utc_offset
else:
utc_offset = None
try:
t = utc_offset.split()[1]
if len(t) == 5:
sign = t[0]
hours = t[1:3]
minutes = t[3:5]
tdelta = timedelta(hours=int(hours), minutes=int(minutes))
if sign == "+":
utctime = timetuple - tdelta
elif sign == "-":
utctime = timetuple + tdelta
except:
utctime = timetuple
return utctime
def __ocrIt(self,
image,
form_uuid,
resourcename,
linenum,
content_type="textbox",
**kwargs):
""" put Tesseract to work, actual OCRing will be done here """
db = current.db
ocr_field_crops = "ocr_field_crops"
import uuid
uniqueuuid = uuid.uuid1() # to make it thread safe
resource_table = kwargs.get("resource_table")
field_name = kwargs.get("field_name")
inputfilename = "%s_%s_%s_%s.tif" % (uniqueuuid,
form_uuid,
resourcename,
linenum)
outputfilename = "%s_%s_%s_%s_text" % (uniqueuuid,
form_uuid,
resourcename,
linenum)
ocr_temp_dir = os.path.join(self.r.folder, "uploads", "ocr_temp")
try:
os.mkdir(ocr_temp_dir)
except(OSError):
pass
if content_type == "optionbox":
field_value = kwargs.get("field_value")
imgfilename = "%s.png" % inputfilename[:-3]
imgpath = os.path.join(ocr_temp_dir, imgfilename)
image.save(imgpath)
imgfile = open(imgpath, "r")
db[ocr_field_crops].insert(image_set_uuid=self.set_uuid,
resource_table=resource_table,
field_name=field_name,
image_file=db[ocr_field_crops]["image_file"].store(imgfile,
imgfilename),
value=field_value)
imgfile.close()
os.remove(imgpath)
stat = ImageStat.Stat(image)
#print resource_table, field_name, field_value
if stat.mean[0] < 96 :
return True
else:
return None
elif content_type == "textbox":
field_seq = kwargs.get("field_seq")
inputpath = os.path.join(ocr_temp_dir, inputfilename)
image.save(inputpath)
success =\
subprocess.call(["tesseract", inputpath,
os.path.join(ocr_temp_dir, outputfilename)])
if success != 0:
self.r.error(501, ERROR.TESSERACT_ERROR)
outputpath = os.path.join(ocr_temp_dir, "%s.txt" % outputfilename)
outputfile = open(outputpath)
outputtext = outputfile.read()
outputfile.close()
output = outputtext.replace("\n", " ")
os.remove(outputpath)
imgfilename = "%s.png" % inputfilename[:-3]
imgpath = os.path.join(ocr_temp_dir, imgfilename)
image.save(imgpath)
imgfile = open(imgpath, "r")
db[ocr_field_crops].insert(image_set_uuid=self.set_uuid,
resource_table=resource_table,
field_name=field_name,
image_file=db[ocr_field_crops]["image_file"].store(imgfile,
imgfilename),
sequence=field_seq)
imgfile.close()
os.remove(imgpath)
os.remove(inputpath)
#print resource_table, field_name, field_seq
try:
os.rmdir(ocr_temp_dir)
except(OSError):
import shutil
shutil.rmtree(ocr_temp_dir)
return output
def __convertImage2binary(self, image, threshold = 180):
""" Converts the image into binary based on a threshold. here it is 180"""
image = ImageOps.grayscale(image)
image.convert("L")
width, height = image.size
for x in xrange(width):
for y in xrange(height):
if image.getpixel((x,y)) < 180 :
image.putpixel((x,y), 0)
else:
image.putpixel((x,y), 255)
return image
def __findRegions(self, im):
"""
Return the list of regions which are found by the following algorithm.
-----------------------------------------------------------
Raster Scanning Algorithm for Connected Component Analysis:
-----------------------------------------------------------
On the first pass:
=================
1. Iterate through each element of the data by column, then by row (Raster Scanning)
2. If the element is not the background
1. Get the neighboring elements of the current element
2. If there are no neighbors, uniquely label the current element and continue
3. Otherwise, find the neighbor with the smallest label and assign it to the current element
4. Store the equivalence between neighboring labels
On the second pass:
===================
1. Iterate through each element of the data by column, then by row
2. If the element is not the background
1. Relabel the element with the lowest equivalent label
( source: http://en.wikipedia.org/wiki/Connected_Component_Labeling )
"""
width, height = im.size
ImageOps.grayscale(im)
im = im.convert("L")
regions = {}
pixel_region = [[0 for y in xrange(height)] for x in xrange(width)]
equivalences = {}
n_regions = 0
#first pass. find regions.
for x in xrange(width):
for y in xrange(height):
#look for a black pixel
if im.getpixel((x, y)) == 0 : #BLACK
# get the region number from north or west or create new region
region_n = pixel_region[x-1][y] if x > 0 else 0
region_w = pixel_region[x][y-1] if y > 0 else 0
#region_nw = pixel_region[x-1][y-1] if x > 0 and y > 0 else 0
#region_ne = pixel_region[x-1][y+1] if x > 0 else 0
max_region = max(region_n, region_w)
if max_region > 0:
#a neighbour already has a region, new region is the smallest > 0
new_region = min(filter(lambda i: i > 0, (region_n, region_w)))
#update equivalences
if max_region > new_region:
if max_region in equivalences:
equivalences[max_region].add(new_region)
else:
equivalences[max_region] = set((new_region, ))
else:
n_regions += 1
new_region = n_regions
pixel_region[x][y] = new_region
#Scan image again, assigning all equivalent regions the same region value.
for x in xrange(width):
for y in xrange(height):
r = pixel_region[x][y]
if r > 0:
while r in equivalences:
r = min(equivalences[r])
if r in regions:
regions[r].add(x, y)
else:
regions[r] = self.__Region(x, y)
return list(regions.itervalues())
def __getOrientation(self, markers):
""" Returns orientation of the sheet in radians """
x1, y1 = markers[0]
x2, y2 = markers[2]
try:
slope = ((x2-x1)*1.0) / ((y2-y1)*1.0)
except(ZeroDivisionError):
slope = 999999999999999999999999999
return math.atan(slope)*(180.0/math.pi)*(-1)
def __scaleFactor(self, markers):
""" Returns the scale factors lengthwise and breadthwise """
stdWidth = sum((596, -60))
stdHeight = sum((842, -60))
li = [markers[0], markers[2]]
sf_y = self.__distance(li)/stdHeight
li = [markers[6], markers[2]]
sf_x = self.__distance(li)/stdWidth
return {"x":sf_x, "y":sf_y}
def __distance(self, li):
""" returns the euclidean distance if the input is of the form [(x1, y1), (x2, y2)]"""
return math.sqrt(math.fsum((math.pow(math.fsum((int(li[1][0]), -int(li[0][0]))), 2), math.pow(math.fsum((int(li[1][1]), -int(li[0][1]))), 2))))
def __getMarkers(self, image):
""" Gets the markers on the OCR image """
centers = {}
present = 0
regions = self.__findRegions(image)
for r in regions:
if r.area > 320 and r.aspectratio() < 1.5 and r.aspectratio() > 0.67:
present += 1
centers[present] = r.centroid()
# This is the list of all the markers on the form.
markers = list(centers.itervalues())
markers.sort()
l1 = sorted(markers[0:3], key=lambda y: y[1])
l2 = markers[3:4]
l3 = sorted(markers[4:7], key=lambda y: y[1])
markers = []
markers.extend(l1)
markers.extend(l2)
markers.extend(l3)
#markers.sort(key=lambda x: (x[0], x[1]))
#_debug(markers)
return markers
class __Region():
""" Self explainatory """
def __init__(self, x, y):
""" Initialize the region """
self._pixels = [(x, y)]
self._min_x = x
self._max_x = x
self._min_y = y
self._max_y = y
self.area = 1
def add(self, x, y):
""" Add a pixel to the region """
self._pixels.append((x, y))
self.area += 1
self._min_x = min(self._min_x, x)
self._max_x = max(self._max_x, x)
self._min_y = min(self._min_y, y)
self._max_y = max(self._max_y, y)
def centroid(self):
""" Returns the centroid of the bounding box """
return ((self._min_x + self._max_x)/2 , (self._min_y + self._max_y)/2)
def box(self):
""" Returns the bounding box of the region """
return [ (self._min_x, self._min_y) , (self._max_x, self._max_y)]
def aspectratio(self):
""" Calculating the aspect ratio of the region """
width = self._max_x - self._min_x
length = self._max_y - self._min_y
return float(width)/float(length)
# end S3OCRImageParser
# END =========================================================================
| {
"content_hash": "cefe4a179381ac0de80688565e870b87",
"timestamp": "",
"source": "github",
"line_count": 4402,
"max_line_length": 220,
"avg_line_length": 44.73103134938664,
"alnum_prop": 0.41562471433069587,
"repo_name": "mrGeen/eden",
"id": "a778674eb8f02f7704e794a4edfbfcabad8f2e8e",
"size": "196931",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/s3/s3pdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "727"
},
{
"name": "CSS",
"bytes": "1070670"
},
{
"name": "HTML",
"bytes": "358005"
},
{
"name": "JavaScript",
"bytes": "14790995"
},
{
"name": "NSIS",
"bytes": "3934"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Python",
"bytes": "22735063"
},
{
"name": "XSLT",
"bytes": "1263876"
}
],
"symlink_target": ""
} |
"""
Flask-User
==========
.. image:: https://img.shields.io/pypi/v/Flask-User.svg
:target: https://pypi.python.org/pypi/Flask-User
.. image:: https://img.shields.io/travis/lingthio/Flask-User.svg
:target: https://travis-ci.org/lingthio/Flask-User
.. image:: https://img.shields.io/pypi/dm/Flask-User.svg
:target: https://pypi.python.org/pypi/Flask-User
.. image:: https://img.shields.io/pypi/l/Flask-User.svg
:target: https://pypi.python.org/pypi/Flask-User
Customizable User Account Management for Flask
----------------------------------------------
| So you're writing a Flask web application and would like to authenticate your users.
| You start with a simple **Login** page, but soon enough you need to handle:
* **Registrations** and **Email Confirmations**
* **Change Usernames**, **Change Passwords**, and **Forgotten Passwords**
And wouldn't it be nice to also offer:
* **Role-based Authorization**
* **Remember-me cookies**
* **Multiple emails per user**
* **Internationalization**
| Flask-User offers these user features (and more) out-of-the-box
| while also honoring the following developer needs:
* **Reliable** (Code coverage of over 95%)
* **Secure** (Built on top of widely deployed Flask-Login)
* **Ready to use** (Through sensible defaults)
* **Largely configurable** (Through configuration settings)
* **Fully customizable** (Through customizable functions and email templates)
* **Well documented**
* **Tested on Python 2.6, 2.7, 3.3 and 3.4**
Status
------
| Flask-User v0.5 and v0.6 are quite stable and are used in production environments.
| It is marked as a Beta release because the API is subject to small changes.
| We appreciate it if you would enter issues and
enhancement requests into the `Flask-User Issue Tracker <https://github.com/lingthio/flask-user/issues>`_.
Demo
----
The `Flask-User Demo <https://flask-user-demo.herokuapp.com/>`_ showcases Flask-User.
To protect against spam mis-use, all email features have been disabled.
(If you're the first visitor in the last hour, it may take a few seconds for Heroku to start this service)
Documentation
-------------
`Flask-User Documentation <https://pythonhosted.org/Flask-User/>`_
Revision History
----------------
`Flask-User Revision History <http://pythonhosted.org//Flask-User/index.html#revision-history>`_
Contact Information
-------------------
Ling Thio - ling.thio [at] gmail.com
Acknowledgements
----------------
This project would not be possible without the use of the following amazing offerings:
* `Flask <http://flask.pocoo.org/>`_
* `Flask-Babel <http://babel.pocoo.org/>`_
* `Flask-Login <https://flask-login.readthedocs.org/en/latest/>`_
* `Flask-Mail <http://pythonhosted.org/flask-mail/>`_
* `SQLAlchemy <http://www.sqlalchemy.org/>`_ and `Flask-SQLAlchemy <http://pythonhosted.org/Flask-SQLAlchemy/>`_
* `WTForms <http://wtforms.readthedocs.org/en/latest/>`_ and `Flask-WTF <https://flask-wtf.readthedocs.org/en/latest/>`_
Alternative Flask extensions
----------------------------
* `Flask-Login <https://flask-login.readthedocs.org/en/latest/>`_
* `Flask-Security <https://pythonhosted.org/Flask-Security/>`_
"""
from __future__ import print_function
from setuptools import setup
# Packaging metadata for the Flask-User distribution; the module
# docstring above is reused verbatim as the PyPI long_description.
setup(
    name='Flask-User',
    version='0.6.8',
    url='http://github.com/lingthio/Flask-User',
    license='BSD License',
    author='Ling Thio',
    author_email='ling.thio@gmail.com',
    description='Customizable User Account Management for Flask: Register, Confirm email, Login, Change username, Change password, Forgot password and more.',
    long_description=__doc__,
    keywords='Flask User Registration Email Username Confirmation Password Reset',
    packages=['flask_user'],
    zip_safe=False,
    include_package_data=True,
    platforms='any',
    install_requires=[
        'passlib',
        'bcrypt',
        'pycrypto',
        'Flask',
        'Flask-Login',
        'Flask-Mail',
        'Flask-SQLAlchemy',
        'Flask-WTF',
    ],
    # Entry point used by "setup.py test"
    test_suite="flask_user.tests.run_tests",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: Chinese (Simplified)',
        'Natural Language :: Dutch',
        'Natural Language :: English',
        'Natural Language :: French',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Security',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| {
"content_hash": "a9cb5bbb4149e129537fa817ddbd2367",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 158,
"avg_line_length": 36,
"alnum_prop": 0.6629901960784313,
"repo_name": "sundisee/Flask-User",
"id": "29a394edb65d7ca493daf0d4bae2ab5ea47eb97b",
"size": "4896",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "18344"
},
{
"name": "Python",
"bytes": "198015"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import copy
import os
import re
import sys
from io import BytesIO
from pprint import pformat
from django.conf import settings
from django.core import signing
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import MultiValueDict, ImmutableList
from django.utils.encoding import force_bytes, force_text, force_str, iri_to_uri
from django.utils.six.moves.urllib.parse import parse_qsl, urlencode, quote, urljoin
RAISE_ERROR = object()
absolute_http_url_re = re.compile(r"^https?://", re.I)
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
    """Raised in place of IOError when reading the request body fails."""
    pass
class RawPostDataException(Exception):
    """
    Raised when the raw body of a request with multipart/* POST data is
    accessed after the data stream has already been consumed (via POST,
    FILES, etc.).
    """
    pass
class HttpRequest(object):
"""A basic HTTP request."""
# The encoding used in GET/POST dicts. None means use default setting.
_encoding = None
_upload_handlers = []
def __init__(self):
# WARNING: The `WSGIRequest` subclass doesn't call `super`.
# Any variable assignment made here should also happen in
# `WSGIRequest.__init__()`.
self.GET, self.POST, self.COOKIES, self.META, self.FILES = {}, {}, {}, {}, {}
self.path = ''
self.path_info = ''
self.method = None
self.resolver_match = None
self._post_parse_error = False
def __repr__(self):
return build_request_repr(self)
def get_host(self):
"""Returns the HTTP host using the environment or request headers."""
# We try three options, in order of decreasing preference.
if settings.USE_X_FORWARDED_HOST and (
'HTTP_X_FORWARDED_HOST' in self.META):
host = self.META['HTTP_X_FORWARDED_HOST']
elif 'HTTP_HOST' in self.META:
host = self.META['HTTP_HOST']
else:
# Reconstruct the host using the algorithm from PEP 333.
host = self.META['SERVER_NAME']
server_port = str(self.META['SERVER_PORT'])
if server_port != ('443' if self.is_secure() else '80'):
host = '%s:%s' % (host, server_port)
# There is no hostname validation when DEBUG=True
if settings.DEBUG:
return host
domain, port = split_domain_port(host)
if domain and validate_host(domain, settings.ALLOWED_HOSTS):
return host
else:
msg = "Invalid HTTP_HOST header: %r." % host
if domain:
msg += "You may need to add %r to ALLOWED_HOSTS." % domain
else:
msg += "The domain name provided is not valid according to RFC 1034/1035"
raise DisallowedHost(msg)
def get_full_path(self):
# RFC 3986 requires query string arguments to be in the ASCII range.
# Rather than crash if this doesn't happen, we encode defensively.
return '%s%s' % (self.path, ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else '')
def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
"""
Attempts to return a signed cookie. If the signature fails or the
cookie has expired, raises an exception... unless you provide the
default argument in which case that value will be returned instead.
"""
try:
cookie_value = self.COOKIES[key]
except KeyError:
if default is not RAISE_ERROR:
return default
else:
raise
try:
value = signing.get_cookie_signer(salt=key + salt).unsign(
cookie_value, max_age=max_age)
except signing.BadSignature:
if default is not RAISE_ERROR:
return default
else:
raise
return value
def build_absolute_uri(self, location=None):
"""
Builds an absolute URI from the location and the variables available in
this request. If no location is specified, the absolute URI is built on
``request.get_full_path()``.
"""
if not location:
location = self.get_full_path()
if not absolute_http_url_re.match(location):
current_uri = '%s://%s%s' % (self.scheme,
self.get_host(), self.path)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def _get_scheme(self):
return 'https' if os.environ.get("HTTPS") == "on" else 'http'
@property
def scheme(self):
# First, check the SECURE_PROXY_SSL_HEADER setting.
if settings.SECURE_PROXY_SSL_HEADER:
try:
header, value = settings.SECURE_PROXY_SSL_HEADER
except ValueError:
raise ImproperlyConfigured('The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.')
if self.META.get(header, None) == value:
return 'https'
# Failing that, fall back to _get_scheme(), which is a hook for
# subclasses to implement.
return self._get_scheme()
def is_secure(self):
return self.scheme == 'https'
def is_ajax(self):
return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, val):
"""
Sets the encoding used for GET/POST accesses. If the GET or POST
dictionary has already been created, it is removed and recreated on the
next access (so that it is decoded correctly).
"""
self._encoding = val
if hasattr(self, '_get'):
del self._get
if hasattr(self, '_post'):
del self._post
def _initialize_handlers(self):
self._upload_handlers = [uploadhandler.load_handler(handler, self)
for handler in settings.FILE_UPLOAD_HANDLERS]
@property
def upload_handlers(self):
if not self._upload_handlers:
# If there are no upload handlers defined, initialize them from settings.
self._initialize_handlers()
return self._upload_handlers
@upload_handlers.setter
def upload_handlers(self, upload_handlers):
if hasattr(self, '_files'):
raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
self._upload_handlers = upload_handlers
def parse_file_upload(self, META, post_data):
"""Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
self.upload_handlers = ImmutableList(
self.upload_handlers,
warning="You cannot alter upload handlers after the upload has been processed."
)
parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
return parser.parse()
@property
def body(self):
if not hasattr(self, '_body'):
if self._read_started:
raise RawPostDataException("You cannot access body after reading from request's data stream")
try:
self._body = self.read()
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
self._stream = BytesIO(self._body)
return self._body
def _mark_post_parse_error(self):
self._post = QueryDict('')
self._files = MultiValueDict()
self._post_parse_error = True
def _load_post_and_files(self):
"""Populate self._post and self._files if the content-type is a form type"""
if self.method != 'POST':
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
return
if self._read_started and not hasattr(self, '_body'):
self._mark_post_parse_error()
return
if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
if hasattr(self, '_body'):
# Use already read data
data = BytesIO(self._body)
else:
data = self
try:
self._post, self._files = self.parse_file_upload(self.META, data)
except MultiPartParserError:
# An error occured while parsing POST data. Since when
# formatting the error the request handler might access
# self.POST, set self._post and self._file to prevent
# attempts to parse POST data again.
# Mark that an error occured. This allows self.__repr__ to
# be explicit about it instead of simply representing an
# empty POST
self._mark_post_parse_error()
raise
elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
else:
self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
# File-like and iterator interface.
#
# Expects self._stream to be set to an appropriate source of bytes by
# a corresponding request subclass (e.g. WSGIRequest).
# Also when request data has already been read by request.POST or
# request.body, self._stream points to a BytesIO instance
# containing that data.
def read(self, *args, **kwargs):
self._read_started = True
try:
return self._stream.read(*args, **kwargs)
except IOError as e:
six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
    def readline(self, *args, **kwargs):
        """Read one line from the underlying stream, converting IOError into
        UnreadablePostError while preserving the original traceback."""
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
def xreadlines(self):
while True:
buf = self.readline()
if not buf:
break
yield buf
__iter__ = xreadlines
def readlines(self):
return list(iter(self))
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict that takes a query string when initialized.
    This is immutable unless you create a copy of it.
    Values retrieved from this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string, mutable=False, encoding=None):
        """Parse ``query_string`` into key/value lists.

        Blank values are kept, so ``a=&b=1`` yields both keys.  On Python 3
        ``parse_qsl`` decodes for us; on Python 2 each key/value is decoded
        with ``errors='replace'`` so bad bytes become U+FFFD.
        """
        super(QueryDict, self).__init__()
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        if six.PY3:
            if isinstance(query_string, bytes):
                # query_string contains URL-encoded data, a subset of ASCII.
                query_string = query_string.decode()
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True,
                                        encoding=encoding):
                self.appendlist(key, value)
        else:
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True):
                self.appendlist(force_text(key, encoding, errors='replace'),
                                force_text(value, encoding, errors='replace'))
        # Mutability is set last so appendlist() above is not rejected.
        self._mutable = mutable
    @property
    def encoding(self):
        """The charset used to decode keys/values (lazily defaulted)."""
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    @encoding.setter
    def encoding(self, value):
        self._encoding = value
    def _assert_mutable(self):
        """Raise AttributeError unless this instance was created mutable."""
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        """Set ``key`` to a single value (mutable instances only)."""
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)
    def __copy__(self):
        # Copies are always mutable, regardless of the source instance.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in six.iterlists(self):
            result.setlist(key, value)
        return result
    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        # Register early so self-referencing structures terminate.
        memo[id(self)] = result
        for key, value in six.iterlists(self):
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        """Replace the full list of values stored under ``key``."""
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super(QueryDict, self).setlist(key, list_)
    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super(QueryDict, self).setlistdefault(key, default_list)
    def appendlist(self, key, value):
        """Append ``value`` to the list of values stored under ``key``."""
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).appendlist(key, value)
    def pop(self, key, *args):
        self._assert_mutable()
        return super(QueryDict, self).pop(key, *args)
    def popitem(self):
        self._assert_mutable()
        return super(QueryDict, self).popitem()
    def clear(self):
        self._assert_mutable()
        super(QueryDict, self).clear()
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super(QueryDict, self).setdefault(key, default)
    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.
        :arg safe: Used to specify characters which do not require quoting, for
            example::
            >>> q = QueryDict('', mutable=True)
            >>> q['next'] = '/a&b/'
            >>> q.urlencode()
            'next=%2Fa%26b%2F'
            >>> q.urlencode(safe='/')
            'next=/a%26b/'
        """
        output = []
        if safe:
            safe = force_bytes(safe, self.encoding)
            # Quote manually so the characters in ``safe`` stay unescaped.
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = force_bytes(k, self.encoding)
            output.extend([encode(k, force_bytes(v, self.encoding))
                           for v in list_])
        return '&'.join(output)
def build_request_repr(request, path_override=None, GET_override=None,
                       POST_override=None, COOKIES_override=None,
                       META_override=None):
    """
    Builds and returns the request's representation string. The request's
    attributes may be overridden by pre-processed values.
    """
    # This runs during error handling, so every attribute access is guarded:
    # a malformed request must never make the repr itself blow up.
    def render(override, fallback):
        try:
            return pformat(override) if override is not None else pformat(fallback())
        except Exception:
            return '<could not parse>'

    get = render(GET_override, lambda: request.GET)
    if request._post_parse_error:
        # POST parsing already failed once; don't trigger a re-parse.
        post = '<could not parse>'
    else:
        post = render(POST_override, lambda: request.POST)
    cookies = render(COOKIES_override, lambda: request.COOKIES)
    meta = render(META_override, lambda: request.META)
    path = path_override if path_override is not None else request.path
    return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                     (request.__class__.__name__,
                      path,
                      six.text_type(get),
                      six.text_type(post),
                      six.text_type(cookies),
                      six.text_type(meta)))
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Converts basestring objects to unicode, using the given encoding. Illegally
    encoded input characters are replaced with Unicode "unknown" codepoint
    (\ufffd).
    Returns any non-basestring objects without change.
    """
    # Guard clause: anything that isn't bytes passes through untouched.
    if not isinstance(s, bytes):
        return s
    return six.text_type(s, encoding, 'replace')
def split_domain_port(host):
    """
    Return a (domain, port) tuple from a given host.
    Returned domain is lower-cased. If the host is invalid, the domain will be
    empty.
    """
    host = host.lower()
    if not host_validation_re.match(host):
        return '', ''
    if host[-1] == ']':
        # Bracketed IPv6 literal with no port component.
        return host, ''
    # Split only on the right-most colon so IPv6-ish hosts keep their colons.
    domain, sep, port = host.rpartition(':')
    if sep:
        return domain, port
    return host, ''
def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.
    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.
    Note: This function assumes that the given host is lower-cased and has
    already had the port, if any, stripped off.
    Return ``True`` for a valid host, ``False`` otherwise.
    """
    # A fully-qualified name may carry a trailing dot; ignore it.
    if host.endswith('.'):
        host = host[:-1]
    for pattern in allowed_hosts:
        pattern = pattern.lower()
        if pattern == '*' or pattern == host:
            return True
        # Leading-dot patterns match the bare domain and all subdomains.
        if pattern.startswith('.') and (host.endswith(pattern) or
                                        host == pattern[1:]):
            return True
    return False
| {
"content_hash": "e1b3e6720ae52805fba817585cd91742",
"timestamp": "",
"source": "github",
"line_count": 533,
"max_line_length": 135,
"avg_line_length": 36.20637898686679,
"alnum_prop": 0.5884029433101876,
"repo_name": "frederick-masterton/django",
"id": "4f272ec2c7024443b8e636b757b2463069663ae0",
"size": "19298",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/http/request.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import os
import h5py
import numpy
from fuel.converters.base import fill_hdf5_file, check_exists
TRAIN_FILE = 'binarized_mnist_train.amat'  # training split source file
VALID_FILE = 'binarized_mnist_valid.amat'  # validation split source file
TEST_FILE = 'binarized_mnist_test.amat'  # test split source file
# Every input file must be present before conversion may start.
ALL_FILES = [TRAIN_FILE, VALID_FILE, TEST_FILE]
@check_exists(required_files=ALL_FILES)
def convert_binarized_mnist(directory, output_directory,
                            output_filename='binarized_mnist.hdf5'):
    """Converts the binarized MNIST dataset to HDF5.
    Converts the binarized MNIST dataset used in R. Salakhutdinov's DBN
    paper [DBN] to an HDF5 dataset compatible with
    :class:`fuel.datasets.BinarizedMNIST`. The converted dataset is
    saved as 'binarized_mnist.hdf5'.
    This method assumes the existence of the files
    `binarized_mnist_{train,valid,test}.amat`, which are accessible
    through Hugo Larochelle's website [HUGO].
    .. [DBN] Ruslan Salakhutdinov and Iain Murray, *On the Quantitative
       Analysis of Deep Belief Networks*, Proceedings of the 25th
       international conference on Machine learning, 2008, pp. 872-879.
    .. [HUGO] http://www.cs.toronto.edu/~larocheh/public/datasets/
       binarized_mnist/binarized_mnist_{train,valid,test}.amat
    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'binarized_mnist.hdf5'.
    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    def load_split(filename):
        # Each .amat row is one flattened 28x28 binary image; reshape to
        # (examples, channel, height, width) as BinarizedMNIST expects.
        return numpy.loadtxt(
            os.path.join(directory, filename)).reshape(
                (-1, 1, 28, 28)).astype('uint8')

    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode='w')
    # try/finally so the HDF5 file handle is released even if loadtxt or
    # the fill step raises (the original leaked the handle on error).
    try:
        data = tuple(
            (split, 'features', load_split(filename))
            for split, filename in (('train', TRAIN_FILE),
                                    ('valid', VALID_FILE),
                                    ('test', TEST_FILE)))
        fill_hdf5_file(h5file, data)
        for i, label in enumerate(('batch', 'channel', 'height', 'width')):
            h5file['features'].dims[i].label = label
        h5file.flush()
    finally:
        h5file.close()
    return (output_path,)
def fill_subparser(subparser):
    """Sets up a subparser to convert the binarized MNIST dataset files.
    Parameters
    ----------
    subparser : :class:`argparse.ArgumentParser`
        Subparser handling the `binarized_mnist` command.
    """
    # Route the `binarized_mnist` sub-command to the converter above.
    subparser.set_defaults(func=convert_binarized_mnist)
| {
"content_hash": "c3ae82e3fae0cede2840d2dee649dcaa",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 74,
"avg_line_length": 32.906976744186046,
"alnum_prop": 0.6586572438162545,
"repo_name": "ejls/fuel",
"id": "7a772927f889fc03d672a20fd477ce8feb25e24d",
"size": "2830",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "fuel/converters/binarized_mnist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "284103"
},
{
"name": "Shell",
"bytes": "342"
}
],
"symlink_target": ""
} |
from swampdragon import route_handler
from swampdragon.route_handler import ModelRouter, BaseRouter
import time
from dataset_manager.enums import ComputingStateType
from dataset_manager.facades import DatasetFacade, VideoFacade
from dataset_manager.dragon_serializers import DatasetSerializer, VideoSerializer, FeaturesSerializer
from dataset_manager.models import Dataset, Video
class DatasetRouter(ModelRouter):
    """SwampDragon router exposing Dataset CRUD plus processing verbs.

    All processing verbs delegate directly to :class:`DatasetFacade`.
    """
    route_name = 'dataset'
    serializer_class = DatasetSerializer
    model = Dataset
    # Processing verbs accepted on top of the standard CRUD verbs.
    valid_verbs = BaseRouter.valid_verbs + ['prepare_dataset', 'scan_video_folder', 'prepare_videos', 'detect_shot_boundaries', 'extract_features', 'model_arousal']

    def create(self, **kwargs):
        """Create a dataset unless its name is already taken."""
        # Check if name already exists, return error if yes, create else
        name = kwargs["name"]
        if DatasetFacade.dataset_name_exists(name):
            self.send({"error": {"message": "A dataset with this name already exists.", "type": "dataset_name"}})
        else:
            try:
                super(DatasetRouter, self).create(**kwargs)
            except Exception:
                # Narrowed from a bare ``except:`` which also swallowed
                # SystemExit/KeyboardInterrupt.
                self.send({"error": {"message": "A problem occured when trying to create the dataset.", "type": "creation_error"}})

    def get_object(self, **kwargs):
        """Fetch a single dataset by primary key."""
        return self.model.objects.get(pk=kwargs['id'])

    def get_query_set(self, **kwargs):
        """All datasets, unfiltered."""
        return self.model.objects.all()

    def prepare_dataset(self, dataset_id):
        """Delegate dataset preparation to the facade."""
        DatasetFacade.prepare(dataset_id)

    def scan_video_folder(self, dataset_id):
        """Delegate scanning of the dataset's video folder to the facade."""
        DatasetFacade.scan_video_folder(dataset_id)

    def prepare_videos(self, dataset_id, overwrite=False):
        """Delegate video preparation; existing results kept unless overwrite."""
        DatasetFacade.prepare_videos(dataset_id, overwrite=overwrite)

    def detect_shot_boundaries(self, dataset_id, configuration):
        """Delegate shot-boundary detection with the given configuration."""
        DatasetFacade.detect_shot_boundaries(dataset_id, configuration)

    def extract_features(self, dataset_id, feature_types, overwrite=False):
        """Delegate feature extraction for the requested feature types."""
        DatasetFacade.extract_features(dataset_id, feature_types, overwrite=overwrite)

    def model_arousal(self, dataset_id, feature_types, overwrite=False):
        """Delegate arousal modelling for the requested feature types."""
        DatasetFacade.model_arousal(dataset_id, feature_types, overwrite=overwrite)
class VideoRouter(ModelRouter):
    """SwampDragon router for videos belonging to a dataset."""
    route_name = 'video'
    serializer_class = VideoSerializer
    model = Video
    # Extra verbs accepted on top of the standard CRUD verbs.
    valid_verbs = BaseRouter.valid_verbs + ['get_feature', 'evaluate_sbd']

    def get_object(self, **kwargs):
        """Fetch a single video by primary key."""
        return self.model.objects.get(pk=kwargs['id'])

    def get_query_set(self, **kwargs):
        """All videos that belong to the given dataset."""
        return self.model.objects.filter(dataset__id=kwargs['dataset_id'])

    def get_feature(self, video_id, feature_type):
        """Serialize one feature of a video and push it to the subscriber."""
        feature = VideoFacade.get_video_feature(video_id, feature_type)
        serializer = FeaturesSerializer(instance=feature)
        self.send(serializer.serialize())

    def evaluate_sbd(self, video_id):
        """Run shot-boundary evaluation and report success/failure state."""
        try:
            VideoFacade.evaluate_sbd(video_id)
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt.
            self.send({"state": ComputingStateType.FAILED})
        else:
            # Success is reported outside the try so a failure while
            # sending is not misreported as an evaluation failure.
            self.send({"state": ComputingStateType.SUCCESS})
# Register routers
# Makes them reachable by their route_name over the websocket API.
route_handler.register(DatasetRouter)
route_handler.register(VideoRouter)
| {
"content_hash": "9c45c1992a9f23827fc8b78ad91b2e4b",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 164,
"avg_line_length": 39.5125,
"alnum_prop": 0.6962986396709901,
"repo_name": "dumoulinj/ers",
"id": "58e1348879b1c0f015810578eaa0d396bd99e53d",
"size": "3161",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ers_backend/dataset_manager/routers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "534"
},
{
"name": "CSS",
"bytes": "22413"
},
{
"name": "CoffeeScript",
"bytes": "72924"
},
{
"name": "HTML",
"bytes": "51302"
},
{
"name": "JavaScript",
"bytes": "102699"
},
{
"name": "Python",
"bytes": "254986"
},
{
"name": "Shell",
"bytes": "2759"
}
],
"symlink_target": ""
} |
import csv
import json
import requests
from django.core.exceptions import ObjectDoesNotExist
from Manager.models import catagories
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Legacy data-import management command.

    Superseded by the /manager/form page (this was useful while issue #5
    was still in play); kept only so :func:`validator` can be run manually.
    """

    def handle(self, *args, **options):
        validator()
def getJSON():
    """Read the category-filter CSV and return it as a list of dicts.

    The first CSV row supplies the keys; every following row becomes one
    dict. Uses a ``with`` block so the file handle is always released
    (the original left the file open).
    """
    path = 'Manager/management/DatabaseInput/ComBunqWeb-category-filter.csv'
    # NOTE: This can be used later for a from on the App itself
    with open(path) as catFiltersCSV:
        reader = csv.reader(catFiltersCSV, delimiter=',', quotechar='"')
        keys = next(reader)
        return [{key: val for key, val in zip(keys, prop)} for prop in reader]
def getHeaders():
    """Return the CSV header row (the category column names) as a list.

    Uses a ``with`` block so the file handle is always released (the
    original left the file open).
    """
    path = 'Manager/management/DatabaseInput/ComBunqWeb-category-filter.csv'
    # NOTE: This can be used later for a from on the App itself
    with open(path) as catFiltersCSV:
        reader = csv.reader(catFiltersCSV, delimiter=',', quotechar='"')
        return list(next(reader))
def validator():
    """Validate every IBAN-looking cell via openiban.com and store matches.

    For each row/column of the category-filter CSV, asks the openiban
    service whether the value is a valid IBAN; valid cells are wrapped in
    :class:`newCatInfo` and persisted through :func:`isInDatabase`.

    Fixes over the original: ``getJSON()`` is called once instead of being
    re-read from disk just to obtain its length, and the ``try`` now only
    guards the dict lookup that can legitimately raise ``KeyError``.
    """
    url = 'https://openiban.com/validate/'
    rows = getJSON()
    headers = getHeaders()
    for row in rows:
        for column in headers:
            try:
                value = row[column]
            except KeyError:
                # This row has no entry for the column; skip it.
                continue
            check = json.loads(requests.get(
                "".join([url, value])).content.decode())
            if check['valid']:
                row[column] = newCatInfo(column, value)
                isInDatabase(row[column])
class newCatInfo(object):
    """Pairs a category name with the IBAN that was validated for it.

    ``str(instance)`` yields the category name; the IBAN is exposed via
    :meth:`getIban`.
    """

    def __init__(self, catName, Iban):
        super(newCatInfo, self).__init__()
        self.Iban = Iban
        self.catName = catName

    def __str__(self):
        # The textual form of this object is just the category name.
        return self.catName

    def getIban(self):
        """Return the IBAN associated with this category."""
        return self.Iban
def isInDatabase(catInfo):
    """Insert or update the category for the given :class:`newCatInfo`.

    Creates the category if it does not exist; otherwise appends the IBAN
    to its account list when not already present.

    Fixes over the original: the matched row is fetched once (the original
    issued a second, redundant ``get`` query in the ``else`` branch).
    """
    cat = catagories.objects
    catName = str(catInfo)
    iban = catInfo.getIban()
    try:
        existing = cat.get(Naam=catName)
    except ObjectDoesNotExist:
        # NOTE: create category
        cat.create(Naam=catName, Rekening=[iban], regex=[])
    else:
        if iban not in existing.Rekening:
            existing.Rekening.append(iban)
            existing.save()
| {
"content_hash": "49396ee6bcb1eff229b3d518ddad11d0",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 76,
"avg_line_length": 28.529411764705884,
"alnum_prop": 0.6078350515463917,
"repo_name": "OGKevin/ComBunqWebApp",
"id": "9dc3951fc710c4aa1e8f1a0603003d29e93d86a2",
"size": "2425",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Manager/management/commands/InputDataInDataBase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "93946"
},
{
"name": "HTML",
"bytes": "70648"
},
{
"name": "JavaScript",
"bytes": "29078"
},
{
"name": "Python",
"bytes": "196749"
}
],
"symlink_target": ""
} |
"""
Skype4Py distutils script.
Copyright (c) 2007-2009, Arkadiusz Wahlig
All rights reserved.
Distributed under the BSD License, see the
accompanying LICENSE file for more information.
"""
import os
import sys
from setuptools import setup
from distutils.cmd import Command
from distutils.command.install_lib import install_lib as old_install_lib
# Package version published to PyPI and embedded in the doc archives.
VERSION = '1.0.35'
# Change the current dir to where the setup.py is in case we're not there.
path = os.path.split(sys.argv[0])[0]
if path:
    os.chdir(path)
# So that the Skype4Py library may know that the setup is running.
sys.skype4py_setup = True
class install_lib(old_install_lib):
    """Customized 'install_lib' command.

    Installs only the Skype4Py.api platform backend that matches the
    machine being installed on; the other platform modules are stripped
    from the build tree before the stock install runs.
    """

    def install(self):
        # Trim the build to the current platform, then let the stock
        # implementation copy the files.
        self.adapt_build_to_platform()
        outfiles = old_install_lib.install(self)
        # Also byte-compile the installed files for distribution usage.
        if outfiles is not None:
            self.byte_compile(outfiles)
        return outfiles

    def adapt_build_to_platform(self):
        # Mirror the runtime platform detection performed by
        # Skype4Py/api/__init__.py so build-time and import-time agree.
        if sys.platform.startswith('win'):
            platform = 'windows'
        elif sys.platform == 'darwin':
            platform = 'darwin'
        else:
            platform = 'posix'
        # Keep only __init__* and the files for the detected platform in
        # <build_dir>/Skype4Py/api; remove everything else.
        api_dir = os.path.join(self.build_dir, os.path.join('Skype4Py', 'api'))
        for name in os.listdir(api_dir):
            wanted = name.startswith('__') or name.startswith(platform)
            if not wanted:
                os.remove(os.path.join(api_dir, name))
class build_doc(Command):
    """Handles the 'build_doc' command.
    This command builds the documentation using epydoc. The documentation is then
    zipped using zipfile standard module.
    """
    description = 'build the documentation'
    user_options = [('pdf', None, 'Builds a PDF documentation instead of a HTML one.')]
    def initialize_options(self):
        # distutils hook: declare option defaults.
        self.pdf = None
    def finalize_options(self):
        pass
    def run(self):
        try:
            from epydoc import cli
            epydoc_config = os.path.join('doc', 'epydoc.conf')
            # epydoc's cli() reads sys.argv directly, so splice in our
            # arguments and restore the originals afterwards.
            old_argv = sys.argv[1:]
            try:
                sys.argv[1:] = ['--config=%s' % epydoc_config]
                if self.pdf:
                    sys.argv.append('--pdf')
                    sys.argv.append('--output=doc/pdf/')
                else:
                    sys.argv.append('--html')
                    sys.argv.append('--output=doc/html/')
                cli.cli()
            finally:
                sys.argv[1:] = old_argv
            # NOTE: Python 2 print statement -- this setup script predates
            # Python 3 support.
            print 'zipping the documentation'
            import zipfile
            if self.pdf:
                doctype = 'pdf'
            else:
                doctype = 'html'
            name = 'Skype4Py-%s-%sdoc' % (VERSION, doctype)
            z = zipfile.ZipFile(os.path.join('doc', '%s.zip' % name),
                                'w', zipfile.ZIP_DEFLATED)
            path = os.path.join('doc', doctype)
            if self.pdf:
                # PDF build produces a single api.pdf file.
                z.write(os.path.join(path, 'api.pdf'), '%s.pdf' % name)
            else:
                # HTML build produces a directory tree; archive all of it.
                for f in os.listdir(path):
                    z.write(os.path.join(path, f), os.path.join(name, f))
            z.close()
        except ImportError:
            # epydoc is an optional build-time dependency.
            print >>sys.stderr, 'epydoc not installed, skipping build_doc.'
# Map distutils command names to the custom implementations above.
commands = {'build_doc': build_doc,
            'install_lib': install_lib}
# Long description shown on PyPI: README followed by the changelog.
desc = open("README.rst").read() + "\n" + open("CHANGES.rst").read()
# Runtime requirements that only apply on a given os.name.
system_specific_requires = {
    'posix': ['dbus', 'gobject'],
}
# start the distutils setup
setup(name='Skype4Py',
      version=VERSION,
      description='Skype API wrapper for Python.',
      long_description=desc,
      author='Arkadiusz Wahlig',
      author_email='arkadiusz.wahlig@gmail.com',
      maintainer='Mikko Ohtamaa',
      url='https://github.com/awahlig/skype4py',
      license='BSD License',
      platforms=['Windows', 'Linux', 'MacOS X'],
      packages=['Skype4Py', 'Skype4Py.api', 'Skype4Py.lang'],
      provides=['Skype4Py'],
      install_requires=['setuptools'],
      requires=system_specific_requires.get(os.name, []),
      zip_safe=True,
      cmdclass=commands)
| {
"content_hash": "46d28e3c7610b3af5f04dd3d2a4319be",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 87,
"avg_line_length": 30.923076923076923,
"alnum_prop": 0.5887230514096186,
"repo_name": "awahlig/skype4py",
"id": "24e47f36b84d9bb3e894003a6ac09a1271c47912",
"size": "4846",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "699548"
}
],
"symlink_target": ""
} |
from freezegun import freeze_time
import io
import os
import unittest
import json
from pathlib import Path
from unittest.mock import patch, Mock
from IdentityRecordedFuture import Actions, Client, period_to_date
import vcr as vcrpy
# Directory holding the recorded VCR cassettes and JSON fixtures.
CASSETTES = Path(__file__).parent / "test_data"
# Canonical timestamp reused across the mock-based tests below.
DATETIME_STR_VALUE = "2021-12-08T12:10:21.837Z"
def filter_out_whoami(response):
    """Scrub the ``api_key`` field from a recorded response body.

    Binary (non-UTF-8) payloads are returned untouched; JSON payloads are
    re-serialized without the ``api_key`` key so secrets never land in a
    cassette.
    """
    raw = response["body"]["string"]
    try:
        raw.decode("utf-8")
    except UnicodeDecodeError:
        # Not text, so it cannot contain a JSON api_key -- leave as-is.
        return response
    payload = json.loads(raw)
    payload.pop("api_key", None)
    response["body"]["string"] = json.dumps(payload).encode("utf-8")
    return response
# Shared VCR recorder: replays cassettes from CASSETTES, records a cassette
# once if missing, and redacts the auth token header plus any api_key in
# response bodies before anything is written to disk.
vcr = vcrpy.VCR(
    serializer="yaml",
    cassette_library_dir=str(CASSETTES),
    record_mode="once",
    filter_headers=[("X-RFToken", "XXXXXX")],
    before_record_response=filter_out_whoami,
)
def util_load_json(path):
    """Load and deserialize the UTF-8 JSON file at *path*."""
    with io.open(path, mode="r", encoding="utf-8") as handle:
        return json.load(handle)
def create_client() -> Client:
    """Build a Client pointed at the Recorded Future XSOAR gateway.

    The auth token is taken from the ``RF_TOKEN`` environment variable
    (``None`` when unset, e.g. during cassette playback).
    """
    token = os.environ.get("RF_TOKEN")
    request_headers = {
        "X-RFToken": token,
        "X-RF-User-Agent": "Cortex_XSOAR/2.0 Cortex_XSOAR_unittest_0.1",
    }
    return Client(
        base_url="https://api.recordedfuture.com/gw/xsoar/",
        verify=True,
        headers=request_headers,
        proxy=None,
    )
@vcr.use_cassette()
def test_client_whoami() -> None:
    """whoami() must come back as a JSON object (dict)."""
    response = create_client().whoami()
    assert isinstance(response, dict)
@patch("IdentityRecordedFuture.BaseClient._http_request", return_value={})
def test_identity_search(mock_http_request) -> None:
    """identity_search() must yield a dict even for an empty HTTP response."""
    response = create_client().identity_search(
        "fake.com", DATETIME_STR_VALUE, ["Email"], [], 0
    )
    assert isinstance(response, dict)
def test_period_to_date_none() -> None:
    """'All time' maps to no start date at all."""
    assert period_to_date("All time") is None
@freeze_time("2020-01-01")
def test_period_to_date_period() -> None:
    """With the clock frozen, '3 Months ago' resolves to a fixed ISO string."""
    period_start = period_to_date("3 Months ago")
    assert isinstance(period_start, str)
    assert period_start == "2019-10-01T00:00:00.000000Z"
class RFTestIdentity(unittest.TestCase):
    """Unit tests for the Actions command wrappers (search and lookup)."""
    def setUp(self) -> None:
        # Fixture values shared by both command tests below.
        self.domains = ["fake1.com"]
        self.password_properties = ["Letter", "Number"]
        self.period = "3 Months ago"
    @patch(
        "IdentityRecordedFuture.period_to_date",
        return_value=DATETIME_STR_VALUE,
    )
    def test_identity_search(self, period_to_date_mock) -> None:
        """Test search identities code"""
        domain_type = "All"
        # "All" is expected to expand to both concrete domain types.
        all_domain_types = ["Email", "Authorization"]
        limit_identities = 33
        action_prefix = "RecordedFuture.Credentials.SearchIdentities"
        search_response = util_load_json(
            "./test_data/identity_search_response.json"
        )
        client = create_client()
        # Stub the API call so the command wrapper is tested in isolation.
        client.identity_search = Mock(return_value=search_response)
        actions = Actions(client)
        action_return = actions.identity_search_command(
            self.domains,
            self.period,
            domain_type,
            self.password_properties,
            limit_identities,
        )
        # The period string must be converted to a date exactly once.
        period_to_date_mock.assert_called_once_with(self.period)
        client.identity_search.assert_called_once_with(
            self.domains,
            DATETIME_STR_VALUE,
            all_domain_types,
            self.password_properties,
            limit_identities,
        )
        self.assertEqual(action_return.outputs_prefix, action_prefix)
        self.assertEqual(action_return.outputs, search_response)
    @patch(
        "IdentityRecordedFuture.period_to_date",
        return_value=DATETIME_STR_VALUE,
    )
    def test_identity_lookup(self, period_to_date_mock):
        """The semicolon-separated identities string must be split into
        email, username and SHA-1 buckets before hitting the client."""
        email_identities = ["realname@fake.com"]
        username_identities = [
            {"login": "notreal", "domain": "fake1.com"},
            {
                "login_sha1": "afafa12344afafa12344afafa12344afafa12344",
                "domain": "fake1.com",
            },
        ]
        sha1_identities = ["afafa12344afafa12344afafa12344afafa12344"]
        identities = "realname@fake.com; notreal; afafa12344afafa12344afafa12344afafa12344"
        lookup_response = util_load_json(
            "./test_data/identity_lookup_response.json"
        )
        action_prefix = "RecordedFuture.Credentials.Identities"
        client = create_client()
        # Stub the API call so only the command wrapper logic is exercised.
        client.identity_lookup = Mock(return_value=lookup_response)
        actions = Actions(client)
        action_return = actions.identity_lookup_command(
            identities,
            self.period,
            self.password_properties,
            self.domains,
        )
        period_to_date_mock.assert_called_once_with(self.period)
        client.identity_lookup.assert_called_once_with(
            email_identities,
            username_identities,
            sha1_identities,
            DATETIME_STR_VALUE,
            self.password_properties,
        )
        self.assertEqual(action_return.outputs_prefix, action_prefix)
        self.assertEqual(action_return.outputs, lookup_response["identities"])
| {
"content_hash": "f621e7d42d59c5a42a2479bd8e641d26",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 91,
"avg_line_length": 31.87878787878788,
"alnum_prop": 0.6279467680608365,
"repo_name": "demisto/content",
"id": "e3f582db9f52c7965c8385e8f4e034771b0a9f42",
"size": "5260",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/IdentityRecordedFuture/Integrations/IdentityRecordedFuture/IdentityRecordedFuture_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47881712"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
import argparse
import itertools
import logging
import uuid
import mock
from oslo_config import cfg
from oslo_config import fixture as config
from oslo_serialization import jsonutils
import requests
import six
from testtools import matchers
from keystoneclient import adapter
from keystoneclient.auth import base
from keystoneclient import exceptions
from keystoneclient.i18n import _
from keystoneclient import session as client_session
from keystoneclient.tests.unit import utils
class SessionTests(utils.TestCase):
TEST_URL = 'http://127.0.0.1:5000/'
    def setUp(self):
        """Expect deprecation warnings emitted during these tests."""
        super(SessionTests, self).setUp()
        self.deprecations.expect_deprecations()
    def test_get(self):
        """GET through a Session returns the stubbed body with ok status."""
        session = client_session.Session()
        self.stub_url('GET', text='response')
        resp = session.get(self.TEST_URL)
        self.assertEqual('GET', self.requests_mock.last_request.method)
        self.assertEqual(resp.text, 'response')
        self.assertTrue(resp.ok)
    def test_post(self):
        """POST sends the JSON body and returns the stubbed response."""
        session = client_session.Session()
        self.stub_url('POST', text='response')
        resp = session.post(self.TEST_URL, json={'hello': 'world'})
        self.assertEqual('POST', self.requests_mock.last_request.method)
        self.assertEqual(resp.text, 'response')
        self.assertTrue(resp.ok)
        self.assertRequestBodyIs(json={'hello': 'world'})
    def test_head(self):
        """HEAD sends no body and succeeds against the stub."""
        session = client_session.Session()
        self.stub_url('HEAD')
        resp = session.head(self.TEST_URL)
        self.assertEqual('HEAD', self.requests_mock.last_request.method)
        self.assertTrue(resp.ok)
        self.assertRequestBodyIs('')
    def test_put(self):
        """PUT sends the JSON body and returns the stubbed response."""
        session = client_session.Session()
        self.stub_url('PUT', text='response')
        resp = session.put(self.TEST_URL, json={'hello': 'world'})
        self.assertEqual('PUT', self.requests_mock.last_request.method)
        self.assertEqual(resp.text, 'response')
        self.assertTrue(resp.ok)
        self.assertRequestBodyIs(json={'hello': 'world'})
    def test_delete(self):
        """DELETE reaches the stub and returns its body."""
        session = client_session.Session()
        self.stub_url('DELETE', text='response')
        resp = session.delete(self.TEST_URL)
        self.assertEqual('DELETE', self.requests_mock.last_request.method)
        self.assertTrue(resp.ok)
        self.assertEqual(resp.text, 'response')
    def test_patch(self):
        """PATCH sends the JSON body and returns the stubbed response."""
        session = client_session.Session()
        self.stub_url('PATCH', text='response')
        resp = session.patch(self.TEST_URL, json={'hello': 'world'})
        self.assertEqual('PATCH', self.requests_mock.last_request.method)
        self.assertTrue(resp.ok)
        self.assertEqual(resp.text, 'response')
        self.assertRequestBodyIs(json={'hello': 'world'})
    def test_user_agent(self):
        """User-Agent precedence: per-request kwarg > header > session."""
        session = client_session.Session(user_agent='test-agent')
        self.stub_url('GET', text='response')
        resp = session.get(self.TEST_URL)
        self.assertTrue(resp.ok)
        self.assertRequestHeaderEqual('User-Agent', 'test-agent')
        # A header supplied per request overrides the session default.
        resp = session.get(self.TEST_URL, headers={'User-Agent': 'new-agent'})
        self.assertTrue(resp.ok)
        self.assertRequestHeaderEqual('User-Agent', 'new-agent')
        # The user_agent kwarg wins over an explicit header.
        resp = session.get(self.TEST_URL, headers={'User-Agent': 'new-agent'},
                           user_agent='overrides-agent')
        self.assertTrue(resp.ok)
        self.assertRequestHeaderEqual('User-Agent', 'overrides-agent')
    def test_http_session_opts(self):
        """cert/timeout/verify session options are forwarded to requests."""
        session = client_session.Session(cert='cert.pem', timeout=5,
                                         verify='certs')
        FAKE_RESP = utils.test_response(text='resp')
        RESP = mock.Mock(return_value=FAKE_RESP)
        # Patch the underlying requests.Session.request to capture kwargs.
        with mock.patch.object(session.session, 'request', RESP) as mocked:
            session.post(self.TEST_URL, data='value')
            mock_args, mock_kwargs = mocked.call_args
            self.assertEqual(mock_args[0], 'POST')
            self.assertEqual(mock_args[1], self.TEST_URL)
            self.assertEqual(mock_kwargs['data'], 'value')
            self.assertEqual(mock_kwargs['cert'], 'cert.pem')
            self.assertEqual(mock_kwargs['verify'], 'certs')
            self.assertEqual(mock_kwargs['timeout'], 5)
    def test_not_found(self):
        """HTTP 404 is raised as exceptions.NotFound."""
        session = client_session.Session()
        self.stub_url('GET', status_code=404)
        self.assertRaises(exceptions.NotFound, session.get, self.TEST_URL)
    def test_server_error(self):
        """HTTP 500 is raised as exceptions.InternalServerError."""
        session = client_session.Session()
        self.stub_url('GET', status_code=500)
        self.assertRaises(exceptions.InternalServerError,
                          session.get, self.TEST_URL)
    def test_session_debug_output(self):
        """Debug logs must redact secure headers.

        Ordinary headers appear verbatim in the log output while
        security-sensitive headers are masked; the actual HTTP response
        still carries the real values.
        """
        session = client_session.Session(verify=False)
        headers = {'HEADERA': 'HEADERVALB'}
        security_headers = {'Authorization': uuid.uuid4().hex,
                            'X-Auth-Token': uuid.uuid4().hex,
                            'X-Subject-Token': uuid.uuid4().hex, }
        body = 'BODYRESPONSE'
        data = 'BODYDATA'
        all_headers = dict(
            itertools.chain(headers.items(), security_headers.items()))
        self.stub_url('POST', text=body, headers=all_headers)
        resp = session.post(self.TEST_URL, headers=all_headers, data=data)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('curl', self.logger.output)
        self.assertIn('POST', self.logger.output)
        self.assertIn('--insecure', self.logger.output)
        self.assertIn(body, self.logger.output)
        self.assertIn("'%s'" % data, self.logger.output)
        # Ordinary headers are logged verbatim.
        for k, v in six.iteritems(headers):
            self.assertIn(k, self.logger.output)
            self.assertIn(v, self.logger.output)
        # Assert that response headers contains actual values and
        # only debug logs has been masked
        for k, v in six.iteritems(security_headers):
            self.assertIn('%s: {SHA1}' % k, self.logger.output)
            self.assertEqual(v, resp.headers[k])
            self.assertNotIn(v, self.logger.output)
    def test_logs_failed_output(self):
        """Test that output is logged even for failed requests"""
        session = client_session.Session()
        # Random body guards against accidental matches in the log.
        body = uuid.uuid4().hex
        self.stub_url('GET', text=body, status_code=400)
        resp = session.get(self.TEST_URL, raise_exc=False)
        self.assertEqual(resp.status_code, 400)
        self.assertIn(body, self.logger.output)
def test_unicode_data_in_debug_output(self):
"""Verify that ascii-encodable data is logged without modification."""
session = client_session.Session(verify=False)
body = 'RESP'
data = u'unicode_data'
self.stub_url('POST', text=body)
session.post(self.TEST_URL, data=data)
self.assertIn("'%s'" % data, self.logger.output)
def test_binary_data_not_in_debug_output(self):
"""Verify that non-ascii-encodable data causes replacement."""
if six.PY2:
data = "my data" + chr(255)
else:
# Python 3 logging handles binary data well.
return
session = client_session.Session(verify=False)
body = 'RESP'
self.stub_url('POST', text=body)
# Forced mixed unicode and byte strings in request
# elements to make sure that all joins are appropriately
# handled (any join of unicode and byte strings should
# raise a UnicodeDecodeError)
session.post(unicode(self.TEST_URL), data=data)
self.assertIn("Replaced characters that could not be decoded"
" in log output", self.logger.output)
# Our data payload should have changed to
# include the replacement char
self.assertIn(u"-d 'my data\ufffd'", self.logger.output)
def test_logging_cacerts(self):
path_to_certs = '/path/to/certs'
session = client_session.Session(verify=path_to_certs)
self.stub_url('GET', text='text')
session.get(self.TEST_URL)
self.assertIn('--cacert', self.logger.output)
self.assertIn(path_to_certs, self.logger.output)
    def test_connect_retries(self):
        """Timeouts are retried with exponential backoff up to the limit."""
        def _timeout_error(request, context):
            raise requests.exceptions.Timeout()
        self.stub_url('GET', text=_timeout_error)
        session = client_session.Session()
        retries = 3
        with mock.patch('time.sleep') as m:
            self.assertRaises(exceptions.RequestTimeout,
                              session.get,
                              self.TEST_URL, connect_retries=retries)
            self.assertEqual(retries, m.call_count)
            # 3 retries finishing with 2.0 means 0.5, 1.0 and 2.0
            m.assert_called_with(2.0)
        # we count retries so there will be one initial request + 3 retries
        self.assertThat(self.requests_mock.request_history,
                        matchers.HasLength(retries + 1))
def test_uses_tcp_keepalive_by_default(self):
session = client_session.Session()
requests_session = session.session
self.assertIsInstance(requests_session.adapters['http://'],
client_session.TCPKeepAliveAdapter)
self.assertIsInstance(requests_session.adapters['https://'],
client_session.TCPKeepAliveAdapter)
def test_does_not_set_tcp_keepalive_on_custom_sessions(self):
mock_session = mock.Mock()
client_session.Session(session=mock_session)
self.assertFalse(mock_session.mount.called)
    def test_ssl_error_message(self):
        """SSL failures report both the URL and the underlying error."""
        error = uuid.uuid4().hex

        def _ssl_error(request, context):
            raise requests.exceptions.SSLError(error)
        self.stub_url('GET', text=_ssl_error)
        session = client_session.Session()
        # The exception should contain the URL and details about the SSL error
        msg = _('SSL exception connecting to %(url)s: %(error)s') % {
            'url': self.TEST_URL, 'error': error}
        six.assertRaisesRegex(self,
                              exceptions.SSLError,
                              msg,
                              session.get,
                              self.TEST_URL)
    def test_mask_password_in_http_log_response(self):
        """Passwords in logged response bodies are masked out."""
        session = client_session.Session()

        def fake_debug(msg):
            # Fails the test if the raw password ever reaches the log.
            self.assertNotIn('verybadpass', msg)
        logger = mock.Mock(isEnabledFor=mock.Mock(return_value=True))
        logger.debug = mock.Mock(side_effect=fake_debug)
        # Representative volume-attachment payload containing credentials.
        body = {
            "connection_info": {
                "driver_volume_type": "iscsi",
                "data": {
                    "auth_password": "verybadpass",
                    "target_discovered": False,
                    "encrypted": False,
                    "qos_specs": None,
                    "target_iqn": ("iqn.2010-10.org.openstack:volume-"
                                   "744d2085-8e78-40a5-8659-ef3cffb2480e"),
                    "target_portal": "172.99.69.228:3260",
                    "volume_id": "744d2085-8e78-40a5-8659-ef3cffb2480e",
                    "target_lun": 1,
                    "access_mode": "rw",
                    "auth_username": "verybadusername",
                    "auth_method": "CHAP"}}}
        body_json = jsonutils.dumps(body)
        response = mock.Mock(text=body_json, status_code=200, headers={})
        session._http_log_response(response, logger)
        # The masked body should still be logged exactly once.
        self.assertEqual(1, logger.debug.call_count)
class TCPKeepAliveAdapter(utils.TestCase):
    """Tests for the socket options set by TCPKeepAliveAdapter."""

    @mock.patch.object(client_session, 'socket')
    @mock.patch('requests.adapters.HTTPAdapter.init_poolmanager')
    def test_init_poolmanager_all_options(self, mock_parent_init_poolmanager,
                                          mock_socket):
        # properties expected to be in socket.
        mock_socket.TCP_KEEPIDLE = mock.sentinel.TCP_KEEPIDLE
        mock_socket.TCP_KEEPCNT = mock.sentinel.TCP_KEEPCNT
        mock_socket.TCP_KEEPINTVL = mock.sentinel.TCP_KEEPINTVL
        desired_opts = [mock_socket.TCP_KEEPIDLE, mock_socket.TCP_KEEPCNT,
                        mock_socket.TCP_KEEPINTVL]
        adapter = client_session.TCPKeepAliveAdapter()
        adapter.init_poolmanager()
        call_args, call_kwargs = mock_parent_init_poolmanager.call_args
        called_socket_opts = call_kwargs['socket_options']
        # Only the option constants matter here, not protocol or value.
        call_options = [opt for (protocol, opt, value) in called_socket_opts]
        for opt in desired_opts:
            self.assertIn(opt, call_options)

    @mock.patch.object(client_session, 'socket')
    @mock.patch('requests.adapters.HTTPAdapter.init_poolmanager')
    def test_init_poolmanager(self, mock_parent_init_poolmanager, mock_socket):
        # Restrict the socket mock to a platform lacking the TCP_KEEP*
        # constants; only the portable options should be passed through.
        spec = ['IPPROTO_TCP', 'TCP_NODELAY', 'SOL_SOCKET', 'SO_KEEPALIVE']
        mock_socket.mock_add_spec(spec)
        adapter = client_session.TCPKeepAliveAdapter()
        adapter.init_poolmanager()
        call_args, call_kwargs = mock_parent_init_poolmanager.call_args
        called_socket_opts = call_kwargs['socket_options']
        call_options = [opt for (protocol, opt, value) in called_socket_opts]
        self.assertEqual([mock_socket.TCP_NODELAY, mock_socket.SO_KEEPALIVE],
                         call_options)
class RedirectTests(utils.TestCase):
    """Tests for the Session redirect-following behaviour."""

    REDIRECT_CHAIN = ['http://myhost:3445/',
                      'http://anotherhost:6555/',
                      'http://thirdhost/',
                      'http://finaldestination:55/']

    DEFAULT_REDIRECT_BODY = 'Redirect'
    DEFAULT_RESP_BODY = 'Found'

    def setUp(self):
        super(RedirectTests, self).setUp()
        self.deprecations.expect_deprecations()

    def setup_redirects(self, method='GET', status_code=305,
                        redirect_kwargs=None, final_kwargs=None):
        """Register each chain URL to redirect to the next one.

        The last URL in the chain returns a normal 200 response.
        """
        redirect_kwargs = redirect_kwargs or {}
        final_kwargs = final_kwargs or {}
        redirect_kwargs.setdefault('text', self.DEFAULT_REDIRECT_BODY)

        for s, d in zip(self.REDIRECT_CHAIN, self.REDIRECT_CHAIN[1:]):
            self.requests_mock.register_uri(method, s, status_code=status_code,
                                            headers={'Location': d},
                                            **redirect_kwargs)

        final_kwargs.setdefault('status_code', 200)
        final_kwargs.setdefault('text', self.DEFAULT_RESP_BODY)
        self.requests_mock.register_uri(method, self.REDIRECT_CHAIN[-1],
                                        **final_kwargs)

    def assertResponse(self, resp):
        # The final destination's 200/'Found' response was reached.
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.text, self.DEFAULT_RESP_BODY)

    def test_basic_get(self):
        session = client_session.Session()
        self.setup_redirects()
        resp = session.get(self.REDIRECT_CHAIN[-2])
        self.assertResponse(resp)

    def test_basic_post_keeps_correct_method(self):
        # A 301 redirect of a POST must not be downgraded to GET.
        session = client_session.Session()
        self.setup_redirects(method='POST', status_code=301)
        resp = session.post(self.REDIRECT_CHAIN[-2])
        self.assertResponse(resp)

    def test_redirect_forever(self):
        session = client_session.Session(redirect=True)
        self.setup_redirects()
        resp = session.get(self.REDIRECT_CHAIN[0])
        self.assertResponse(resp)
        # BUG FIX: this assertion was assertTrue(len(...), len(...)), which
        # always passes because assertTrue's second argument is only the
        # failure message.  Every URL except the final one yields a redirect
        # response, so the history has one entry fewer than the chain.
        self.assertEqual(len(resp.history), len(self.REDIRECT_CHAIN) - 1)

    def test_no_redirect(self):
        session = client_session.Session(redirect=False)
        self.setup_redirects()
        resp = session.get(self.REDIRECT_CHAIN[0])
        self.assertEqual(resp.status_code, 305)
        self.assertEqual(resp.url, self.REDIRECT_CHAIN[0])

    def test_redirect_limit(self):
        # With redirect=i only i hops are followed; the final response is
        # the redirect sitting at position i in the chain.
        self.setup_redirects()
        for i in (1, 2):
            session = client_session.Session(redirect=i)
            resp = session.get(self.REDIRECT_CHAIN[0])
            self.assertEqual(resp.status_code, 305)
            self.assertEqual(resp.url, self.REDIRECT_CHAIN[i])
            self.assertEqual(resp.text, self.DEFAULT_REDIRECT_BODY)

    def test_history_matches_requests(self):
        # Session's redirect history must mirror what requests itself does.
        self.setup_redirects(status_code=301)
        session = client_session.Session(redirect=True)
        req_resp = requests.get(self.REDIRECT_CHAIN[0],
                                allow_redirects=True)
        ses_resp = session.get(self.REDIRECT_CHAIN[0])
        self.assertEqual(len(req_resp.history), len(ses_resp.history))
        for r, s in zip(req_resp.history, ses_resp.history):
            self.assertEqual(r.url, s.url)
            self.assertEqual(r.status_code, s.status_code)
class ConstructSessionFromArgsTests(utils.TestCase):
    """Tests for the deprecated Session.construct() factory."""

    KEY = 'keyfile'
    CERT = 'certfile'
    CACERT = 'cacert-path'

    def _s(self, k=None, **kwargs):
        # Build a Session from a kwargs dict, expecting the deprecation
        # warning that construct() emits.
        k = k or kwargs
        with self.deprecations.expect_deprecations_here():
            return client_session.Session.construct(k)

    def test_verify(self):
        # 'verify', when given, takes precedence over 'insecure'.
        self.assertFalse(self._s(insecure=True).verify)
        self.assertTrue(self._s(verify=True, insecure=True).verify)
        self.assertFalse(self._s(verify=False, insecure=True).verify)
        self.assertEqual(self._s(cacert=self.CACERT).verify, self.CACERT)

    def test_cert(self):
        # cert may be passed as a tuple or as separate cert/key values;
        # a key without a cert is ignored.
        tup = (self.CERT, self.KEY)
        self.assertEqual(self._s(cert=tup).cert, tup)
        self.assertEqual(self._s(cert=self.CERT, key=self.KEY).cert, tup)
        self.assertIsNone(self._s(key=self.KEY).cert)

    def test_pass_through(self):
        value = 42  # a number, because 'timeout' must be numeric
        for key in ['timeout', 'session', 'original_ip', 'user_agent']:
            args = {key: value}
            self.assertEqual(getattr(self._s(args), key), value)
            # construct() consumes the argument from the dict.
            self.assertNotIn(key, args)
class AuthPlugin(base.BaseAuthPlugin):
    """Minimal auth plugin used to drive the session tests.

    Serves a fixed token and a static service catalog, and reports a
    configurable result from invalidate().
    """

    TEST_TOKEN = utils.TestCase.TEST_TOKEN
    TEST_USER_ID = 'aUser'
    TEST_PROJECT_ID = 'aProject'

    SERVICE_URLS = {
        'identity': {
            'public': 'http://identity-public:1111/v2.0',
            'admin': 'http://identity-admin:1111/v2.0',
        },
        'compute': {
            'public': 'http://compute-public:2222/v1.0',
            'admin': 'http://compute-admin:2222/v1.0',
        },
        'image': {
            'public': 'http://image-public:3333/v2.0',
            'admin': 'http://image-admin:3333/v2.0',
        },
    }

    def __init__(self, token=TEST_TOKEN, invalidate=True):
        self.token = token
        self._invalidate = invalidate

    def get_token(self, session):
        # The token is static; there is nothing to refresh.
        return self.token

    def get_endpoint(self, session, service_type=None, interface=None,
                     **kwargs):
        # A missing service/interface combination behaves like a catalog
        # miss and yields None instead of raising.
        try:
            urls = self.SERVICE_URLS[service_type]
            return urls[interface]
        except (KeyError, AttributeError):
            return None

    def invalidate(self):
        return self._invalidate

    def get_user_id(self, session):
        return self.TEST_USER_ID

    def get_project_id(self, session):
        return self.TEST_PROJECT_ID
class CalledAuthPlugin(base.BaseAuthPlugin):
    """Auth plugin that records which of its entry points were invoked."""

    ENDPOINT = 'http://fakeendpoint/'

    def __init__(self, invalidate=True):
        self._invalidate = invalidate
        # Flags flipped as the session exercises each hook.
        self.get_token_called = False
        self.get_endpoint_called = False
        self.invalidate_called = False
        self.endpoint_arguments = {}

    def get_token(self, session):
        self.get_token_called = True
        return utils.TestCase.TEST_TOKEN

    def get_endpoint(self, session, **kwargs):
        self.get_endpoint_called = True
        # Keep the filter arguments so tests can inspect them afterwards.
        self.endpoint_arguments = kwargs
        return self.ENDPOINT

    def invalidate(self):
        self.invalidate_called = True
        return self._invalidate
class SessionAuthTests(utils.TestCase):
    """Tests for Session behaviour when an auth plugin is involved."""

    TEST_URL = 'http://127.0.0.1:5000/'
    TEST_JSON = {'hello': 'world'}

    def setUp(self):
        super(SessionAuthTests, self).setUp()
        self.deprecations.expect_deprecations()

    def stub_service_url(self, service_type, interface, path,
                         method='GET', **kwargs):
        # Register a mock response at the catalog URL for the service.
        base_url = AuthPlugin.SERVICE_URLS[service_type][interface]
        uri = "%s/%s" % (base_url.rstrip('/'), path.lstrip('/'))
        self.requests_mock.register_uri(method, uri, **kwargs)

    def test_auth_plugin_default_with_plugin(self):
        self.stub_url('GET', base_url=self.TEST_URL, json=self.TEST_JSON)
        # if there is an auth_plugin then it should default to authenticated
        auth = AuthPlugin()
        sess = client_session.Session(auth=auth)
        resp = sess.get(self.TEST_URL)
        self.assertDictEqual(resp.json(), self.TEST_JSON)
        self.assertRequestHeaderEqual('X-Auth-Token', AuthPlugin.TEST_TOKEN)

    def test_auth_plugin_disable(self):
        self.stub_url('GET', base_url=self.TEST_URL, json=self.TEST_JSON)
        auth = AuthPlugin()
        sess = client_session.Session(auth=auth)
        # authenticated=False suppresses the token header entirely.
        resp = sess.get(self.TEST_URL, authenticated=False)
        self.assertDictEqual(resp.json(), self.TEST_JSON)
        self.assertRequestHeaderEqual('X-Auth-Token', None)

    def test_service_type_urls(self):
        service_type = 'compute'
        interface = 'public'
        path = '/instances'
        status = 200
        body = 'SUCCESS'
        self.stub_service_url(service_type=service_type,
                              interface=interface,
                              path=path,
                              status_code=status,
                              text=body)
        sess = client_session.Session(auth=AuthPlugin())
        # A relative path plus an endpoint_filter resolves via the catalog.
        resp = sess.get(path,
                        endpoint_filter={'service_type': service_type,
                                         'interface': interface})
        self.assertEqual(self.requests_mock.last_request.url,
                         AuthPlugin.SERVICE_URLS['compute']['public'] + path)
        self.assertEqual(resp.text, body)
        self.assertEqual(resp.status_code, status)

    def test_service_url_raises_if_no_auth_plugin(self):
        sess = client_session.Session()
        self.assertRaises(exceptions.MissingAuthPlugin,
                          sess.get, '/path',
                          endpoint_filter={'service_type': 'compute',
                                           'interface': 'public'})

    def test_service_url_raises_if_no_url_returned(self):
        sess = client_session.Session(auth=AuthPlugin())
        self.assertRaises(exceptions.EndpointNotFound,
                          sess.get, '/path',
                          endpoint_filter={'service_type': 'unknown',
                                           'interface': 'public'})

    def test_raises_exc_only_when_asked(self):
        # A request that returns an HTTP error raises an exception by
        # default; with raise_exc=False it must not.
        self.requests_mock.get(self.TEST_URL, status_code=401)
        sess = client_session.Session()
        self.assertRaises(exceptions.Unauthorized, sess.get, self.TEST_URL)
        resp = sess.get(self.TEST_URL, raise_exc=False)
        self.assertEqual(401, resp.status_code)

    def test_passed_auth_plugin(self):
        passed = CalledAuthPlugin()
        sess = client_session.Session()
        self.requests_mock.get(CalledAuthPlugin.ENDPOINT + 'path',
                               status_code=200)
        endpoint_filter = {'service_type': 'identity'}
        # no plugin plus authenticated won't work
        self.assertRaises(exceptions.MissingAuthPlugin, sess.get, 'path',
                          authenticated=True)
        # no plugin plus an endpoint filter won't work
        self.assertRaises(exceptions.MissingAuthPlugin, sess.get, 'path',
                          authenticated=False, endpoint_filter=endpoint_filter)
        resp = sess.get('path', auth=passed, endpoint_filter=endpoint_filter)
        self.assertEqual(200, resp.status_code)
        self.assertTrue(passed.get_endpoint_called)
        self.assertTrue(passed.get_token_called)

    def test_passed_auth_plugin_overrides(self):
        fixed = CalledAuthPlugin()
        passed = CalledAuthPlugin()
        sess = client_session.Session(fixed)
        self.requests_mock.get(CalledAuthPlugin.ENDPOINT + 'path',
                               status_code=200)
        resp = sess.get('path', auth=passed,
                        endpoint_filter={'service_type': 'identity'})
        self.assertEqual(200, resp.status_code)
        # The per-request plugin, not the session's, must be consulted.
        self.assertTrue(passed.get_endpoint_called)
        self.assertTrue(passed.get_token_called)
        self.assertFalse(fixed.get_endpoint_called)
        self.assertFalse(fixed.get_token_called)

    def test_requests_auth_plugin(self):
        sess = client_session.Session()
        requests_auth = object()
        FAKE_RESP = utils.test_response(text='resp')
        RESP = mock.Mock(return_value=FAKE_RESP)
        with mock.patch.object(sess.session, 'request', RESP) as mocked:
            sess.get(self.TEST_URL, requests_auth=requests_auth)
            # requests_auth is forwarded untouched to requests' auth kwarg.
            mocked.assert_called_once_with('GET', self.TEST_URL,
                                           headers=mock.ANY,
                                           allow_redirects=mock.ANY,
                                           auth=requests_auth,
                                           verify=mock.ANY)

    def test_reauth_called(self):
        auth = CalledAuthPlugin(invalidate=True)
        sess = client_session.Session(auth=auth)
        # First response is a 401; the retry after invalidation succeeds.
        self.requests_mock.get(self.TEST_URL,
                               [{'text': 'Failed', 'status_code': 401},
                                {'text': 'Hello', 'status_code': 200}])
        # allow_reauth=True is the default
        resp = sess.get(self.TEST_URL, authenticated=True)
        self.assertEqual(200, resp.status_code)
        self.assertEqual('Hello', resp.text)
        self.assertTrue(auth.invalidate_called)

    def test_reauth_not_called(self):
        auth = CalledAuthPlugin(invalidate=True)
        sess = client_session.Session(auth=auth)
        self.requests_mock.get(self.TEST_URL,
                               [{'text': 'Failed', 'status_code': 401},
                                {'text': 'Hello', 'status_code': 200}])
        # With allow_reauth=False the 401 propagates without invalidating.
        self.assertRaises(exceptions.Unauthorized, sess.get, self.TEST_URL,
                          authenticated=True, allow_reauth=False)
        self.assertFalse(auth.invalidate_called)

    def test_endpoint_override_overrides_filter(self):
        auth = CalledAuthPlugin()
        sess = client_session.Session(auth=auth)
        override_base = 'http://mytest/'
        path = 'path'
        override_url = override_base + path
        resp_text = uuid.uuid4().hex
        self.requests_mock.get(override_url, text=resp_text)
        resp = sess.get(path,
                        endpoint_override=override_base,
                        endpoint_filter={'service_type': 'identity'})
        self.assertEqual(resp_text, resp.text)
        self.assertEqual(override_url, self.requests_mock.last_request.url)
        # endpoint_override replaces catalog lookup but not authentication.
        self.assertTrue(auth.get_token_called)
        self.assertFalse(auth.get_endpoint_called)

    def test_endpoint_override_ignore_full_url(self):
        auth = CalledAuthPlugin()
        sess = client_session.Session(auth=auth)
        path = 'path'
        url = self.TEST_URL + path
        resp_text = uuid.uuid4().hex
        self.requests_mock.get(url, text=resp_text)
        resp = sess.get(url,
                        endpoint_override='http://someother.url',
                        endpoint_filter={'service_type': 'identity'})
        self.assertEqual(resp_text, resp.text)
        # A fully-qualified request URL wins over endpoint_override.
        self.assertEqual(url, self.requests_mock.last_request.url)
        self.assertTrue(auth.get_token_called)
        self.assertFalse(auth.get_endpoint_called)

    def test_user_and_project_id(self):
        auth = AuthPlugin()
        sess = client_session.Session(auth=auth)
        self.assertEqual(auth.TEST_USER_ID, sess.get_user_id())
        self.assertEqual(auth.TEST_PROJECT_ID, sess.get_project_id())

    def test_logger_object_passed(self):
        logger = logging.getLogger(uuid.uuid4().hex)
        logger.setLevel(logging.DEBUG)
        logger.propagate = False
        io = six.StringIO()
        handler = logging.StreamHandler(io)
        logger.addHandler(handler)
        auth = AuthPlugin()
        sess = client_session.Session(auth=auth)
        response = uuid.uuid4().hex
        self.stub_url('GET',
                      text=response,
                      headers={'Content-Type': 'text/html'})
        resp = sess.get(self.TEST_URL, logger=logger)
        self.assertEqual(response, resp.text)
        output = io.getvalue()
        # Output lands on the passed-in logger, not the default one.
        self.assertIn(self.TEST_URL, output)
        self.assertIn(response, output)
        self.assertNotIn(self.TEST_URL, self.logger.output)
        self.assertNotIn(response, self.logger.output)
class AdapterTest(utils.TestCase):
    """Tests for the Adapter wrapper around Session."""

    SERVICE_TYPE = uuid.uuid4().hex
    SERVICE_NAME = uuid.uuid4().hex
    INTERFACE = uuid.uuid4().hex
    REGION_NAME = uuid.uuid4().hex
    USER_AGENT = uuid.uuid4().hex
    VERSION = uuid.uuid4().hex

    TEST_URL = CalledAuthPlugin.ENDPOINT

    def setUp(self):
        super(AdapterTest, self).setUp()
        self.deprecations.expect_deprecations()

    def _create_loaded_adapter(self):
        # Adapter carrying every endpoint-selection argument.
        auth = CalledAuthPlugin()
        sess = client_session.Session()
        return adapter.Adapter(sess,
                               auth=auth,
                               service_type=self.SERVICE_TYPE,
                               service_name=self.SERVICE_NAME,
                               interface=self.INTERFACE,
                               region_name=self.REGION_NAME,
                               user_agent=self.USER_AGENT,
                               version=self.VERSION)

    def _verify_endpoint_called(self, adpt):
        # Check the plugin saw every filter value the adapter holds.
        self.assertEqual(self.SERVICE_TYPE,
                         adpt.auth.endpoint_arguments['service_type'])
        self.assertEqual(self.SERVICE_NAME,
                         adpt.auth.endpoint_arguments['service_name'])
        self.assertEqual(self.INTERFACE,
                         adpt.auth.endpoint_arguments['interface'])
        self.assertEqual(self.REGION_NAME,
                         adpt.auth.endpoint_arguments['region_name'])
        self.assertEqual(self.VERSION,
                         adpt.auth.endpoint_arguments['version'])

    def test_setting_variables_on_request(self):
        response = uuid.uuid4().hex
        self.stub_url('GET', text=response)
        adpt = self._create_loaded_adapter()
        resp = adpt.get('/')
        self.assertEqual(resp.text, response)
        self._verify_endpoint_called(adpt)
        self.assertTrue(adpt.auth.get_token_called)
        self.assertRequestHeaderEqual('User-Agent', self.USER_AGENT)

    def test_setting_variables_on_get_endpoint(self):
        adpt = self._create_loaded_adapter()
        url = adpt.get_endpoint()
        self.assertEqual(self.TEST_URL, url)
        self._verify_endpoint_called(adpt)

    def test_legacy_binding(self):
        key = uuid.uuid4().hex
        val = uuid.uuid4().hex
        response = jsonutils.dumps({key: val})
        self.stub_url('GET', text=response)
        auth = CalledAuthPlugin()
        sess = client_session.Session(auth=auth)
        adpt = adapter.LegacyJsonAdapter(sess,
                                         service_type=self.SERVICE_TYPE,
                                         user_agent=self.USER_AGENT)
        # The legacy adapter returns (response, decoded_json_body).
        resp, body = adpt.get('/')
        self.assertEqual(self.SERVICE_TYPE,
                         auth.endpoint_arguments['service_type'])
        self.assertEqual(resp.text, response)
        self.assertEqual(val, body[key])

    def test_legacy_binding_non_json_resp(self):
        response = uuid.uuid4().hex
        self.stub_url('GET', text=response,
                      headers={'Content-Type': 'text/html'})
        auth = CalledAuthPlugin()
        sess = client_session.Session(auth=auth)
        adpt = adapter.LegacyJsonAdapter(sess,
                                         service_type=self.SERVICE_TYPE,
                                         user_agent=self.USER_AGENT)
        resp, body = adpt.get('/')
        self.assertEqual(self.SERVICE_TYPE,
                         auth.endpoint_arguments['service_type'])
        self.assertEqual(resp.text, response)
        # Non-JSON responses yield no decoded body.
        self.assertIsNone(body)

    def test_methods(self):
        sess = client_session.Session()
        adpt = adapter.Adapter(sess)
        url = 'http://url'
        # Each HTTP helper delegates to request() with the right verb.
        for method in ['get', 'head', 'post', 'put', 'patch', 'delete']:
            with mock.patch.object(adpt, 'request') as m:
                getattr(adpt, method)(url)
                m.assert_called_once_with(url, method.upper())

    def test_setting_endpoint_override(self):
        endpoint_override = 'http://overrideurl'
        path = '/path'
        endpoint_url = endpoint_override + path
        auth = CalledAuthPlugin()
        sess = client_session.Session(auth=auth)
        adpt = adapter.Adapter(sess, endpoint_override=endpoint_override)
        response = uuid.uuid4().hex
        self.requests_mock.get(endpoint_url, text=response)
        resp = adpt.get(path)
        self.assertEqual(response, resp.text)
        self.assertEqual(endpoint_url, self.requests_mock.last_request.url)
        self.assertEqual(endpoint_override, adpt.get_endpoint())

    def test_adapter_invalidate(self):
        auth = CalledAuthPlugin()
        sess = client_session.Session()
        adpt = adapter.Adapter(sess, auth=auth)
        adpt.invalidate()
        self.assertTrue(auth.invalidate_called)

    def test_adapter_get_token(self):
        auth = CalledAuthPlugin()
        sess = client_session.Session()
        adpt = adapter.Adapter(sess, auth=auth)
        self.assertEqual(self.TEST_TOKEN, adpt.get_token())
        self.assertTrue(auth.get_token_called)

    def test_adapter_connect_retries(self):
        retries = 2
        sess = client_session.Session()
        adpt = adapter.Adapter(sess, connect_retries=retries)

        def _refused_error(request, context):
            raise requests.exceptions.ConnectionError()
        self.stub_url('GET', text=_refused_error)
        with mock.patch('time.sleep') as m:
            self.assertRaises(exceptions.ConnectionRefused,
                              adpt.get, self.TEST_URL)
            self.assertEqual(retries, m.call_count)
        # we count retries so there will be one initial request + 2 retries
        self.assertThat(self.requests_mock.request_history,
                        matchers.HasLength(retries + 1))

    def test_user_and_project_id(self):
        auth = AuthPlugin()
        sess = client_session.Session()
        adpt = adapter.Adapter(sess, auth=auth)
        self.assertEqual(auth.TEST_USER_ID, adpt.get_user_id())
        self.assertEqual(auth.TEST_PROJECT_ID, adpt.get_project_id())

    def test_logger_object_passed(self):
        logger = logging.getLogger(uuid.uuid4().hex)
        logger.setLevel(logging.DEBUG)
        logger.propagate = False
        io = six.StringIO()
        handler = logging.StreamHandler(io)
        logger.addHandler(handler)
        auth = AuthPlugin()
        sess = client_session.Session(auth=auth)
        adpt = adapter.Adapter(sess, auth=auth, logger=logger)
        response = uuid.uuid4().hex
        self.stub_url('GET', text=response,
                      headers={'Content-Type': 'text/html'})
        resp = adpt.get(self.TEST_URL, logger=logger)
        self.assertEqual(response, resp.text)
        output = io.getvalue()
        # Output lands on the passed-in logger, not the default one.
        self.assertIn(self.TEST_URL, output)
        self.assertIn(response, output)
        self.assertNotIn(self.TEST_URL, self.logger.output)
        self.assertNotIn(response, self.logger.output)
class ConfLoadingTests(utils.TestCase):
    """Tests for building a Session from oslo.config options."""

    GROUP = 'sessiongroup'

    def setUp(self):
        super(ConfLoadingTests, self).setUp()
        self.conf_fixture = self.useFixture(config.Config())
        client_session.Session.register_conf_options(self.conf_fixture.conf,
                                                     self.GROUP)

    def config(self, **kwargs):
        # Set config values inside the group registered in setUp().
        kwargs['group'] = self.GROUP
        self.conf_fixture.config(**kwargs)

    def get_session(self, **kwargs):
        with self.deprecations.expect_deprecations_here():
            return client_session.Session.load_from_conf_options(
                self.conf_fixture.conf,
                self.GROUP,
                **kwargs)

    def test_insecure_timeout(self):
        self.config(insecure=True, timeout=5)
        s = self.get_session()
        self.assertFalse(s.verify)
        self.assertEqual(5, s.timeout)

    def test_client_certs(self):
        cert = '/path/to/certfile'
        key = '/path/to/keyfile'
        self.config(certfile=cert, keyfile=key)
        s = self.get_session()
        self.assertTrue(s.verify)
        self.assertEqual((cert, key), s.cert)

    def test_cacert(self):
        cafile = '/path/to/cacert'
        self.config(cafile=cafile)
        s = self.get_session()
        self.assertEqual(cafile, s.verify)

    def test_deprecated(self):
        def new_deprecated():
            return cfg.DeprecatedOpt(uuid.uuid4().hex, group=uuid.uuid4().hex)
        # Every option should carry the deprecated alias declared for it.
        opt_names = ['cafile', 'certfile', 'keyfile', 'insecure', 'timeout']
        depr = dict([(n, [new_deprecated()]) for n in opt_names])
        opts = client_session.Session.get_conf_options(deprecated_opts=depr)
        self.assertThat(opt_names, matchers.HasLength(len(opts)))
        for opt in opts:
            self.assertIn(depr[opt.name][0], opt.deprecated_opts)
class CliLoadingTests(utils.TestCase):
    """Tests for building a Session from parsed command-line arguments."""

    def setUp(self):
        super(CliLoadingTests, self).setUp()
        self.parser = argparse.ArgumentParser()
        client_session.Session.register_cli_options(self.parser)

    def get_session(self, val, **kwargs):
        # Parse a command-line string and load a Session from the result.
        parsed = self.parser.parse_args(val.split())
        with self.deprecations.expect_deprecations_here():
            return client_session.Session.load_from_cli_options(parsed,
                                                                **kwargs)

    def test_insecure_timeout(self):
        sess = self.get_session('--insecure --timeout 5.5')
        self.assertFalse(sess.verify)
        self.assertEqual(5.5, sess.timeout)

    def test_client_certs(self):
        cert = '/path/to/certfile'
        key = '/path/to/keyfile'
        sess = self.get_session('--os-cert %s --os-key %s' % (cert, key))
        self.assertTrue(sess.verify)
        self.assertEqual((cert, key), sess.cert)

    def test_cacert(self):
        cacert = '/path/to/cacert'
        sess = self.get_session('--os-cacert %s' % cacert)
        self.assertEqual(cacert, sess.verify)
| {
"content_hash": "4df1f12025ed0ae3d03c99f5ef633ed8",
"timestamp": "",
"source": "github",
"line_count": 1069,
"max_line_length": 79,
"avg_line_length": 36.56969130028064,
"alnum_prop": 0.6004143964392602,
"repo_name": "klmitch/python-keystoneclient",
"id": "b72a1858c78058feb436541828c9d56a84f36a03",
"size": "39639",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystoneclient/tests/unit/test_session.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1230691"
},
{
"name": "Shell",
"bytes": "1776"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as UserAdminBase
from huxley.accounts.models import User
class UserAdmin(UserAdminBase):
    """Admin configuration for Huxley users.

    Extends the stock Django user admin with the BMUN-specific fields
    defined on the custom User model.
    """

    model = User

    fieldsets = UserAdminBase.fieldsets + (
        ('BMUN-Specific Information',
         {'fields': ('user_type', 'school', 'committee')}),
    )


admin.site.register(User, UserAdmin)
| {
"content_hash": "9041396ef72daf66c6a9cd250f087e94",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 64,
"avg_line_length": 22.65,
"alnum_prop": 0.5894039735099338,
"repo_name": "ctmunwebmaster/huxley",
"id": "5603fe1826f639674e7350567e12c2edb3d255c7",
"size": "625",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "huxley/accounts/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20631"
},
{
"name": "HTML",
"bytes": "5628"
},
{
"name": "JavaScript",
"bytes": "87792"
},
{
"name": "Python",
"bytes": "191860"
},
{
"name": "Shell",
"bytes": "2855"
}
],
"symlink_target": ""
} |
__author__ = 'jeffrey creighton & anand patel'
# Purpose: to collect and store scores from all players and matches
import ScorekeeperHistoryItem
import ScoreKeeperListItem
class Scorekeeper:
    """Collects and stores scores from all players and matches.

    NOTE(review): the shared class-level ``leader_board``/``match_history``
    lists were removed; ``__init__`` always set instance attributes that
    shadowed them, so the class-level lists were dead (and a mutable
    shared-state hazard).
    """

    def __init__(self):
        # leader_board holds ScorekeeperListItem entries, one per player.
        self.leader_board = []
        # match_history holds ScorekeeperHistoryItem entries, one per match.
        self.match_history = []

    def update_tournament(self, p1, p2, w, s1, s2):
        """Record one match: append it to the history, ensure both players
        appear on the leader board, and award the winner a point.
        """
        # Record the match details in the history.
        history_item = ScorekeeperHistoryItem.ScorekeeperHistoryItem(
            p1, p2, w, s1, s2)
        self.match_history.append(history_item)
        # Ensure both players are represented on the leader board.
        list_item_a = ScoreKeeperListItem.ScorekeeperListItem(p1)
        list_item_b = ScoreKeeperListItem.ScorekeeperListItem(p2)
        if self.check_player(list_item_a):
            self.leader_board.append(list_item_a)
        if self.check_player(list_item_b):
            self.leader_board.append(list_item_b)
        # Award a point to the winner; a tie awards nothing.
        if s1 > s2:
            self.make_winner(p1)
        elif s2 > s1:
            self.make_winner(p2)

    def check_player(self, player):
        """Return True if *player* (a ScorekeeperListItem) is NOT yet on
        the leader board, False if it is already present.
        """
        # BUG FIX: the original loop overwrote the result on every
        # iteration, so only the last board entry was effectively compared,
        # and it compared a list item against a raw player object (never
        # equal).  Compare the wrapped players and stop at the first match.
        target = player.get_player()
        for entry in self.leader_board:
            if entry.get_player() == target:
                return False
        return True

    def make_winner(self, player):
        """Award one point to *player*'s entry on the leader board."""
        # BUG FIX: the original tested `player is self.leader_board[i]`,
        # comparing a raw player against ScorekeeperListItem wrappers, so
        # it never matched and no score was ever recorded.  Match on the
        # wrapped player and update the board entry's score.
        for entry in self.leader_board:
            if entry.get_player() is player:
                entry.update_score()

    def get_leader_board(self):
        """Return this leader_board."""
        return self.leader_board
#returns this match_history
def get_match_history(self):
return self.match_history | {
"content_hash": "981365829cf309a0058a4729684ce641",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 98,
"avg_line_length": 33.36363636363637,
"alnum_prop": 0.628519527702089,
"repo_name": "PaulieC/sprint1_Council",
"id": "811a8b3c88024206042be7c6eb8bfa931154204d",
"size": "2202",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Scorekeeper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46404"
}
],
"symlink_target": ""
} |
<<<<<<< HEAD
<<<<<<< HEAD
"""CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import tkinter
from tkinter.constants import TOP, LEFT, X, W, SUNKEN
import re
from sys import maxsize as INFINITY
from idlelib.configHandler import idleConf
BLOCKOPENERS = {"class", "def", "elif", "else", "except", "finally", "for",
"if", "try", "while", "with"}
UPDATEINTERVAL = 100 # millisec
FONTUPDATEINTERVAL = 1000 # millisec
def getspacesfirstword(s, c=re.compile(r"^(\s*)(\w*)")):
    """Return (leading_whitespace, first_word) for the line *s*.

    The pre-compiled pattern is bound as a default argument so it is
    built once at definition time, preserving the original lambda's
    behavior while avoiding a lambda assigned to a name (PEP 8 E731).
    """
    return c.match(s).groups()
class CodeContext:
    """IDLE extension that shows enclosing-block context above the editor."""
    # Menu entry used by IDLE's extension machinery to toggle the pane.
    menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
    # Pane depth and colours come from the CodeContext section of
    # config-extensions.def.
    context_depth = idleConf.GetOption("extensions", "CodeContext",
                                       "numlines", type="int", default=3)
    bgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "bgcolor", type="str", default="LightGray")
    fgcolor = idleConf.GetOption("extensions", "CodeContext",
                                 "fgcolor", type="str", default="Black")
    def __init__(self, editwin):
        """Attach the extension to an editor window and start its timers."""
        self.editwin = editwin
        self.text = editwin.text
        self.textfont = self.text["font"]
        # The context pane; None while the pane is hidden.
        self.label = None
        # self.info is a list of (line number, indent level, line text, block
        # keyword) tuples providing the block structure associated with
        # self.topvisible (the linenumber of the line displayed at the top of
        # the edit window). self.info[0] is initialized as a 'dummy' line which
        # starts the toplevel 'block' of the module.
        self.info = [(0, -1, "", False)]
        self.topvisible = 1
        visible = idleConf.GetOption("extensions", "CodeContext",
                                     "visible", type="bool", default=False)
        if visible:
            self.toggle_code_context_event()
            self.editwin.setvar('<<toggle-code-context>>', True)
        # Start two update cycles, one for context lines, one for font changes.
        self.text.after(UPDATEINTERVAL, self.timer_event)
        self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
def toggle_code_context_event(self, event=None):
if not self.label:
# Calculate the border width and horizontal padding required to
# align the context with the text in the main Text widget.
#
# All values are passed through int(str(<value>)), since some
# values may be pixel objects, which can't simply be added to ints.
widgets = self.editwin.text, self.editwin.text_frame
# Calculate the required vertical padding
padx = 0
for widget in widgets:
padx += int(str( widget.pack_info()['padx'] ))
padx += int(str( widget.cget('padx') ))
# Calculate the required border width
border = 0
for widget in widgets:
border += int(str( widget.cget('border') ))
self.label = tkinter.Label(self.editwin.top,
text="\n" * (self.context_depth - 1),
anchor=W, justify=LEFT,
font=self.textfont,
bg=self.bgcolor, fg=self.fgcolor,
width=1, #don't request more than we get
padx=padx, border=border,
relief=SUNKEN)
# Pack the label widget before and above the text_frame widget,
# thus ensuring that it will appear directly above text_frame
self.label.pack(side=TOP, fill=X, expand=False,
before=self.editwin.text_frame)
else:
self.label.destroy()
self.label = None
idleConf.SetOption("extensions", "CodeContext", "visible",
str(self.label is not None))
idleConf.SaveUserCfgFiles()
def get_line_info(self, linenum):
"""Get the line indent value, text, and any block start keyword
If the line does not start a block, the keyword value is False.
The indentation of empty lines (or comment lines) is INFINITY.
"""
text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
spaces, firstword = getspacesfirstword(text)
opener = firstword in BLOCKOPENERS and firstword
if len(text) == len(spaces) or text[len(spaces)] == '#':
indent = INFINITY
else:
indent = len(spaces)
return indent, text, opener
def get_context(self, new_topvisible, stopline=1, stopindent=0):
"""Get context lines, starting at new_topvisible and working backwards.
Stop when stopline or stopindent is reached. Return a tuple of context
data and the indent level at the top of the region inspected.
"""
assert stopline > 0
lines = []
# The indentation level we are currently in:
lastindent = INFINITY
# For a line to be interesting, it must begin with a block opening
# keyword, and have less indentation than lastindent.
for linenum in range(new_topvisible, stopline-1, -1):
indent, text, opener = self.get_line_info(linenum)
if indent < lastindent:
lastindent = indent
if opener in ("else", "elif"):
# We also show the if statement
lastindent += 1
if opener and linenum < new_topvisible and indent >= stopindent:
lines.append((linenum, indent, text, opener))
if lastindent <= stopindent:
break
lines.reverse()
return lines, lastindent
def update_code_context(self):
"""Update context information and lines visible in the context pane.
"""
new_topvisible = int(self.text.index("@0,0").split('.')[0])
if self.topvisible == new_topvisible: # haven't scrolled
return
if self.topvisible < new_topvisible: # scroll down
lines, lastindent = self.get_context(new_topvisible,
self.topvisible)
# retain only context info applicable to the region
# between topvisible and new_topvisible:
while self.info[-1][1] >= lastindent:
del self.info[-1]
elif self.topvisible > new_topvisible: # scroll up
stopindent = self.info[-1][1] + 1
# retain only context info associated
# with lines above new_topvisible:
while self.info[-1][0] >= new_topvisible:
stopindent = self.info[-1][1]
del self.info[-1]
lines, lastindent = self.get_context(new_topvisible,
self.info[-1][0]+1,
stopindent)
self.info.extend(lines)
self.topvisible = new_topvisible
# empty lines in context pane:
context_strings = [""] * max(0, self.context_depth - len(self.info))
# followed by the context hint lines:
context_strings += [x[2] for x in self.info[-self.context_depth:]]
self.label["text"] = '\n'.join(context_strings)
def timer_event(self):
if self.label:
self.update_code_context()
self.text.after(UPDATEINTERVAL, self.timer_event)
def font_timer_event(self):
newtextfont = self.text["font"]
if self.label and newtextfont != self.textfont:
self.textfont = newtextfont
self.label["font"] = self.textfont
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
=======
"""CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import tkinter
from tkinter.constants import TOP, LEFT, X, W, SUNKEN
import re
from sys import maxsize as INFINITY
from idlelib.configHandler import idleConf
BLOCKOPENERS = {"class", "def", "elif", "else", "except", "finally", "for",
"if", "try", "while", "with"}
UPDATEINTERVAL = 100 # millisec
FONTUPDATEINTERVAL = 1000 # millisec
getspacesfirstword =\
lambda s, c=re.compile(r"^(\s*)(\w*)"): c.match(s).groups()
class CodeContext:
menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
context_depth = idleConf.GetOption("extensions", "CodeContext",
"numlines", type="int", default=3)
bgcolor = idleConf.GetOption("extensions", "CodeContext",
"bgcolor", type="str", default="LightGray")
fgcolor = idleConf.GetOption("extensions", "CodeContext",
"fgcolor", type="str", default="Black")
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.textfont = self.text["font"]
self.label = None
# self.info is a list of (line number, indent level, line text, block
# keyword) tuples providing the block structure associated with
# self.topvisible (the linenumber of the line displayed at the top of
# the edit window). self.info[0] is initialized as a 'dummy' line which
# starts the toplevel 'block' of the module.
self.info = [(0, -1, "", False)]
self.topvisible = 1
visible = idleConf.GetOption("extensions", "CodeContext",
"visible", type="bool", default=False)
if visible:
self.toggle_code_context_event()
self.editwin.setvar('<<toggle-code-context>>', True)
# Start two update cycles, one for context lines, one for font changes.
self.text.after(UPDATEINTERVAL, self.timer_event)
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
def toggle_code_context_event(self, event=None):
if not self.label:
# Calculate the border width and horizontal padding required to
# align the context with the text in the main Text widget.
#
# All values are passed through int(str(<value>)), since some
# values may be pixel objects, which can't simply be added to ints.
widgets = self.editwin.text, self.editwin.text_frame
# Calculate the required vertical padding
padx = 0
for widget in widgets:
padx += int(str( widget.pack_info()['padx'] ))
padx += int(str( widget.cget('padx') ))
# Calculate the required border width
border = 0
for widget in widgets:
border += int(str( widget.cget('border') ))
self.label = tkinter.Label(self.editwin.top,
text="\n" * (self.context_depth - 1),
anchor=W, justify=LEFT,
font=self.textfont,
bg=self.bgcolor, fg=self.fgcolor,
width=1, #don't request more than we get
padx=padx, border=border,
relief=SUNKEN)
# Pack the label widget before and above the text_frame widget,
# thus ensuring that it will appear directly above text_frame
self.label.pack(side=TOP, fill=X, expand=False,
before=self.editwin.text_frame)
else:
self.label.destroy()
self.label = None
idleConf.SetOption("extensions", "CodeContext", "visible",
str(self.label is not None))
idleConf.SaveUserCfgFiles()
def get_line_info(self, linenum):
"""Get the line indent value, text, and any block start keyword
If the line does not start a block, the keyword value is False.
The indentation of empty lines (or comment lines) is INFINITY.
"""
text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
spaces, firstword = getspacesfirstword(text)
opener = firstword in BLOCKOPENERS and firstword
if len(text) == len(spaces) or text[len(spaces)] == '#':
indent = INFINITY
else:
indent = len(spaces)
return indent, text, opener
def get_context(self, new_topvisible, stopline=1, stopindent=0):
"""Get context lines, starting at new_topvisible and working backwards.
Stop when stopline or stopindent is reached. Return a tuple of context
data and the indent level at the top of the region inspected.
"""
assert stopline > 0
lines = []
# The indentation level we are currently in:
lastindent = INFINITY
# For a line to be interesting, it must begin with a block opening
# keyword, and have less indentation than lastindent.
for linenum in range(new_topvisible, stopline-1, -1):
indent, text, opener = self.get_line_info(linenum)
if indent < lastindent:
lastindent = indent
if opener in ("else", "elif"):
# We also show the if statement
lastindent += 1
if opener and linenum < new_topvisible and indent >= stopindent:
lines.append((linenum, indent, text, opener))
if lastindent <= stopindent:
break
lines.reverse()
return lines, lastindent
def update_code_context(self):
"""Update context information and lines visible in the context pane.
"""
new_topvisible = int(self.text.index("@0,0").split('.')[0])
if self.topvisible == new_topvisible: # haven't scrolled
return
if self.topvisible < new_topvisible: # scroll down
lines, lastindent = self.get_context(new_topvisible,
self.topvisible)
# retain only context info applicable to the region
# between topvisible and new_topvisible:
while self.info[-1][1] >= lastindent:
del self.info[-1]
elif self.topvisible > new_topvisible: # scroll up
stopindent = self.info[-1][1] + 1
# retain only context info associated
# with lines above new_topvisible:
while self.info[-1][0] >= new_topvisible:
stopindent = self.info[-1][1]
del self.info[-1]
lines, lastindent = self.get_context(new_topvisible,
self.info[-1][0]+1,
stopindent)
self.info.extend(lines)
self.topvisible = new_topvisible
# empty lines in context pane:
context_strings = [""] * max(0, self.context_depth - len(self.info))
# followed by the context hint lines:
context_strings += [x[2] for x in self.info[-self.context_depth:]]
self.label["text"] = '\n'.join(context_strings)
def timer_event(self):
if self.label:
self.update_code_context()
self.text.after(UPDATEINTERVAL, self.timer_event)
def font_timer_event(self):
newtextfont = self.text["font"]
if self.label and newtextfont != self.textfont:
self.textfont = newtextfont
self.label["font"] = self.textfont
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
=======
"""CodeContext - Extension to display the block context above the edit window
Once code has scrolled off the top of a window, it can be difficult to
determine which block you are in. This extension implements a pane at the top
of each IDLE edit window which provides block structure hints. These hints are
the lines which contain the block opening keywords, e.g. 'if', for the
enclosing block. The number of hint lines is determined by the numlines
variable in the CodeContext section of config-extensions.def. Lines which do
not open blocks are not shown in the context hints pane.
"""
import tkinter
from tkinter.constants import TOP, LEFT, X, W, SUNKEN
import re
from sys import maxsize as INFINITY
from idlelib.configHandler import idleConf
BLOCKOPENERS = {"class", "def", "elif", "else", "except", "finally", "for",
"if", "try", "while", "with"}
UPDATEINTERVAL = 100 # millisec
FONTUPDATEINTERVAL = 1000 # millisec
getspacesfirstword =\
lambda s, c=re.compile(r"^(\s*)(\w*)"): c.match(s).groups()
class CodeContext:
menudefs = [('options', [('!Code Conte_xt', '<<toggle-code-context>>')])]
context_depth = idleConf.GetOption("extensions", "CodeContext",
"numlines", type="int", default=3)
bgcolor = idleConf.GetOption("extensions", "CodeContext",
"bgcolor", type="str", default="LightGray")
fgcolor = idleConf.GetOption("extensions", "CodeContext",
"fgcolor", type="str", default="Black")
def __init__(self, editwin):
self.editwin = editwin
self.text = editwin.text
self.textfont = self.text["font"]
self.label = None
# self.info is a list of (line number, indent level, line text, block
# keyword) tuples providing the block structure associated with
# self.topvisible (the linenumber of the line displayed at the top of
# the edit window). self.info[0] is initialized as a 'dummy' line which
# starts the toplevel 'block' of the module.
self.info = [(0, -1, "", False)]
self.topvisible = 1
visible = idleConf.GetOption("extensions", "CodeContext",
"visible", type="bool", default=False)
if visible:
self.toggle_code_context_event()
self.editwin.setvar('<<toggle-code-context>>', True)
# Start two update cycles, one for context lines, one for font changes.
self.text.after(UPDATEINTERVAL, self.timer_event)
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
def toggle_code_context_event(self, event=None):
if not self.label:
# Calculate the border width and horizontal padding required to
# align the context with the text in the main Text widget.
#
# All values are passed through int(str(<value>)), since some
# values may be pixel objects, which can't simply be added to ints.
widgets = self.editwin.text, self.editwin.text_frame
# Calculate the required vertical padding
padx = 0
for widget in widgets:
padx += int(str( widget.pack_info()['padx'] ))
padx += int(str( widget.cget('padx') ))
# Calculate the required border width
border = 0
for widget in widgets:
border += int(str( widget.cget('border') ))
self.label = tkinter.Label(self.editwin.top,
text="\n" * (self.context_depth - 1),
anchor=W, justify=LEFT,
font=self.textfont,
bg=self.bgcolor, fg=self.fgcolor,
width=1, #don't request more than we get
padx=padx, border=border,
relief=SUNKEN)
# Pack the label widget before and above the text_frame widget,
# thus ensuring that it will appear directly above text_frame
self.label.pack(side=TOP, fill=X, expand=False,
before=self.editwin.text_frame)
else:
self.label.destroy()
self.label = None
idleConf.SetOption("extensions", "CodeContext", "visible",
str(self.label is not None))
idleConf.SaveUserCfgFiles()
def get_line_info(self, linenum):
"""Get the line indent value, text, and any block start keyword
If the line does not start a block, the keyword value is False.
The indentation of empty lines (or comment lines) is INFINITY.
"""
text = self.text.get("%d.0" % linenum, "%d.end" % linenum)
spaces, firstword = getspacesfirstword(text)
opener = firstword in BLOCKOPENERS and firstword
if len(text) == len(spaces) or text[len(spaces)] == '#':
indent = INFINITY
else:
indent = len(spaces)
return indent, text, opener
def get_context(self, new_topvisible, stopline=1, stopindent=0):
"""Get context lines, starting at new_topvisible and working backwards.
Stop when stopline or stopindent is reached. Return a tuple of context
data and the indent level at the top of the region inspected.
"""
assert stopline > 0
lines = []
# The indentation level we are currently in:
lastindent = INFINITY
# For a line to be interesting, it must begin with a block opening
# keyword, and have less indentation than lastindent.
for linenum in range(new_topvisible, stopline-1, -1):
indent, text, opener = self.get_line_info(linenum)
if indent < lastindent:
lastindent = indent
if opener in ("else", "elif"):
# We also show the if statement
lastindent += 1
if opener and linenum < new_topvisible and indent >= stopindent:
lines.append((linenum, indent, text, opener))
if lastindent <= stopindent:
break
lines.reverse()
return lines, lastindent
def update_code_context(self):
"""Update context information and lines visible in the context pane.
"""
new_topvisible = int(self.text.index("@0,0").split('.')[0])
if self.topvisible == new_topvisible: # haven't scrolled
return
if self.topvisible < new_topvisible: # scroll down
lines, lastindent = self.get_context(new_topvisible,
self.topvisible)
# retain only context info applicable to the region
# between topvisible and new_topvisible:
while self.info[-1][1] >= lastindent:
del self.info[-1]
elif self.topvisible > new_topvisible: # scroll up
stopindent = self.info[-1][1] + 1
# retain only context info associated
# with lines above new_topvisible:
while self.info[-1][0] >= new_topvisible:
stopindent = self.info[-1][1]
del self.info[-1]
lines, lastindent = self.get_context(new_topvisible,
self.info[-1][0]+1,
stopindent)
self.info.extend(lines)
self.topvisible = new_topvisible
# empty lines in context pane:
context_strings = [""] * max(0, self.context_depth - len(self.info))
# followed by the context hint lines:
context_strings += [x[2] for x in self.info[-self.context_depth:]]
self.label["text"] = '\n'.join(context_strings)
def timer_event(self):
if self.label:
self.update_code_context()
self.text.after(UPDATEINTERVAL, self.timer_event)
def font_timer_event(self):
newtextfont = self.text["font"]
if self.label and newtextfont != self.textfont:
self.textfont = newtextfont
self.label["font"] = self.textfont
self.text.after(FONTUPDATEINTERVAL, self.font_timer_event)
>>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
| {
"content_hash": "8da1d871bb6f468cef7981ab93bed7d2",
"timestamp": "",
"source": "github",
"line_count": 534,
"max_line_length": 80,
"avg_line_length": 47.161048689138575,
"alnum_prop": 0.5758815120711563,
"repo_name": "ArcherSys/ArcherSys",
"id": "f56b5c8ec3b6ca8af65f7b20322993a258504ad5",
"size": "25184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/idlelib/CodeContext.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Implements oldconfig functionality.
1. Loads existing .config
2. Prompts for the value of all modifiable symbols/choices that
aren't already set in the .config
3. Writes an updated .config
The default input/output filename is '.config'. A different filename can be
passed in the KCONFIG_CONFIG environment variable.
When overwriting a configuration file, the old version is saved to
<filename>.old (e.g. .config.old).
Entering '?' displays the help text of the symbol/choice, if any.
Unlike 'make oldconfig', this script doesn't print menu titles and comments,
but gives Kconfig definition locations. Printing menus and comments would be
pretty easy to add: Look at the parents of each item, and print all menu
prompts and comments unless they have already been printed (assuming you want
to skip "irrelevant" menus).
"""
from __future__ import print_function
import sys
from kconfiglib import Symbol, Choice, BOOL, TRISTATE, HEX, standard_kconfig
# Python 2/3 compatibility hack: on Python 2, raw_input() has the
# semantics this script needs from input().
if sys.version_info[0] < 3:
    input = raw_input  # noqa: F821 (only defined on Python 2)
def _main():
    """Load .config, prompt for unset visible symbols, write .config back.

    Reruns the pass whenever any value changed, because earlier symbols in
    Kconfig files might depend on later symbols and become visible if their
    values change.
    """
    # Set to True by oldconfig() whenever any symbol's value changes.
    global conf_changed

    kconf = standard_kconfig(__doc__)
    print(kconf.load_config())

    while True:
        conf_changed = False
        for node in kconf.node_iter():
            oldconfig(node)
        if not conf_changed:
            break

    print(kconf.write_config())
def oldconfig(node):
    """
    Prompts the user for a value if node.item is a visible symbol/choice
    with no user value.  Sets the module-global conf_changed flag when a
    value actually changes (see _main()).
    """
    global conf_changed

    # Only symbols and choices can be configured
    if not isinstance(node.item, (Symbol, Choice)):
        return

    # Skip symbols and choices that aren't visible
    if not node.item.visibility:
        return

    # Skip symbols and choices that don't have a prompt (at this location)
    if not node.prompt:
        return

    if isinstance(node.item, Symbol):
        sym = node.item

        # Skip symbols that already have a user value
        if sym.user_value is not None:
            return

        # Skip symbols that can only have a single value, due to selects
        if len(sym.assignable) == 1:
            return

        # Skip symbols in choices in y mode. We ask once for the entire
        # choice instead.
        if sym.choice and sym.choice.tri_value == 2:
            return

        # Loop until the user enters a valid value or enters a blank string
        # (for the default value)
        while True:
            val = input("{} ({}) [{}] ".format(
                node.prompt[0], _name_and_loc_str(sym),
                _default_value_str(sym)))

            if val == "?":
                _print_help(node)
                continue

            # Substitute a blank string with the default value the symbol
            # would get
            if not val:
                val = sym.str_value

            # Automatically add a "0x" prefix for hex symbols, like the
            # menuconfig interface does. This isn't done when loading
            # .config files, hence why set_value() doesn't do it
            # automatically.
            if sym.type == HEX and not val.startswith(("0x", "0X")):
                val = "0x" + val

            old_str_val = sym.str_value

            # Kconfiglib itself will print a warning here if the value
            # is invalid, so we don't need to bother
            if sym.set_value(val):
                # Valid value input. We're done with this node.
                if sym.str_value != old_str_val:
                    conf_changed = True
                return

    else:
        choice = node.item

        # Skip choices that already have a visible user selection...
        if choice.user_selection and choice.user_selection.visibility == 2:
            # ...unless there are new visible symbols in the choice. (We
            # know they have y (2) visibility in that case, because
            # m-visible symbols get demoted to n-visibility in y-mode
            # choices, and the user-selected symbol had visibility y.)
            for sym in choice.syms:
                if sym is not choice.user_selection and sym.visibility and \
                   sym.user_value is None:
                    # New visible symbols in the choice
                    break
            else:
                # No new visible symbols in the choice
                return

        # Get a list of available selections. The mode of the choice limits
        # the visibility of the choice value symbols, so this will
        # indirectly skip choices in n and m mode.
        options = [sym for sym in choice.syms if sym.visibility == 2]

        if not options:
            # No y-visible choice value symbols
            return

        # Loop until the user enters a valid selection or a blank string
        # (for the default selection)
        while True:
            print("{} ({})".format(node.prompt[0], _name_and_loc_str(choice)))

            for i, sym in enumerate(options, 1):
                print("{} {}. {} ({})".format(
                    ">" if sym is choice.selection else " ",
                    i,
                    # Assume people don't define choice symbols with
                    # multiple prompts. That generates a warning anyway.
                    sym.nodes[0].prompt[0],
                    sym.name))

            sel_index = input("choice[1-{}]: ".format(len(options)))

            if sel_index == "?":
                _print_help(node)
                continue

            # Pick the default selection if the string is blank
            if not sel_index:
                choice.selection.set_value(2)
                break

            try:
                sel_index = int(sel_index)
            except ValueError:
                print("Bad index", file=sys.stderr)
                continue

            if not 1 <= sel_index <= len(options):
                print("Bad index", file=sys.stderr)
                continue

            # Valid selection
            if options[sel_index - 1].tri_value != 2:
                conf_changed = True
            options[sel_index - 1].set_value(2)
            break

        # Give all of the non-selected visible choice symbols the user
        # value n. This makes it so that the choice is no longer considered
        # new once we do additional passes, if the reason that it was
        # considered new was that it had new visible choice symbols.
        #
        # Only giving visible choice symbols the user value n means we will
        # prompt for the choice again if later user selections make more
        # new choice symbols visible, which is correct.
        for sym in choice.syms:
            if sym is not choice.user_selection and sym.visibility:
                sym.set_value(0)
def _name_and_loc_str(sc):
# Helper for printing the name of the symbol/choice 'sc' along with the
# location(s) in the Kconfig files where it is defined. Unnamed choices
# return "choice" instead of the name.
return "{}, defined at {}".format(
sc.name or "choice",
", ".join("{}:{}".format(node.filename, node.linenr)
for node in sc.nodes))
def _print_help(node):
print("\n" + (node.help or "No help text\n"))
def _default_value_str(sym):
    """Return the default-value hint shown in the prompt for *sym*.

    For bool/tristate symbols this is the "m/M/y"-style string in e.g.

      TRISTATE_SYM prompt (TRISTATE_SYM, defined at Kconfig:9) [n/M/y]:

    where the current value's letter is uppercased.  For string/int/hex
    symbols, returns the current value as-is.
    """
    if sym.type in (BOOL, TRISTATE):
        return "/".join(("NMY" if sym.tri_value == tri else "nmy")[tri]
                        for tri in sym.assignable)

    # string/int/hex
    return sym.str_value
# Script entry point; keeps the module importable without side effects.
if __name__ == "__main__":
    _main()
| {
"content_hash": "8c3ba98ac55aa0a5e1c35581f744a39b",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 79,
"avg_line_length": 33.12448132780083,
"alnum_prop": 0.5856194413127896,
"repo_name": "gem5/gem5",
"id": "53434b252ff60969f948effd1218bbd4dbc200fd",
"size": "8080",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable",
"path": "ext/Kconfiglib/oldconfig.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
} |
import json
import requests
from glance.openstack.common import uuidutils
from glance.tests import functional
# Distinct random tenant ids, used to exercise per-tenant visibility and
# access control in the tests below.
TENANT1 = uuidutils.generate_uuid()
TENANT2 = uuidutils.generate_uuid()
TENANT3 = uuidutils.generate_uuid()
TENANT4 = uuidutils.generate_uuid()
class TestImages(functional.FunctionalTest):
def setUp(self):
super(TestImages, self).setUp()
self.cleanup()
self.api_server.deployment_flavor = 'noauth'
self.start_servers(**self.__dict__.copy())
def _url(self, path):
return 'http://127.0.0.1:%d%s' % (self.api_port, path)
def _headers(self, custom_headers=None):
base_headers = {
'X-Identity-Status': 'Confirmed',
'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
'X-Tenant-Id': TENANT1,
'X-Roles': 'member',
}
base_headers.update(custom_headers or {})
return base_headers
def test_image_lifecycle(self):
# Image list should be empty
path = self._url('/v2/images')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(0, len(images))
# Create an image (with a deployer-defined property)
path = self._url('/v2/images')
headers = self._headers({'content-type': 'application/json'})
data = json.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar',
'disk_format': 'aki', 'container_format': 'aki'})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
image_location_header = response.headers['Location']
# Returned image entity should have a generated id and status
image = json.loads(response.text)
image_id = image['id']
checked_keys = set([
u'status',
u'name',
u'tags',
u'created_at',
u'updated_at',
u'visibility',
u'self',
u'protected',
u'id',
u'file',
u'min_disk',
u'foo',
u'type',
u'min_ram',
u'schema',
u'disk_format',
u'container_format',
])
self.assertEqual(set(image.keys()), checked_keys)
expected_image = {
'status': 'queued',
'name': 'image-1',
'tags': [],
'visibility': 'private',
'self': '/v2/images/%s' % image_id,
'protected': False,
'file': '/v2/images/%s/file' % image_id,
'min_disk': 0,
'foo': 'bar',
'type': 'kernel',
'min_ram': 0,
'schema': '/v2/schemas/image',
}
for key, value in expected_image.items():
self.assertEqual(image[key], value, key)
# Image list should now have one entry
path = self._url('/v2/images')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(1, len(images))
self.assertEqual(images[0]['id'], image_id)
# Get the image using the returned Location header
response = requests.get(image_location_header, headers=self._headers())
self.assertEqual(200, response.status_code)
image = json.loads(response.text)
self.assertEqual(image_id, image['id'])
self.assertFalse('checksum' in image)
self.assertFalse('size' in image)
self.assertEqual('bar', image['foo'])
self.assertEqual(False, image['protected'])
self.assertEqual('kernel', image['type'])
self.assertTrue(image['created_at'])
self.assertTrue(image['updated_at'])
self.assertEqual(image['updated_at'], image['created_at'])
# The image should be mutable, including adding and removing properties
path = self._url('/v2/images/%s' % image_id)
media_type = 'application/openstack-images-v2.1-json-patch'
headers = self._headers({'content-type': media_type})
data = json.dumps([
{'op': 'replace', 'path': '/name', 'value': 'image-2'},
{'op': 'replace', 'path': '/disk_format', 'value': 'vhd'},
{'op': 'replace', 'path': '/foo', 'value': 'baz'},
{'op': 'add', 'path': '/ping', 'value': 'pong'},
{'op': 'replace', 'path': '/protected', 'value': True},
{'op': 'remove', 'path': '/type'},
])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned image entity should reflect the changes
image = json.loads(response.text)
self.assertEqual('image-2', image['name'])
self.assertEqual('vhd', image['disk_format'])
self.assertEqual('baz', image['foo'])
self.assertEqual('pong', image['ping'])
self.assertEqual(True, image['protected'])
self.assertFalse('type' in image, response.text)
# Ensure the v2.0 json-patch content type is accepted
path = self._url('/v2/images/%s' % image_id)
media_type = 'application/openstack-images-v2.0-json-patch'
headers = self._headers({'content-type': media_type})
data = json.dumps([{'add': '/ding', 'value': 'dong'}])
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Returned image entity should reflect the changes
image = json.loads(response.text)
self.assertEqual('dong', image['ding'])
# Updates should persist across requests
path = self._url('/v2/images/%s' % image_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
image = json.loads(response.text)
self.assertEqual(image_id, image['id'])
self.assertEqual('image-2', image['name'])
self.assertEqual('baz', image['foo'])
self.assertEqual('pong', image['ping'])
self.assertEqual(True, image['protected'])
self.assertFalse('type' in image, response.text)
# Try to download data before its uploaded
path = self._url('/v2/images/%s/file' % image_id)
headers = self._headers()
response = requests.get(path, headers=headers)
self.assertEqual(404, response.status_code)
# Upload some image data
path = self._url('/v2/images/%s/file' % image_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='ZZZZZ')
self.assertEqual(204, response.status_code)
# Checksum should be populated and status should be active
path = self._url('/v2/images/%s' % image_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
image = json.loads(response.text)
self.assertEqual('8f113e38d28a79a5a451b16048cc2b72', image['checksum'])
self.assertEqual('active', image['status'])
# Try to download the data that was just uploaded
path = self._url('/v2/images/%s/file' % image_id)
headers = self._headers()
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
self.assertEqual('8f113e38d28a79a5a451b16048cc2b72',
response.headers['Content-MD5'])
self.assertEqual(response.text, 'ZZZZZ')
# Uploading duplicate data should be rejected with a 409
path = self._url('/v2/images/%s/file' % image_id)
headers = self._headers({'Content-Type': 'application/octet-stream'})
response = requests.put(path, headers=headers, data='XXX')
self.assertEqual(409, response.status_code)
# Ensure the size is updated to reflect the data uploaded
path = self._url('/v2/images/%s' % image_id)
headers = self._headers()
response = requests.get(path, headers=headers)
self.assertEqual(200, response.status_code)
self.assertEqual(5, json.loads(response.text)['size'])
# Deletion should not work on protected images
path = self._url('/v2/images/%s' % image_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(403, response.status_code)
# Unprotect image for deletion
path = self._url('/v2/images/%s' % image_id)
media_type = 'application/openstack-images-v2.1-json-patch'
headers = self._headers({'content-type': media_type})
doc = [{'op': 'replace', 'path': '/protected', 'value': False}]
data = json.dumps(doc)
response = requests.patch(path, headers=headers, data=data)
self.assertEqual(200, response.status_code, response.text)
# Deletion should work
path = self._url('/v2/images/%s' % image_id)
response = requests.delete(path, headers=self._headers())
self.assertEqual(204, response.status_code)
# This image should be no longer be directly accessible
path = self._url('/v2/images/%s' % image_id)
response = requests.get(path, headers=self._headers())
self.assertEqual(404, response.status_code)
# And neither should its data
path = self._url('/v2/images/%s/file' % image_id)
headers = self._headers()
response = requests.get(path, headers=headers)
self.assertEqual(404, response.status_code)
# Image list should now be empty
path = self._url('/v2/images')
response = requests.get(path, headers=self._headers())
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(0, len(images))
self.stop_servers()
def test_permissions(self):
    """Verify tenant-based access control on image read, update and delete.

    TENANT1 owns the image; TENANT2 must get 404s (image hidden),
    and after the image is publicized TENANT3 may read but not
    modify or delete it.
    """
    patch_media = 'application/openstack-images-v2.1-json-patch'

    # Create an image that belongs to TENANT1
    resp = requests.post(
        self._url('/v2/images'),
        headers=self._headers({'Content-Type': 'application/json'}),
        data=json.dumps({'name': 'image-1', 'disk_format': 'raw',
                         'container_format': 'bare'}))
    self.assertEqual(201, resp.status_code)
    image_id = json.loads(resp.text)['id']
    image_url = self._url('/v2/images/%s' % image_id)
    file_url = self._url('/v2/images/%s/file' % image_id)

    # Upload some image data
    resp = requests.put(
        file_url,
        headers=self._headers({'Content-Type': 'application/octet-stream'}),
        data='ZZZZZ')
    self.assertEqual(204, resp.status_code)

    # TENANT1 should see the image in their list
    resp = requests.get(self._url('/v2/images'), headers=self._headers())
    self.assertEqual(200, resp.status_code)
    listed = json.loads(resp.text)['images']
    self.assertEqual(image_id, listed[0]['id'])

    # TENANT1 should be able to access the image directly
    resp = requests.get(image_url, headers=self._headers())
    self.assertEqual(200, resp.status_code)

    # TENANT2 should not see the image in their list
    tenant2_headers = self._headers({'X-Tenant-Id': TENANT2})
    resp = requests.get(self._url('/v2/images'), headers=tenant2_headers)
    self.assertEqual(200, resp.status_code)
    self.assertEqual(0, len(json.loads(resp.text)['images']))

    # TENANT2 should not be able to access the image directly
    resp = requests.get(image_url, headers=tenant2_headers)
    self.assertEqual(404, resp.status_code)

    # TENANT2 should not be able to modify the image, either
    resp = requests.patch(
        image_url,
        headers=self._headers({'Content-Type': patch_media,
                               'X-Tenant-Id': TENANT2}),
        data=json.dumps([{'op': 'replace', 'path': '/name',
                          'value': 'image-2'}]))
    self.assertEqual(404, resp.status_code)

    # TENANT2 should not be able to delete the image, either
    resp = requests.delete(image_url, headers=tenant2_headers)
    self.assertEqual(404, resp.status_code)

    # Publicize the image as an admin of TENANT1
    resp = requests.patch(
        image_url,
        headers=self._headers({'Content-Type': patch_media,
                               'X-Roles': 'admin'}),
        data=json.dumps([{'op': 'replace', 'path': '/visibility',
                          'value': 'public'}]))
    self.assertEqual(200, resp.status_code)

    # TENANT3 should now see the image in their list
    tenant3_headers = self._headers({'X-Tenant-Id': TENANT3})
    resp = requests.get(self._url('/v2/images'), headers=tenant3_headers)
    self.assertEqual(200, resp.status_code)
    listed = json.loads(resp.text)['images']
    self.assertEqual(image_id, listed[0]['id'])

    # TENANT3 should also be able to access the image directly
    resp = requests.get(image_url, headers=tenant3_headers)
    self.assertEqual(200, resp.status_code)

    # TENANT3 still should not be able to modify the image
    resp = requests.patch(
        image_url,
        headers=self._headers({'Content-Type': patch_media,
                               'X-Tenant-Id': TENANT3}),
        data=json.dumps([{'op': 'replace', 'path': '/name',
                          'value': 'image-2'}]))
    self.assertEqual(403, resp.status_code)

    # TENANT3 should not be able to delete the image, either
    resp = requests.delete(image_url, headers=tenant3_headers)
    self.assertEqual(403, resp.status_code)

    # Image data should still be present after the failed delete
    resp = requests.get(file_url, headers=self._headers())
    self.assertEqual(200, resp.status_code)
    self.assertEqual(resp.text, 'ZZZZZ')

    self.stop_servers()
def test_property_protections(self):
    """Exercise role-based property protections on the v2 image API.

    With a property-protection file loaded, verifies that the 'member'
    and 'spl_role' roles are only allowed to create, read, update and
    delete the properties granted to them, and that forbidden
    operations fail with 403.
    """
    # Enable property protection
    self.api_server.property_protection_file = self.property_file
    self.start_servers(**self.__dict__.copy())

    # Image list should be empty
    path = self._url('/v2/images')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    images = json.loads(response.text)['images']
    self.assertEqual(0, len(images))

    # Create an image for role member with extra props.
    # Raises 403 since user is not allowed to set 'foo'.
    path = self._url('/v2/images')
    headers = self._headers({'content-type': 'application/json',
                             'X-Roles': 'member'})
    data = json.dumps({'name': 'image-1', 'foo': 'bar',
                       'disk_format': 'aki', 'container_format': 'aki',
                       'x_owner_foo': 'o_s_bar'})
    response = requests.post(path, headers=headers, data=data)
    self.assertEqual(403, response.status_code)

    # Create an image for role member without 'foo'
    path = self._url('/v2/images')
    headers = self._headers({'content-type': 'application/json',
                             'X-Roles': 'member'})
    data = json.dumps({'name': 'image-1', 'disk_format': 'aki',
                       'container_format': 'aki',
                       'x_owner_foo': 'o_s_bar'})
    response = requests.post(path, headers=headers, data=data)
    self.assertEqual(201, response.status_code)

    # Returned image entity should have 'x_owner_foo'
    image = json.loads(response.text)
    image_id = image['id']
    expected_image = {
        'status': 'queued',
        'name': 'image-1',
        'tags': [],
        'visibility': 'private',
        'self': '/v2/images/%s' % image_id,
        'protected': False,
        'file': '/v2/images/%s/file' % image_id,
        'min_disk': 0,
        'x_owner_foo': 'o_s_bar',
        'min_ram': 0,
        'schema': '/v2/schemas/image',
    }
    for key, value in expected_image.items():
        # Third argument labels the failing key in the assertion message.
        self.assertEqual(image[key], value, key)

    # Create an image for role spl_role with extra props
    path = self._url('/v2/images')
    headers = self._headers({'content-type': 'application/json',
                             'X-Roles': 'spl_role'})
    data = json.dumps({'name': 'image-1',
                       'disk_format': 'aki', 'container_format': 'aki',
                       'spl_create_prop': 'create_bar',
                       'spl_read_prop': 'read_bar',
                       'spl_update_prop': 'update_bar',
                       'spl_delete_prop': 'delete_bar'})
    response = requests.post(path, headers=headers, data=data)
    self.assertEqual(201, response.status_code)
    image = json.loads(response.text)
    image_id = image['id']

    # Attempt to replace properties which are forbidden
    path = self._url('/v2/images/%s' % image_id)
    media_type = 'application/openstack-images-v2.1-json-patch'
    headers = self._headers({'content-type': media_type,
                             'X-Roles': 'spl_role'})
    data = json.dumps([
        {'op': 'replace', 'path': '/spl_read_prop', 'value': 'r'},
        {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'},
    ])
    response = requests.patch(path, headers=headers, data=data)
    self.assertEqual(403, response.status_code, response.text)

    # Attempt to add and remove properties which are forbidden
    path = self._url('/v2/images/%s' % image_id)
    media_type = 'application/openstack-images-v2.1-json-patch'
    headers = self._headers({'content-type': media_type,
                             'X-Roles': 'spl_role'})
    data = json.dumps([
        {'op': 'add', 'path': '/spl_new_prop', 'value': 'new'},
        {'op': 'remove', 'path': '/spl_create_prop'},
        {'op': 'remove', 'path': '/spl_delete_prop'},
    ])
    response = requests.patch(path, headers=headers, data=data)
    self.assertEqual(403, response.status_code, response.text)

    # Attempt to replace and remove properties which are permitted
    path = self._url('/v2/images/%s' % image_id)
    media_type = 'application/openstack-images-v2.1-json-patch'
    headers = self._headers({'content-type': media_type,
                             'X-Roles': 'spl_role'})
    data = json.dumps([
        {'op': 'replace', 'path': '/spl_update_prop', 'value': 'u'},
        {'op': 'remove', 'path': '/spl_delete_prop'},
    ])
    response = requests.patch(path, headers=headers, data=data)
    self.assertEqual(200, response.status_code, response.text)

    # Returned image entity should reflect the changes
    image = json.loads(response.text)
    # 'spl_update_prop' has update permission for spl_role
    # hence the value has changed
    self.assertEqual('u', image['spl_update_prop'])
    # 'spl_delete_prop' has delete permission for spl_role
    # hence the property has been deleted
    self.assertNotIn('spl_delete_prop', image)

    # Image Deletion should work
    path = self._url('/v2/images/%s' % image_id)
    response = requests.delete(path, headers=self._headers())
    self.assertEqual(204, response.status_code)

    # This image should no longer be directly accessible
    path = self._url('/v2/images/%s' % image_id)
    response = requests.get(path, headers=self._headers())
    self.assertEqual(404, response.status_code)

    self.stop_servers()
def test_tag_lifecycle(self):
    """Exercise creating, deduplicating, querying and deleting image tags."""
    # Create an image with a tag - duplicate should be ignored
    path = self._url('/v2/images')
    headers = self._headers({'Content-Type': 'application/json'})
    data = json.dumps({'name': 'image-1', 'tags': ['sniff', 'sniff']})
    response = requests.post(path, headers=headers, data=data)
    self.assertEqual(201, response.status_code)
    image_id = json.loads(response.text)['id']

    # Image should show a list with a single tag
    path = self._url('/v2/images/%s' % image_id)
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    tags = json.loads(response.text)['tags']
    self.assertEqual(['sniff'], tags)

    # Update image with duplicate tag - it should be ignored
    path = self._url('/v2/images/%s' % image_id)
    media_type = 'application/openstack-images-v2.1-json-patch'
    headers = self._headers({'content-type': media_type})
    doc = [
        {
            'op': 'replace',
            'path': '/tags',
            'value': ['sniff', 'snozz', 'snozz'],
        },
    ]
    data = json.dumps(doc)
    response = requests.patch(path, headers=headers, data=data)
    self.assertEqual(200, response.status_code)
    tags = json.loads(response.text)['tags']
    self.assertEqual(['snozz', 'sniff'], tags)

    # Image should show the appropriate tags
    path = self._url('/v2/images/%s' % image_id)
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    tags = json.loads(response.text)['tags']
    self.assertEqual(['snozz', 'sniff'], tags)

    # Attempt to tag the image with a duplicate should be ignored
    path = self._url('/v2/images/%s/tags/snozz' % image_id)
    response = requests.put(path, headers=self._headers())
    self.assertEqual(204, response.status_code)

    # Create another more complex tag ('%%' collapses to a literal '%'
    # when the template is %-formatted with the image id, yielding the
    # URL-encoded '@' as '%40')
    path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id)
    response = requests.put(path, headers=self._headers())
    self.assertEqual(204, response.status_code)

    # Double-check that the tags container on the image is populated
    path = self._url('/v2/images/%s' % image_id)
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    tags = json.loads(response.text)['tags']
    self.assertEqual(['gabe@example.com', 'snozz', 'sniff'], tags)

    # Query images by single tag
    path = self._url('/v2/images?tag=sniff')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    images = json.loads(response.text)['images']
    self.assertEqual(1, len(images))
    self.assertEqual('image-1', images[0]['name'])

    # Query images by multiple tags
    path = self._url('/v2/images?tag=sniff&tag=snozz')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    images = json.loads(response.text)['images']
    self.assertEqual(1, len(images))
    self.assertEqual('image-1', images[0]['name'])

    # Query images by tag and other attributes
    path = self._url('/v2/images?tag=sniff&status=queued')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    images = json.loads(response.text)['images']
    self.assertEqual(1, len(images))
    self.assertEqual('image-1', images[0]['name'])

    # Query images by tag and a nonexistent tag
    path = self._url('/v2/images?tag=sniff&tag=fake')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    images = json.loads(response.text)['images']
    self.assertEqual(0, len(images))

    # The tag should be deletable
    path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id)
    response = requests.delete(path, headers=self._headers())
    self.assertEqual(204, response.status_code)

    # List of tags should reflect the deletion
    path = self._url('/v2/images/%s' % image_id)
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    tags = json.loads(response.text)['tags']
    self.assertEqual(['snozz', 'sniff'], tags)

    # Deleting the same tag should return a 404
    path = self._url('/v2/images/%s/tags/gabe%%40example.com' % image_id)
    response = requests.delete(path, headers=self._headers())
    self.assertEqual(404, response.status_code)

    # The deleted tag should no longer match any image.
    # BUG FIX: the original literal 'gabe%%40example.com' was never
    # %-formatted, so a doubled '%%40' went out on the wire; a single
    # '%40' is the correct URL-encoded '@'.
    path = self._url('/v2/images?tag=gabe%40example.com')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    images = json.loads(response.text)['images']
    self.assertEqual(0, len(images))

    self.stop_servers()
def test_images_container(self):
    """Exercise listing, pagination links and marker handling.

    Creates seven images, pages through a filtered/sorted listing two
    at a time via the 'next' link, and verifies a deleted image is
    rejected as a pagination marker.
    """
    # Image list should be empty and no next link should be present
    path = self._url('/v2/images')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    # Parse the body once instead of re-decoding it per assertion.
    body = json.loads(response.text)
    self.assertEqual(0, len(body['images']))
    self.assertNotIn('next', body)
    self.assertEqual('/v2/images', body['first'])

    # Create 7 images
    images = []
    fixtures = [
        {'name': 'image-3', 'type': 'kernel', 'ping': 'pong'},
        {'name': 'image-4', 'type': 'kernel', 'ping': 'pong'},
        {'name': 'image-1', 'type': 'kernel', 'ping': 'pong'},
        {'name': 'image-3', 'type': 'ramdisk', 'ping': 'pong'},
        {'name': 'image-2', 'type': 'kernel', 'ping': 'ding'},
        {'name': 'image-3', 'type': 'kernel', 'ping': 'pong'},
        {'name': 'image-2', 'type': 'kernel', 'ping': 'pong'},
    ]
    path = self._url('/v2/images')
    headers = self._headers({'content-type': 'application/json'})
    for fixture in fixtures:
        data = json.dumps(fixture)
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(201, response.status_code)
        images.append(json.loads(response.text))

    # Image list should contain 7 images
    path = self._url('/v2/images')
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    body = json.loads(response.text)
    self.assertEqual(7, len(body['images']))
    self.assertEqual('/v2/images', body['first'])
    self.assertNotIn('next', body)

    # Begin pagination after the first image
    template_url = ('/v2/images?limit=2&sort_dir=asc&sort_key=name'
                    '&marker=%s&type=kernel&ping=pong')
    path = self._url(template_url % images[2]['id'])
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    body = json.loads(response.text)
    self.assertEqual(2, len(body['images']))
    response_ids = [image['id'] for image in body['images']]
    self.assertEqual([images[6]['id'], images[0]['id']], response_ids)

    # Continue pagination using next link from previous request
    path = self._url(body['next'])
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    body = json.loads(response.text)
    self.assertEqual(2, len(body['images']))
    response_ids = [image['id'] for image in body['images']]
    self.assertEqual([images[5]['id'], images[1]['id']], response_ids)

    # Continue pagination - expect no results
    path = self._url(body['next'])
    response = requests.get(path, headers=self._headers())
    self.assertEqual(200, response.status_code)
    body = json.loads(response.text)
    self.assertEqual(0, len(body['images']))

    # Delete first image
    path = self._url('/v2/images/%s' % images[0]['id'])
    response = requests.delete(path, headers=self._headers())
    self.assertEqual(204, response.status_code)

    # Ensure bad request for using a deleted image as marker
    path = self._url('/v2/images?marker=%s' % images[0]['id'])
    response = requests.get(path, headers=self._headers())
    self.assertEqual(400, response.status_code)

    self.stop_servers()
def test_image_visibility_to_different_users(self):
    """Check which images each class of user can see.

    Creates one public and one private image per owner, then lists
    images as known/unknown users and admins, with and without the
    ?visibility= filter.

    Uses assertEqual throughout; the assertEquals alias is deprecated
    and removed in Python 3.12.
    """
    self.cleanup()
    # Fake auth lets the tests impersonate arbitrary tenants/roles via
    # the X-Auth-Token header.
    self.api_server.deployment_flavor = 'fakeauth'
    self.registry_server.deployment_flavor = 'fakeauth'
    self.start_servers(**self.__dict__.copy())

    owners = ['admin', 'tenant1', 'tenant2', 'none']
    visibilities = ['public', 'private']
    for owner in owners:
        for visibility in visibilities:
            path = self._url('/v2/images')
            headers = self._headers({
                'content-type': 'application/json',
                'X-Auth-Token': 'createuser:%s:admin' % owner,
            })
            data = json.dumps({
                'name': '%s-%s' % (owner, visibility),
                'visibility': visibility,
            })
            response = requests.post(path, headers=headers, data=data)
            self.assertEqual(201, response.status_code)

    def list_images(tenant, role='', visibility=None):
        # Helper: list images as the given tenant/role, optionally
        # filtered by visibility.
        auth_token = 'user:%s:%s' % (tenant, role)
        headers = {'X-Auth-Token': auth_token}
        path = self._url('/v2/images')
        if visibility is not None:
            path += '?visibility=%s' % visibility
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        return json.loads(response.text)['images']

    # 1. Known user sees public and their own images
    images = list_images('tenant1')
    self.assertEqual(5, len(images))
    for image in images:
        self.assertTrue(image['visibility'] == 'public'
                        or 'tenant1' in image['name'])

    # 2. Known user, visibility=public, sees all public images
    images = list_images('tenant1', visibility='public')
    self.assertEqual(4, len(images))
    for image in images:
        self.assertEqual('public', image['visibility'])

    # 3. Known user, visibility=private, sees only their private image
    images = list_images('tenant1', visibility='private')
    self.assertEqual(1, len(images))
    image = images[0]
    self.assertEqual('private', image['visibility'])
    self.assertIn('tenant1', image['name'])

    # 4. Unknown user sees only public images
    images = list_images('none')
    self.assertEqual(4, len(images))
    for image in images:
        self.assertEqual('public', image['visibility'])

    # 5. Unknown user, visibility=public, sees only public images
    images = list_images('none', visibility='public')
    self.assertEqual(4, len(images))
    for image in images:
        self.assertEqual('public', image['visibility'])

    # 6. Unknown user, visibility=private, sees no images
    images = list_images('none', visibility='private')
    self.assertEqual(0, len(images))

    # 7. Unknown admin sees all images
    images = list_images('none', role='admin')
    self.assertEqual(8, len(images))

    # 8. Unknown admin, visibility=public, shows only public images
    images = list_images('none', role='admin', visibility='public')
    self.assertEqual(4, len(images))
    for image in images:
        self.assertEqual('public', image['visibility'])

    # 9. Unknown admin, visibility=private, sees only private images
    images = list_images('none', role='admin', visibility='private')
    self.assertEqual(4, len(images))
    for image in images:
        self.assertEqual('private', image['visibility'])

    # 10. Known admin sees all images
    images = list_images('admin', role='admin')
    self.assertEqual(8, len(images))

    # 11. Known admin, visibility=public, sees all public images
    images = list_images('admin', role='admin', visibility='public')
    self.assertEqual(4, len(images))
    for image in images:
        self.assertEqual('public', image['visibility'])

    # 12. Known admin, visibility=private, sees all private images
    images = list_images('admin', role='admin', visibility='private')
    self.assertEqual(4, len(images))
    for image in images:
        self.assertEqual('private', image['visibility'])

    self.stop_servers()
class TestImageDirectURLVisibility(functional.FunctionalTest):
    """Functional tests for exposure of location-related image attributes.

    Covers the show_image_direct_url and show_multiple_locations server
    options, plus the enable_v2_api switch.
    """

    def setUp(self):
        super(TestImageDirectURLVisibility, self).setUp()
        self.cleanup()
        # 'noauth' flavor: requests are trusted based on headers alone.
        self.api_server.deployment_flavor = 'noauth'

    def _url(self, path):
        """Return an absolute URL for *path* on the API server under test."""
        return 'http://127.0.0.1:%d%s' % (self.api_port, path)

    def _headers(self, custom_headers=None):
        """Return default identity headers, overlaid with *custom_headers*."""
        base_headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': TENANT1,
            'X-Roles': 'member',
        }
        base_headers.update(custom_headers or {})
        return base_headers

    def test_v2_not_enabled(self):
        """v2 endpoints must 300 (multiple choices) when v2 is disabled."""
        self.api_server.enable_v2_api = False
        self.start_servers(**self.__dict__.copy())
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(300, response.status_code)
        self.stop_servers()

    def test_v2_enabled(self):
        """v2 endpoints must respond normally when v2 is enabled."""
        self.api_server.enable_v2_api = True
        self.start_servers(**self.__dict__.copy())
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(200, response.status_code)
        self.stop_servers()

    def test_image_direct_url_visible(self):
        """direct_url appears (once data is uploaded) when the option is on."""
        self.api_server.show_image_direct_url = True
        self.start_servers(**self.__dict__.copy())

        # Image list should be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(200, response.status_code)
        images = json.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = json.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar',
                           'disk_format': 'aki', 'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(201, response.status_code)

        # Get the image id
        image = json.loads(response.text)
        image_id = image['id']

        # Image direct_url should not be visible before location is set
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        image = json.loads(response.text)
        self.assertNotIn('direct_url', image)

        # Upload some image data, setting the image location
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data='ZZZZZ')
        self.assertEqual(204, response.status_code)

        # Image direct_url should be visible
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        image = json.loads(response.text)
        self.assertIn('direct_url', image)

        # Image direct_url should be visible in a list
        path = self._url('/v2/images')
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        image = json.loads(response.text)['images'][0]
        self.assertIn('direct_url', image)

        self.stop_servers()

    def test_image_multiple_location_url_visible(self):
        """locations list appears after upload when the option is on."""
        self.api_server.show_multiple_locations = True
        self.start_servers(**self.__dict__.copy())

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = json.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar',
                           'disk_format': 'aki', 'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(201, response.status_code)

        # Get the image id
        image = json.loads(response.text)
        image_id = image['id']

        # Image locations should not be visible before location is set
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        image = json.loads(response.text)
        self.assertNotIn('locations', image)

        # Upload some image data, setting the image location
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data='ZZZZZ')
        self.assertEqual(204, response.status_code)

        # Image locations should be visible, with url and metadata entries
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        image = json.loads(response.text)
        self.assertIn('locations', image)
        loc = image['locations']
        self.assertGreater(len(loc), 0)
        loc = loc[0]
        self.assertIn('url', loc)
        self.assertIn('metadata', loc)

        self.stop_servers()

    def test_image_direct_url_not_visible(self):
        """direct_url must stay hidden when the option is off."""
        self.api_server.show_image_direct_url = False
        self.start_servers(**self.__dict__.copy())

        # Image list should be empty
        path = self._url('/v2/images')
        response = requests.get(path, headers=self._headers())
        self.assertEqual(200, response.status_code)
        images = json.loads(response.text)['images']
        self.assertEqual(0, len(images))

        # Create an image
        path = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        data = json.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar',
                           'disk_format': 'aki', 'container_format': 'aki'})
        response = requests.post(path, headers=headers, data=data)
        self.assertEqual(201, response.status_code)

        # Get the image id
        image = json.loads(response.text)
        image_id = image['id']

        # Upload some image data, setting the image location
        path = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        response = requests.put(path, headers=headers, data='ZZZZZ')
        self.assertEqual(204, response.status_code)

        # Image direct_url should not be visible
        path = self._url('/v2/images/%s' % image_id)
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        image = json.loads(response.text)
        self.assertNotIn('direct_url', image)

        # Image direct_url should not be visible in a list
        path = self._url('/v2/images')
        headers = self._headers({'Content-Type': 'application/json'})
        response = requests.get(path, headers=headers)
        self.assertEqual(200, response.status_code)
        image = json.loads(response.text)['images'][0]
        self.assertNotIn('direct_url', image)

        self.stop_servers()
class TestImageMembers(functional.FunctionalTest):
def setUp(self):
    """Bring up API and registry servers for the image-member tests.

    NOTE(review): 'fakeauth' appears to derive tenant and roles from
    the X-Auth-Token header, which the member tests rely on to
    impersonate different tenants — confirm against the flavor config.
    """
    super(TestImageMembers, self).setUp()
    self.cleanup()
    # Both servers must use the same auth flavor before start_servers()
    # reads the configuration.
    self.api_server.deployment_flavor = 'fakeauth'
    self.registry_server.deployment_flavor = 'fakeauth'
    # start_servers() consumes settings from instance attributes; the
    # __dict__ copy carries the flavor assignments through.
    self.start_servers(**self.__dict__.copy())
def _url(self, path):
    """Build an absolute URL for *path* on the local API server."""
    return 'http://127.0.0.1:{0:d}{1}'.format(self.api_port, path)
def _headers(self, custom_headers=None):
    """Return the default identity headers, overlaid with any overrides."""
    headers = {
        'X-Identity-Status': 'Confirmed',
        'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
        'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
        'X-Tenant-Id': TENANT1,
        'X-Roles': 'member',
    }
    if custom_headers:
        headers.update(custom_headers)
    return headers
def test_image_member_lifecycle(self):
def get_header(tenant, role=''):
auth_token = 'user:%s:%s' % (tenant, role)
headers = {'X-Auth-Token': auth_token}
return headers
# Image list should be empty
path = self._url('/v2/images')
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(0, len(images))
owners = ['tenant1', 'tenant2', 'admin']
visibilities = ['public', 'private']
image_fixture = []
for owner in owners:
for visibility in visibilities:
path = self._url('/v2/images')
headers = self._headers({
'content-type': 'application/json',
'X-Auth-Token': 'createuser:%s:admin' % owner,
})
data = json.dumps({
'name': '%s-%s' % (owner, visibility),
'visibility': visibility,
})
response = requests.post(path, headers=headers, data=data)
self.assertEqual(201, response.status_code)
image_fixture.append(json.loads(response.text))
# Image list should contain 4 images for tenant1
path = self._url('/v2/images')
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(4, len(images))
# Image list should contain 3 images for TENANT3
path = self._url('/v2/images')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(3, len(images))
# Add Image member for tenant1-private image
path = self._url('/v2/images/%s/members' % image_fixture[1]['id'])
body = json.dumps({'member': TENANT3})
response = requests.post(path, headers=get_header('tenant1'),
data=body)
self.assertEqual(200, response.status_code)
image_member = json.loads(response.text)
self.assertEqual(image_fixture[1]['id'], image_member['image_id'])
self.assertEqual(TENANT3, image_member['member_id'])
self.assertTrue('created_at' in image_member)
self.assertTrue('updated_at' in image_member)
self.assertEqual('pending', image_member['status'])
# Image list should contain 3 images for TENANT3
path = self._url('/v2/images')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(3, len(images))
# Image list should contain 0 shared images for TENANT3
# because the default member_status filter is 'accepted'
path = self._url('/v2/images?visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(0, len(images))
# Image list should contain 4 images for TENANT3 with status pending
path = self._url('/v2/images?member_status=pending')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(4, len(images))
# Image list should contain 4 images for TENANT3 with status all
path = self._url('/v2/images?member_status=all')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(4, len(images))
# Image list should contain 1 image for TENANT3 with status pending
# and visibility shared
path = self._url('/v2/images?member_status=pending&visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(1, len(images))
self.assertEqual(images[0]['name'], 'tenant1-private')
# Image list should contain 0 image for TENANT3 with status rejected
# and visibility shared
path = self._url('/v2/images?member_status=rejected&visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(0, len(images))
# Image list should contain 0 image for TENANT3 with status accepted
# and visibility shared
path = self._url('/v2/images?member_status=accepted&visibility=shared')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(0, len(images))
# Image list should contain 0 image for TENANT3 with status accepted
# and visibility private
path = self._url('/v2/images?visibility=private')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(0, len(images))
# Image tenant2-private's image members list should contain no members
path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
response = requests.get(path, headers=get_header('tenant2'))
self.assertEqual(200, response.status_code)
body = json.loads(response.text)
self.assertEqual(0, len(body['members']))
# Tenant 1, who is the owner cannot change status of image
path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
TENANT3))
body = json.dumps({'status': 'accepted'})
response = requests.put(path, headers=get_header('tenant1'), data=body)
self.assertEqual(403, response.status_code)
# Tenant 3 can change status of image
path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
TENANT3))
body = json.dumps({'status': 'accepted'})
response = requests.put(path, headers=get_header(TENANT3), data=body)
self.assertEqual(200, response.status_code)
image_member = json.loads(response.text)
self.assertEqual(image_fixture[1]['id'], image_member['image_id'])
self.assertEqual(TENANT3, image_member['member_id'])
self.assertEqual('accepted', image_member['status'])
# Image list should contain 4 images for TENANT3 because status is
# accepted
path = self._url('/v2/images')
response = requests.get(path, headers=get_header(TENANT3))
self.assertEqual(200, response.status_code)
images = json.loads(response.text)['images']
self.assertEqual(4, len(images))
# Tenant 3 invalid status change
path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
TENANT3))
body = json.dumps({'status': 'invalid-status'})
response = requests.put(path, headers=get_header(TENANT3), data=body)
self.assertEqual(400, response.status_code)
# Owner cannot change status of image
path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
TENANT3))
body = json.dumps({'status': 'accepted'})
response = requests.put(path, headers=get_header('tenant1'), data=body)
self.assertEqual(403, response.status_code)
# Add Image member for tenant2-private image
path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
body = json.dumps({'member': TENANT4})
response = requests.post(path, headers=get_header('tenant2'),
data=body)
self.assertEqual(200, response.status_code)
image_member = json.loads(response.text)
self.assertEqual(image_fixture[3]['id'], image_member['image_id'])
self.assertEqual(TENANT4, image_member['member_id'])
self.assertTrue('created_at' in image_member)
self.assertTrue('updated_at' in image_member)
# Add Image member to public image
path = self._url('/v2/images/%s/members' % image_fixture[0]['id'])
body = json.dumps({'member': TENANT2})
response = requests.post(path, headers=get_header('tenant1'),
data=body)
self.assertEqual(403, response.status_code)
# Image tenant1-private's members list should contain 1 member
path = self._url('/v2/images/%s/members' % image_fixture[1]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
body = json.loads(response.text)
self.assertEqual(1, len(body['members']))
# Admin can see any members
path = self._url('/v2/images/%s/members' % image_fixture[1]['id'])
response = requests.get(path, headers=get_header('tenant1', 'admin'))
self.assertEqual(200, response.status_code)
body = json.loads(response.text)
self.assertEqual(1, len(body['members']))
# Image members not found for private image not owned by TENANT 1
path = self._url('/v2/images/%s/members' % image_fixture[3]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(404, response.status_code)
# Image members forbidden for public image
path = self._url('/v2/images/%s/members' % image_fixture[0]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(403, response.status_code)
# Image Member Cannot delete Image membership
path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
TENANT3))
response = requests.delete(path, headers=get_header(TENANT3))
self.assertEqual(403, response.status_code)
# Delete Image member
path = self._url('/v2/images/%s/members/%s' % (image_fixture[1]['id'],
TENANT3))
response = requests.delete(path, headers=get_header('tenant1'))
self.assertEqual(204, response.status_code)
# Now the image has only no members
path = self._url('/v2/images/%s/members' % image_fixture[1]['id'])
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(200, response.status_code)
body = json.loads(response.text)
self.assertEqual(0, len(body['members']))
# Delete Image members not found for public image
path = self._url('/v2/images/%s/members/%s' % (image_fixture[0]['id'],
TENANT3))
response = requests.get(path, headers=get_header('tenant1'))
self.assertEqual(404, response.status_code)
self.stop_servers()
class TestQuotas(functional.FunctionalTest):
    """Functional checks for the per-user storage quota enforcement.

    The API server is started with a 100 byte user storage quota; uploads
    just under the quota must succeed and uploads over it must be rejected
    with 413 Request Entity Too Large.
    """

    def setUp(self):
        super(TestQuotas, self).setUp()
        self.cleanup()
        self.api_server.deployment_flavor = 'noauth'
        self.user_storage_quota = 100
        self.start_servers(**self.__dict__.copy())

    def _url(self, path):
        # Build an absolute URL against the locally started API server.
        return 'http://127.0.0.1:{0}{1}'.format(self.api_port, path)

    def _headers(self, custom_headers=None):
        # Identity headers for TENANT1; callers may override or extend them.
        headers = {
            'X-Identity-Status': 'Confirmed',
            'X-Auth-Token': '932c5c84-02ac-4fe5-a9ba-620af0e2bb96',
            'X-User-Id': 'f9a41d13-0c13-47e9-bee2-ce4e8bfe958e',
            'X-Tenant-Id': TENANT1,
            'X-Roles': 'member',
        }
        if custom_headers:
            headers.update(custom_headers)
        return headers

    def test_image_upload_under_quota(self):
        # The image list starts out empty
        url = self._url('/v2/images')
        resp = requests.get(url, headers=self._headers())
        self.assertEqual(200, resp.status_code)
        self.assertEqual(0, len(json.loads(resp.text)['images']))
        # Register a new image record
        url = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        payload = json.dumps({'name': 'image-2',
                              'disk_format': 'aki', 'container_format': 'aki'})
        resp = requests.post(url, headers=headers, data=payload)
        self.assertEqual(201, resp.status_code)
        image_id = json.loads(resp.text)['id']
        # Upload one byte less than the quota allows
        payload = 'x' * (self.user_storage_quota - 1)
        url = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        resp = requests.put(url, headers=headers, data=payload)
        self.assertEqual(204, resp.status_code)
        # Deletion should work
        url = self._url('/v2/images/%s' % image_id)
        resp = requests.delete(url, headers=self._headers())
        self.assertEqual(204, resp.status_code)

    def test_image_upload_exceed_quota(self):
        # The image list starts out empty
        url = self._url('/v2/images')
        resp = requests.get(url, headers=self._headers())
        self.assertEqual(200, resp.status_code)
        self.assertEqual(0, len(json.loads(resp.text)['images']))
        # Register a new image record (with deployer-defined properties)
        url = self._url('/v2/images')
        headers = self._headers({'content-type': 'application/json'})
        payload = json.dumps({'name': 'image-1', 'type': 'kernel', 'foo': 'bar',
                              'disk_format': 'aki', 'container_format': 'aki'})
        resp = requests.post(url, headers=headers, data=payload)
        self.assertEqual(201, resp.status_code)
        image_id = json.loads(resp.text)['id']
        # Upload one byte more than the quota allows: must be rejected
        payload = 'x' * (self.user_storage_quota + 1)
        url = self._url('/v2/images/%s/file' % image_id)
        headers = self._headers({'Content-Type': 'application/octet-stream'})
        resp = requests.put(url, headers=headers, data=payload)
        self.assertEqual(413, resp.status_code)
        # The image record itself can still be deleted
        url = self._url('/v2/images/%s' % image_id)
        resp = requests.delete(url, headers=self._headers())
        self.assertEqual(204, resp.status_code)
| {
"content_hash": "0c2c2be14fd11a86192d2fa56e55b08b",
"timestamp": "",
"source": "github",
"line_count": 1306,
"max_line_length": 79,
"avg_line_length": 44.26033690658499,
"alnum_prop": 0.5939554356099924,
"repo_name": "citrix-openstack-build/glance",
"id": "2e010a5a1cf78cd2e3725bd0e9d9148486780680",
"size": "58485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/functional/v2/test_images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2464002"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
"""Utility to retrieve function args.."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
def _is_bounded_method(fn):
  """Returns True if `fn` is a method bound to an instance (has `__self__`)."""
  if not tf_inspect.ismethod(fn):
    return False
  return fn.__self__ is not None
def _is_callable_object(obj):
  """Returns True if `obj` is callable via a `__call__` method."""
  call_attr = getattr(obj, '__call__', None)
  return call_attr is not None and tf_inspect.ismethod(call_attr)
def fn_args(fn):
  """Get argument names for function-like object.

  Works on plain functions, bound methods, callable objects, decorated
  callables (unwrapped first) and `functools.partial` objects, for which
  positionally and keyword-bound arguments are excluded from the result.

  Args:
    fn: Function, or function-like object (e.g., result of `functools.partial`).

  Returns:
    `tuple` of string argument names.
  """
  _, fn = tf_decorator.unwrap(fn)
  if isinstance(fn, functools.partial):
    # Drop arguments already bound by the partial, positionally or by keyword.
    bound_keywords = fn.keywords or {}
    remaining = fn_args(fn.func)[len(fn.args):]
    args = [name for name in remaining if name not in bound_keywords]
  else:
    target = fn.__call__ if _is_callable_object(fn) else fn
    args = tf_inspect.getfullargspec(target).args
    if _is_bounded_method(target):
      # Bound methods report `self` first; callers never pass it.
      args.remove('self')
  return tuple(args)
| {
"content_hash": "3ea9291575e240acf41378bbf388bc94",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 80,
"avg_line_length": 26.093023255813954,
"alnum_prop": 0.6827094474153298,
"repo_name": "Kongsea/tensorflow",
"id": "b31486dfa1122c2549ba3e9f6a730fd26444450a",
"size": "1812",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/estimator/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "198923"
},
{
"name": "C++",
"bytes": "29494349"
},
{
"name": "CMake",
"bytes": "644855"
},
{
"name": "Go",
"bytes": "976410"
},
{
"name": "Java",
"bytes": "409984"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38189"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "270658"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26227666"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373711"
}
],
"symlink_target": ""
} |
__author__ = 'Juan A. Ugalde'
def split_sequence_fragments(sequence, block_size):
    """
    Split a sequence of letters (usually DNA) into consecutive fragments of
    at most block_size characters; the final fragment may be shorter.
    Returns the fragments as a list, in order.
    """
    return [sequence[start:start + block_size]
            for start in range(0, len(sequence), block_size)]
def run_blastn(folder, reference, query, names):
    """
    Function that takes a reference file and a query file and run blastn.
    The required input is a folder to create the temporal blast database, and
    reference and query files in fasta format.
    The results will be saved with the name of query versus reference
    """
    import os
    #Make blastdb
    # NOTE(review): if the reference is missing we only print a warning and
    # still run makeblastdb/blastn, which will then fail downstream --
    # consider aborting here instead.
    if not os.path.isfile(reference):
        print "Reference file: %s not found" % reference
    #os.system('formatdb -i %s -p F -n %s/reference' % (reference, folder))
    os.system('makeblastdb -in %s -dbtype nucl -out %s/reference' % (reference, folder))
    num_processors = 4 # Number of processors to use for Blast
    # names is a (query_name, reference_name) pair used only to label the output file
    query_name, reference_name = names
    blast_output_name = folder + "/" + query_name + "_" + reference_name
    # Legacy blastall invocation kept for reference:
    #os.system('blastall -p blastn -a %d -d %s/reference -i %s -X 150 -q -1 -F F -m 8 -o %s' %
    # (num_processors, folder, query, blast_output_name))
    # NOTE(review): paths are interpolated unquoted into a shell command via
    # os.system -- paths containing spaces or shell metacharacters will break.
    # -outfmt 6 produces the 12-column tabular output parsed by get_blast_top_hit.
    os.system('blastn -num_threads %d -db %s/reference -query %s -xdrop_gap 150 -penalty -1 -dust no -outfmt 6 '
              '-gapopen 5 -gapextend 2 -out %s' %
              (num_processors, folder, query, blast_output_name))
    return blast_output_name
def get_blast_top_hit(blast_file):
    """
    Parse a tabular (-outfmt 6) blast file and keep the best hit per query.

    The best hit is the row with the highest bit score (column 12); on ties
    the later row wins, matching the original >=-replacement behaviour.

    Fixes over the previous revision: the input file is now closed (it was
    opened and never closed) and blank lines are skipped instead of crashing
    the 12-field unpack.

    :param blast_file: path to a blastn tabular output file
    :return: dict mapping queryId -> list of the 12 tab-separated fields (strings)
    """
    blast_top_hit = {}
    with open(blast_file) as handle:
        for blast_line in handle:
            blast_line = blast_line.rstrip()
            if not blast_line:
                continue  # tolerate blank/trailing lines
            fields = blast_line.split("\t")
            query_id = fields[0]
            bit_score = fields[11]
            previous = blast_top_hit.get(query_id)
            # keep the row with the highest bit score (ties: last row wins)
            if previous is None or float(bit_score) >= float(previous[11]):
                blast_top_hit[query_id] = fields
    return blast_top_hit
def calculate_ani(blast_results, fragment_length):
    """
    Compute ANI statistics for a query genome against the reference.

    A hit is accepted when its identity is > ~70% and the aligned portion
    covers > ~70% of the fragment (the Goris et al. / JSpecies criteria).

    :param blast_results: dict of fragment id -> list of the 12 tabular blast
        fields (strings), as produced by get_blast_top_hit
    :param fragment_length: dict of fragment id -> fragment size in bases
    :return: tuple of (sum of identities over accepted hits, number of
        accepted hits, total aligned bases, unaligned fragment count,
        unaligned bases)

    Note: an unused conserved-DNA tally (identity > ~90%) present in an
    earlier revision was dead code -- it was never returned -- and has been
    removed.
    """
    sum_identity = 0.0
    number_hits = 0              # hits that passed the criteria
    total_aligned_bases = 0      # bases in fragments that passed the criteria
    total_unaligned_fragments = 0
    total_unaligned_bases = 0
    for query in blast_results:
        hit = blast_results[query]
        identity = float(hit[2])
        query_start = float(hit[6])
        query_end = float(hit[7])
        # fraction of the fragment covered by the alignment
        perc_aln_length = (query_end - query_start) / fragment_length[query]
        if identity > 69.9999 and perc_aln_length > 0.69999:
            sum_identity += identity
            number_hits += 1
            total_aligned_bases += fragment_length[query]
        else:
            total_unaligned_fragments += 1
            total_unaligned_bases += fragment_length[query]
    return sum_identity, number_hits, total_aligned_bases, total_unaligned_fragments, total_unaligned_bases
def average_ani_results(ani_dictionary):
    """
    Collapse the two directed ANI values of each genome pair into one value.

    For every (reference, query) / (query, reference) pair of entries, keep
    a single entry holding the average of the two directed values.
    """
    refined_ani_results = {}
    for pair in ani_dictionary:
        genome_a, genome_b = pair
        reverse_pair = (genome_b, genome_a)
        if reverse_pair in refined_ani_results:
            # the mirrored orientation was already averaged and stored
            continue
        forward_value = ani_dictionary[pair]
        reverse_value = ani_dictionary[reverse_pair]
        refined_ani_results[pair] = (forward_value + reverse_value) / 2
    return refined_ani_results
def create_distance_matrix(ani_dictionary):
    """
    Build a symmetric distance matrix (distance = 100 - ANI) from the
    averaged ANI values.

    Returns (rows, cols, matrix): rows and cols map each genome name to its
    index (alphabetical order) and matrix is a square numpy float array.
    The diagonal is left at 0.
    """
    from itertools import count
    import numpy as np
    entries = []
    for reference, query in ani_dictionary:
        distance = 100 - float(ani_dictionary[(reference, query)])
        # record both orientations so the matrix comes out symmetric
        entries.append((reference, query, distance))
        entries.append((query, reference, distance))
    rows = dict(zip(sorted(set(entry[0] for entry in entries)), count()))
    cols = dict(zip(sorted(set(entry[1] for entry in entries)), count()))
    matrix = np.zeros((len(rows), len(rows)), dtype=float)
    for name_a, name_b, distance in entries:
        matrix[rows[name_a], cols[name_b]] = distance
    return rows, cols, matrix
if __name__ == '__main__':
    import sys
    import shutil
    import argparse
    import os
    import itertools
    from Bio import SeqIO
    import matplotlib
    # Agg backend must be selected before pyplot is imported (headless use)
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    import scipy.cluster.hierarchy as sch
    import scipy.spatial.distance
    program_description = "Script that takes a list of genomes, containing the location of the fasta files," \
                          "and generates a matrix with the ANI values for all the combinations"
    parser = argparse.ArgumentParser(description=program_description)
    parser.add_argument("-i", "--genome_input_list", type=str, help="List with the genome names and files",
                        required=True)
    parser.add_argument("-o", "--output_directory", type=str, help="Output directory", required=True)
    args = parser.parse_args()
    #Create output directory
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)
    #Create temporal folder for the blast analysis
    temp_folder = args.output_directory + "/temp"
    if not os.path.exists(temp_folder):
        os.makedirs(temp_folder)
    #Read the genome list: tab-separated "name<TAB>fasta_path", blank lines skipped
    genome_info = {element[0]: element[1] for element in [line.split("\t") for line in [line.rstrip() for line in open(args.genome_input_list)] if line.strip()]}
    #Create log file
    log_output = open(args.output_directory + "/logfile.txt", 'w')
    mapping_summary = open(args.output_directory + "/mapping_summary.txt", 'w')
    mapping_summary.write("Reference\tReference Genome size\t"
                          "Query\tQuery Genome Size\t"
                          "Total number fragments\tMapped Fragments\tIdentity\t"
                          "Mapped Bases\tUnmapped fragments\tUnmapped Bases\n")
    #Parameters for blast and fragments
    fragment_size = 500
    #Create genome combinations for blast analysis (ordered pairs: both directions are run)
    genome_combinations = itertools.permutations(genome_info.keys(), 2)
    genome_pair_identity = {} # Results  # NOTE(review): never used afterwards -- dead variable
    raw_ani_results = {} # Results of the ANI analysis
    for genome_pair in genome_combinations:
        reference, query = genome_pair[0], genome_pair[1]
        reference_file = genome_info[reference]
        query_file = genome_info[query]
        #Check that the files exists
        if not os.path.isfile(reference_file):
            print "The reference fasta for %s was not found" % reference
            sys.exit("Check the path for the files")
        if not os.path.isfile(query_file):
            print "The query fasta for %s was not found" % query
            sys.exit("Check the path for the files")
        #Create query file, with fragments of 500bp (Ns stripped before fragmenting)
        query_fragments_file = open(temp_folder + "/query.fna", 'w')
        fragment_number = 1 # Id of each fragment
        genome_query_fragments = 0
        fragment_length_dict = {} # Store the size of each fragment
        complete_query_genome_size = 0 # Total size of the query genome
        trimmed_query_genome_size = 0 # Total size of genome no Ns
        for seq_record in SeqIO.parse(query_file, "fasta"):
            genome_sequence = seq_record.seq
            edited_genome_sequence = (str(genome_sequence)).replace("N", "")
            fragments = split_sequence_fragments(edited_genome_sequence, fragment_size)
            complete_query_genome_size += len(seq_record.seq)
            trimmed_query_genome_size += len(edited_genome_sequence)
            genome_query_fragments += len(fragments)
            for fragment in fragments:
                fragment_name = "Fragment" + str(fragment_number)
                query_fragments_file.write(">" + fragment_name + "\n" + str(fragment) + "\n")
                fragment_length_dict[fragment_name] = len(fragment)
                fragment_number += 1
        query_fragments_file.close()
        #Print total number of fragments
        log_output.write("For the query genome: %s \n" % query)
        log_output.write("Genome size: %d \n" % complete_query_genome_size)
        log_output.write("Genome size, with no Ns: %d\n" % trimmed_query_genome_size)
        log_output.write("Number of fragments: %d \n" % genome_query_fragments)
        fragment_query_file = temp_folder + "/query.fna"
        #Print information to screen
        sys.stderr.write("Running blast of %s versus %s \n" % (reference, query))
        sys.stderr.flush()
        #Run blast
        blast_file = run_blastn(temp_folder, reference_file, fragment_query_file, ("reference", "query"))
        #Parse the blast result
        blast_top_hit = get_blast_top_hit(blast_file)
        sum_identity, number_hits, total_aligned_bases, total_unaligned_fragments, total_unaligned_bases = \
            calculate_ani(blast_top_hit, fragment_length_dict)
        try:
            reference_query_ani = sum_identity / number_hits
        except ZeroDivisionError: # Cases were there are no hits
            reference_query_ani = 0
        #Store the results
        raw_ani_results[(reference, query)] = reference_query_ani
        #Get the size of the reference genome
        reference_genome_size = 0
        for seq_record in SeqIO.parse(reference_file, "fasta"):
            reference_genome_size += len(seq_record.seq)
        results = [reference, str(reference_genome_size), query, str(trimmed_query_genome_size),
                   str(genome_query_fragments), str(number_hits), str(reference_query_ani),
                   str(total_aligned_bases), str(total_unaligned_fragments), str(total_unaligned_bases)]
        mapping_summary.write("\t".join(results) + "\n")
    ##Take the average of the reference query values
    final_ani_results = average_ani_results(raw_ani_results)
    #Generate matrix file
    rows, cols, ani_array = create_distance_matrix(final_ani_results)
    order_col_labels = sorted(cols, key=cols.get)
    #Save matrix file
    matrix_file = open(args.output_directory + "/matrix_file.txt", 'w')
    matrix_file.write("\t" + "\t".join(order_col_labels) + "\n")
    for row_label, row in zip(order_col_labels, ani_array):
        matrix_file.write(row_label + "\t" + "\t".join(str(n) for n in row) + "\n")
    #Run hierarchical analysis and save the plot
    # squareform requires the symmetric zero-diagonal matrix built above
    distance_matrix = scipy.spatial.distance.squareform(ani_array)
    linkage_matrix = sch.linkage(distance_matrix, method="complete", metric="euclidean") # Method and metric
    X = sch.dendrogram(linkage_matrix, labels=order_col_labels, orientation="left") # return value unused; call renders into the current figure
    plt.subplots_adjust(left=0.3)
    plt.savefig(args.output_directory + "/ANI_hier_plot.pdf")
    #Close final files
    log_output.close()
    mapping_summary.close()
    matrix_file.close()
    #Remove the temporal folder
    shutil.rmtree(temp_folder)
| {
"content_hash": "0915fe1b4de4d684855644f28f298de9",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 161,
"avg_line_length": 36.09538461538462,
"alnum_prop": 0.640610348648879,
"repo_name": "dacuevas/bioinformatics",
"id": "aad609d5dfacd7973d2a44936ec1e082f90ace95",
"size": "11922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "misc/ANI_blastn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "89452"
},
{
"name": "R",
"bytes": "1839"
}
],
"symlink_target": ""
} |
from typing import Tuple
from controller.app import Application
from controller.deploy.docker import Docker
from controller.utilities import services
# Module-level attributes read by the generic password-rotation command.
SERVICE_NAME = __name__
PASSWORD_VARIABLES = ["ALCHEMY_PASSWORD"]
IS_RUNNING_NEEDED = True
def password(container: Tuple[str, str], old_password: str, new_password: str) -> None:
    """Change the PostgreSQL password of the configured ALCHEMY_USER.

    Runs ALTER USER via psql inside the given container.

    ``old_password`` is accepted to satisfy the common module signature but
    is not needed: ALTER USER does not require the previous password.

    NOTE(review): ``new_password`` is interpolated verbatim into the SQL
    statement between single quotes -- a password containing a single quote
    (or shell metacharacters, depending on how exec_command runs the string)
    would break the statement. Confirm passwords are generated from a safe
    alphabet upstream.
    """
    docker = Docker()
    # Interactively:
    # \password username
    # Non interactively:
    # https://ubiq.co/database-blog/how-to-change-user-password-in-postgresql
    user = Application.env.get("ALCHEMY_USER")
    db = Application.env.get("ALCHEMY_DB")
    docker.exec_command(
        container,
        user=services.get_default_user(SERVICE_NAME),
        command=f"""
            psql -U {user} -d {db} -c \"
                ALTER USER {user} WITH PASSWORD \'{new_password}\';
            \"
        """,
    )
| {
"content_hash": "07dfffa456f074cf9c385ad931482fd6",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 87,
"avg_line_length": 29.551724137931036,
"alnum_prop": 0.646441073512252,
"repo_name": "rapydo/do",
"id": "62bd0e77ad6932b0552fc43782ab55178e9ca35c",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/2.4",
"path": "controller/commands/password_modules/postgres.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jinja",
"bytes": "59123"
},
{
"name": "Python",
"bytes": "551612"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib.auth import get_user_model, authenticate, login, password_validation
from django.contrib.auth.forms import PasswordResetForm
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.core.exceptions import ValidationError
from django.forms import widgets, ModelForm
from django.template.loader import get_template, select_template, render_to_string
from django.utils.html import format_html
from django.utils.translation import gettext_lazy as _
from djng.forms import fields, NgModelFormMixin, NgFormValidationMixin
from djng.styling.bootstrap3.forms import Bootstrap3ModelForm
from post_office import mail as post_office_mail
from post_office.models import EmailTemplate
from shop.conf import app_settings
from shop.forms.base import UniqueEmailValidationMixin
from shop.models.customer import CustomerModel
from shop.signals import email_queued
class RegisterUserForm(NgModelFormMixin, NgFormValidationMixin, UniqueEmailValidationMixin, Bootstrap3ModelForm):
    """
    Registration form for a new customer: asks for a unique e-mail address and
    a password pair. If "preset password" is checked, a random password is
    generated instead and mailed to the given address. On save the customer is
    activated, recognized as registered, and logged in.
    """
    form_name = 'register_user_form'
    scope_prefix = 'form_data'
    field_css_classes = 'input-group has-feedback'
    email = fields.EmailField(
        label=_("Your e-mail address"),
        widget=widgets.EmailInput(attrs={'placeholder': _("E-mail address")})
    )
    # When checked, __init__ fills password1/password2 with a random password
    # and save() mails it to the user.
    preset_password = fields.BooleanField(
        label=_("Preset password"),
        widget=widgets.CheckboxInput(attrs={'class': 'form-check-input'}),
        required=False,
        help_text=_("Send a randomly generated password to your e-mail address."),
    )
    error_messages = {
        'password_mismatch': _("The two password fields didn't match."),
    }
    password1 = fields.CharField(
        label=_("New password"),
        widget=widgets.PasswordInput(attrs={'placeholder': _("Password")}),
        strip=False,
        help_text=password_validation.password_validators_help_text_html(),
    )
    password2 = fields.CharField(
        label=_("New password confirmation"),
        strip=False,
        widget=widgets.PasswordInput(attrs={'placeholder': _("Password")}),
        help_text=format_html('<ul><li>{}</li></ul>', _("Confirm the password.")),
    )

    class Meta:
        model = CustomerModel
        fields = ['email', 'password1', 'password2']

    def __init__(self, data=None, instance=None, *args, **kwargs):
        # If the user opted for a preset password, generate one up front so
        # both password fields validate as equal.
        if data and data.get('preset_password', False):
            pwd_length = max(self.base_fields['password1'].min_length or 8, 8)
            password = get_user_model().objects.make_random_password(pwd_length)
            data['password1'] = data['password2'] = password
        super().__init__(data=data, instance=instance, *args, **kwargs)

    def clean(self):
        """Ensure both passwords match and satisfy the configured validators."""
        cleaned_data = super().clean()
        password1 = cleaned_data.get('password1')
        password2 = cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise ValidationError(
                    self.error_messages['password_mismatch'],
                    code='password_mismatch',
                )
            password_validation.validate_password(password2)
        return cleaned_data

    def save(self, request=None, commit=True):
        """Activate the user, persist the customer, and log the user in."""
        self.instance.user.is_active = True
        self.instance.user.email = self.cleaned_data['email']
        self.instance.user.set_password(self.cleaned_data['password1'])
        self.instance.recognize_as_registered(request, commit=False)
        customer = super().save(commit)
        password = self.cleaned_data['password1']
        if self.cleaned_data['preset_password']:
            self._send_password(request, customer.user, password)
        # NOTE(review): authenticate() returns None if no backend accepts the
        # credentials; login() would then fail -- confirm backend configuration.
        user = authenticate(username=customer.user.username, password=password)
        login(request, user)
        return customer

    def _send_password(self, request, user, password):
        """E-mail the generated password, using app-specific templates when present."""
        current_site = get_current_site(request)
        context = {
            'site_name': current_site.name,
            'absolute_base_uri': request.build_absolute_uri('/'),
            'email': user.email,
            'password': password,
            'user': user,
        }
        subject_template = select_template([
            '{}/email/register-user-subject.txt'.format(app_settings.APP_LABEL),
            'shop/email/register-user-subject.txt',
        ])
        # Email subject *must not* contain newlines
        subject = ''.join(subject_template.render(context).splitlines())
        body_text_template = select_template([
            '{}/email/register-user-body.txt'.format(app_settings.APP_LABEL),
            'shop/email/register-user-body.txt',
        ])
        body_html_template = select_template([
            '{}/email/register-user-body.html'.format(app_settings.APP_LABEL),
            'shop/email/register-user-body.html',
        ], using='post_office')
        message = body_text_template.render(context)
        html_message = body_html_template.render(context)
        from_email = getattr(settings, 'DEFAULT_FROM_EMAIL')
        user.email_user(subject, message, from_email=from_email, html_message=html_message)
        email_queued()
class ContinueAsGuestForm(ModelForm):
    """
    Handles Customer's decision to order as guest.
    """
    form_name = 'continue_as_guest_form'
    scope_prefix = 'form_data'

    class Meta:
        model = CustomerModel
        # intentionally empty: this form renders no fields
        fields = ()

    def save(self, request=None, commit=True):
        """Mark the customer as a guest; keep password reset possible when
        guests are treated as active users."""
        customer = self.instance
        customer.recognize_as_guest(request, commit=False)
        customer.user.is_active = app_settings.GUEST_IS_ACTIVE_USER
        if customer.user.is_active:
            # a usable password is required, otherwise the user later can not reset it
            random_password = get_user_model().objects.make_random_password(length=30)
            customer.user.set_password(random_password)
        return super().save(commit)
class PasswordResetRequestForm(PasswordResetForm):
    """
    Password-reset form which prefers a django-post_office e-mail template
    named 'password-reset-inform' when one exists, falling back to Django's
    standard template-based rendering otherwise.
    """
    def send_mail(self, subject_template_name, email_template_name,
                  context, from_email, to_email, html_email_template_name=None):
        try:
            email_template = EmailTemplate.objects.get(name='password-reset-inform')
        except EmailTemplate.DoesNotExist:
            # No post_office template configured: render and send the
            # standard Django password-reset e-mail ourselves.
            subject = render_to_string(subject_template_name, context)
            # Email subject *must not* contain newlines
            subject = ''.join(subject.splitlines())
            body = render_to_string(email_template_name, context)
            email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
            if html_email_template_name:
                template = get_template(html_email_template_name, using='post_office')
                html = template.render(context)
                email_message.attach_alternative(html, 'text/html')
                template.attach_related(email_message)
            email_message.send()
        else:
            # Make the context JSON-serializable for deferred rendering.
            context['user'] = str(context['user'])
            # NOTE(review): assumes `uid` is bytes (older Django); on newer
            # Django versions uid is already `str` and .decode would raise --
            # confirm the targeted Django version.
            context['uid'] = context['uid'].decode('utf-8')
            post_office_mail.send(to_email, template=email_template, context=context, render_on_delivery=True)
        email_queued()
| {
"content_hash": "ef0bbaecafbc0d997e111ee2e7627a5b",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 113,
"avg_line_length": 43.66867469879518,
"alnum_prop": 0.6538832942474824,
"repo_name": "awesto/django-shop",
"id": "ce06d4ccba75d015e1563adf57b3325e6c3aa8b7",
"size": "7249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shop/forms/auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "107122"
},
{
"name": "JavaScript",
"bytes": "51946"
},
{
"name": "Python",
"bytes": "588560"
}
],
"symlink_target": ""
} |
import os;
import sys;
import time;
# a marker object to indicate not found
class _NotFound (object):
    """Private sentinel type; its single instance marks 'key not present'."""
    def __init__(self):
        pass


# the unique not-found marker object; always compared with `is`
NOT_FOUND = _NotFound()


# a basic lru cache class
class LRUCache (object):
    """LRU cache with optional max-size and max-age pruning.

    Fixes over the previous revision (behaviour preserved otherwise):
      * get() with a non-zero maxAge used to fall off the end and return None
        for fresh (non-expired) entries; it now returns the cached value.
      * _freshen() now guards against two operations landing in the same
        microsecond, which previously corrupted keyTable/timeStampTable.
      * py2-only spellings (`long`, `dict.has_key`, print statements) were
        replaced with forms that behave identically on py2 and also run on py3.
    """
    DEBUG_DUMP_ENABLED = False

    @classmethod
    def currentTimeMicros(cls):
        # wall-clock time as an integer number of microseconds
        return int(time.time() * 1000.0 * 1000.0)

    # -------------------------------------------------------------------------------------
    # Create an LRU Cache. (Set maxSize/Age to 0 to turn off the pruning logic)
    # maxSize = maximum number of elements to keep in cache
    # maxAgeMs = oldest entry to keep
    # sizeElasiticity = allow cache to grow till maxSize + sizeElasticity before pruning
    # -------------------------------------------------------------------------------------
    def __init__(self, maxSize=32, maxAgeMs=0.0, sizeElasticity=10):
        self.maxSize = maxSize
        self.maxAge = int(maxAgeMs * 1000.0)  # stored in microseconds
        self.elasticity = sizeElasticity
        self.cache = {}           # the actual cache table: key -> value
        self.keyTable = {}        # timestamp -> key
        self.timeStampTable = {}  # key -> timestamp

    # set key = value in cache
    # NOTE(review): falsy values (0, '', None, empty containers) are silently
    # ignored because of the `if value:` guard -- kept as-is for compatibility.
    def put(self, key, value):
        if value:
            self.cache[key] = value
            self._freshen(key)
            self._prune()

    # get the value for key from cache (default to supplied value)
    def get(self, key, value=None):
        ret = self.cache.get(key, NOT_FOUND)
        # key was not found: return the default value
        if ret is NOT_FOUND:
            return value
        # check if the entry is within the acceptable age (maxAge == 0 disables this)
        if not self.maxAge == 0:
            ts = self.timeStampTable[key]
            age = LRUCache.currentTimeMicros() - ts
            if age > self.maxAge:
                # expired: drop the entry and fall back to the default
                self.erase(key)
                return value
        # live entry: mark it as most recently used and return it
        # (previously this point was unreachable when maxAge != 0, so fresh
        # entries wrongly returned None)
        self._freshen(key)
        return ret

    # erase key
    def erase(self, key):
        if self.has_key(key):
            del self.cache[key]
            ts = self.timeStampTable[key]
            del self.timeStampTable[key]
            del self.keyTable[ts]

    # check if the key is present
    def has_key(self, key):
        return key in self.cache

    # the cache current size
    def size(self):
        return len(self.cache)

    # clear the cache of all elements
    def clear(self):
        self.cache.clear()
        self.keyTable.clear()
        self.timeStampTable.clear()

    # "freshen" the key i.e. change the timestamp to current time
    def _freshen(self, key):
        oldTs = self.timeStampTable.get(key, NOT_FOUND)
        if oldTs is not NOT_FOUND:
            del self.timeStampTable[key]
            del self.keyTable[oldTs]
        newTs = LRUCache.currentTimeMicros()
        # two operations in the same microsecond would otherwise collide in
        # keyTable and corrupt the bookkeeping; bump until the slot is free
        while newTs in self.keyTable:
            newTs += 1
        self.timeStampTable[key] = newTs
        self.keyTable[newTs] = key

    # we got to clear off all elements in excess of maxSize or maxAge
    def _prune(self):
        if self.maxSize == 0:
            pass
        elif self.size() > (self.maxSize + self.elasticity):
            print('size [%d] is greater than [%d]. pruning...' % (self.size(), self.maxSize + self.elasticity))
            toDel = self.size() - self.maxSize
            # oldest timestamps first == least recently used first
            timeStamps = sorted(self.keyTable.keys())
            timeStamps = timeStamps[0:toDel]
            for ts in timeStamps:
                key = self.keyTable[ts]
                del self.keyTable[ts]
                del self.timeStampTable[key]
                del self.cache[key]

    def __str__(self):
        ret = 'LRU Cache (cur=%d, max=%d (+%d),maxAge=%f ms)' % (self.size(), self.maxSize, self.elasticity, 0.001 * self.maxAge)
        return ret

    #
    # a debug method to dump out the state of the cache
    #
    def dumpState(self, out=sys.stdout):
        out.write(str(self)+'\n')
        if not LRUCache.DEBUG_DUMP_ENABLED:
            return
        if self.size() > 0:
            out.write('CACHE\n')
            for k, v in self.cache.items():
                out.write(' [%s]=[%s]\n' % (k, v))
            ts = LRUCache.currentTimeMicros()
            out.write('>> KEY TABLE\n')
            keys = sorted(self.keyTable.keys())
            for k in keys:
                out.write(' [%s]=[%s] (AGE=%d usec)\n' % (k, self.keyTable[k], ts - k))
            out.write('>> TSTAMP TABLE\n')
            for k, v in self.timeStampTable.items():
                out.write(' [%s]=[%s]\n' % (k, v))
        out.write('\n')
# sample usage
if __name__ == '__main__':
    # LRUCache(3, 0.300, 0) — args appear to be (maxSize, maxAge, elasticity);
    # TODO confirm against __init__, which is defined above
    lc = LRUCache(3,0.300,0);
    print 'initialized cache : '+str(lc);
    # overfill the 3-entry cache so pruning kicks in along the way
    for i in range(0,10):
        lc.put('key'+str(i),'val'+str(i));
        print 'After add #'+str(i+1)+':';
        lc.dumpState();
    lc.put('key2','val2');
    # 'key8' may have been pruned or expired, in which case this prints None
    print lc.get('key8');
    lc.dumpState();
"content_hash": "23ed0bd796284eb132d00d817c9a308b",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 130,
"avg_line_length": 35.845588235294116,
"alnum_prop": 0.5298461538461539,
"repo_name": "disqus/channels",
"id": "e8c556ed97b143fcbe649f015d713c480b6152ae",
"size": "5808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "channels/contrib/lrucache.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7829"
},
{
"name": "CoffeeScript",
"bytes": "70166"
},
{
"name": "JavaScript",
"bytes": "4347836"
},
{
"name": "Python",
"bytes": "52647"
},
{
"name": "Ruby",
"bytes": "2454"
},
{
"name": "Shell",
"bytes": "1848"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
from django.shortcuts import render_to_response, RequestContext, Http404, HttpResponseRedirect, HttpResponse
from cart.models import Cart
from products.models import Featured
def home(request):
    """Render the marketplace home page.

    Context (passed via ``locals()``): ``featured_products`` — products from
    the current Featured instance; ``cart``/``cartitems`` — the session cart
    and its products, or ``False``/``None`` when no cart exists.
    """
    featured = Featured.objects.get_featured_instance()
    featured_products = list(featured.products.all())
    try:
        cart_id = request.session['cart_id']
        cart = Cart.objects.get(id=cart_id)
    except (KeyError, Cart.DoesNotExist):
        # no cart id in this session, or the cart was deleted server-side;
        # previously a bare `except:` hid every other failure too
        cart = False
    cartitems = None
    if cart:
        cartitems = [item.product for item in cart.cartitem_set.all()]
    # NOTE: the template context is built from locals(), so the variable
    # names above are part of this view's contract with home.html.
    return render_to_response("home.html", locals(), context_instance=RequestContext(request))
| {
"content_hash": "bdeabc7a6a03a60b1e3196e9c5d620fa",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 108,
"avg_line_length": 26.6,
"alnum_prop": 0.6629072681704261,
"repo_name": "codingforentrepreneurs/marketplace",
"id": "b522641d51e5ce3f8da3a7026acdae269b245fd7",
"size": "798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/market/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42439"
},
{
"name": "HTML",
"bytes": "13438"
},
{
"name": "JavaScript",
"bytes": "108201"
},
{
"name": "Python",
"bytes": "38107"
}
],
"symlink_target": ""
} |
import os
import shutil
import win32api
import win32con
#dir_input = raw_input('Enter dir: ')
# root directory to sweep; hard-coded instead of prompted (see line above)
dir_input = r"C:\Users\romeroj1\Desktop\Google Drive"
# directory names treated as removable version-control metadata folders
win_trace = ['.svn']
# NOTE(review): never incremented — the update in delete() is commented out
files_removed = 0
def main():
    '''Walk dir_input and delete every folder whose name is in win_trace.'''
    for dirname, dirnames, filenames in os.walk(dir_input):
        for subdirname in dirnames:
            print os.path.join(dirname, subdirname)
        # any directory matching a win_trace name gets its files set to
        # normal attributes and is then deleted (see setnormalfile)
        for e in win_trace:
            if e in dirnames:
                delfolder = os.path.join(dirname, e)
                setnormalfile(delfolder)
    # for item in os.listdir(dir_input):
    #     if os.path.isfile(os.path.join(dir_input, item)):
    #         pass
    #     else:
    #         subdirs.append(os.path.join(dir_input, item))
    #stripjunk(subdirs)
def setnormalfile(dirs):
    '''Clear the read-only attribute on every file under dirs, then delete the tree.'''
    for root, _subdirs, names in os.walk(dirs):
        for name in names:
            full_path = os.path.join(root, name)
            # Windows refuses to delete read-only files, so normalize first
            win32api.SetFileAttributes(full_path, win32con.FILE_ATTRIBUTE_NORMAL)
    delete(dirs)
def delete(path):
    '''Delete the directory tree at path; log (do not raise) OSError failures.'''
    try:
        print "removing '%s'" % path
        shutil.rmtree(path)
        #files_removed += 1
        #print files_removed
    # best-effort sweep: report the failure and keep going
    except OSError as e:
        print 'Error deleting folder: {0} \n Error: ({1}) : {2}'.format(e.filename, e.errno, e.strerror)
# run the sweep only when executed as a script
if __name__ == '__main__':
    main()
"content_hash": "ce8b8253224a93ab1918071a0522d74e",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 105,
"avg_line_length": 29.057692307692307,
"alnum_prop": 0.5698213103904699,
"repo_name": "romeroj1/potential-batman",
"id": "8ad1e81493c7c2985623810c56104b961f4b0af4",
"size": "1695",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/python/delsvnfiles.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9587"
},
{
"name": "JavaScript",
"bytes": "36968"
},
{
"name": "Nix",
"bytes": "502927"
},
{
"name": "Perl",
"bytes": "459"
},
{
"name": "PowerShell",
"bytes": "57504"
},
{
"name": "Python",
"bytes": "91776"
},
{
"name": "Shell",
"bytes": "218531"
},
{
"name": "Visual Basic",
"bytes": "90994"
}
],
"symlink_target": ""
} |
"""Translates gates to a target basis using a given equivalence library."""
import time
import logging
from itertools import zip_longest
from collections import defaultdict
from functools import singledispatch
import retworkx
from qiskit.circuit import Gate, ParameterVector, QuantumRegister, ControlFlowOp, QuantumCircuit
from qiskit.dagcircuit import DAGCircuit
from qiskit.converters import circuit_to_dag, dag_to_circuit
from qiskit.circuit.equivalence import Key
from qiskit.transpiler.basepasses import TransformationPass
from qiskit.transpiler.exceptions import TranspilerError
logger = logging.getLogger(__name__)
class BasisTranslator(TransformationPass):
    """Translates gates to a target basis by searching for a set of translations
    from a given EquivalenceLibrary.
    This pass operates in several steps:
    * Determine the source basis from the input circuit.
    * Perform a Dijkstra search over basis sets, starting from the device's
      target_basis new gates are being generated using the rules from the provided
      EquivalenceLibrary and the search stops if all gates in the source basis have
      been generated.
    * The found path, as a set of rules from the EquivalenceLibrary, is composed
      into a set of gate replacement rules.
    * The composed replacement rules are applied in-place to each op node which
      is not already in the target_basis.
    If the target keyword argument is specified and that
    :class:`~qiskit.transpiler.Target` objects contains operations
    which are non-global (i.e. they are defined only for a subset of qubits),
    as calculated by :meth:`~qiskit.transpiler.Target.get_non_global_operation_names`,
    this pass will attempt to match the output translation to those constraints.
    For 1 qubit operations this is straightforward, the pass will perform a
    search using the union of the set of global operations with the set of operations
    defined solely on that qubit. For multi-qubit gates this is a bit more involved,
    while the behavior is initially similar to the single qubit case, just using all
    the qubits the operation is run on (where order is not significant) isn't sufficient.
    We also need to consider any potential local qubits defined on subsets of the
    quantum arguments for the multi-qubit operation. This means the target used for the
    search of a non-global multi-qubit gate is the union of global operations, non-global
    multi-qubit gates sharing the same qubits, and any non-global gates defined on
    any subset of the qubits used.
    .. note::
        In the case of non-global operations it is possible for a single
        execution of this pass to output an incomplete translation if any
        non-global gates are defined on qubits that are a subset of a larger
        multi-qubit gate. For example, if you have a ``u`` gate only defined on
        qubit 0 and an ``x`` gate only on qubit 1 it is possible when
        translating a 2 qubit operation on qubit 0 and 1 that the output might
        have ``u`` on qubit 1 and ``x`` on qubit 0. Typically running this pass
        a second time will correct these issues.
    """
    def __init__(self, equivalence_library, target_basis, target=None):
        """Initialize a BasisTranslator instance.
        Args:
            equivalence_library (EquivalenceLibrary): The equivalence library
                which will be used by the BasisTranslator pass. (Instructions in
                this library will not be unrolled by this pass.)
            target_basis (list[str]): Target basis names to unroll to, e.g. `['u3', 'cx']`.
            target (Target): The backend compilation target
        """
        super().__init__()
        self._equiv_lib = equivalence_library
        self._target_basis = target_basis
        self._target = target
        self._non_global_operations = None
        self._qargs_with_non_global_operation = {}  # pylint: disable=invalid-name
        if target is not None:
            self._non_global_operations = self._target.get_non_global_operation_names()
            # map each physical-qubit tuple to the set of gate names that are
            # only defined on exactly those qubits
            self._qargs_with_non_global_operation = defaultdict(set)
            for gate in self._non_global_operations:
                for qarg in self._target[gate]:
                    self._qargs_with_non_global_operation[qarg].add(gate)
    def run(self, dag):
        """Translate an input DAGCircuit to the target basis.
        Args:
            dag (DAGCircuit): input dag
        Raises:
            TranspilerError: if the target basis cannot be reached
        Returns:
            DAGCircuit: translated circuit.
        """
        if self._target_basis is None and self._target is None:
            return dag
        qarg_indices = {qubit: index for index, qubit in enumerate(dag.qubits)}
        # Names of instructions assumed to supported by any backend.
        if self._target is None:
            basic_instrs = ["measure", "reset", "barrier", "snapshot", "delay"]
            target_basis = set(self._target_basis)
            source_basis = set(_extract_basis(dag))
            qargs_local_source_basis = {}
        else:
            basic_instrs = ["barrier", "snapshot"]
            # non-global gates are searched per-qarg below, not globally
            target_basis = self._target.keys() - set(self._non_global_operations)
            source_basis, qargs_local_source_basis = self._extract_basis_target(dag, qarg_indices)
        target_basis = set(target_basis).union(basic_instrs)
        logger.info(
            "Begin BasisTranslator from source basis %s to target basis %s.",
            source_basis,
            target_basis,
        )
        # Search for a path from source to target basis.
        search_start_time = time.time()
        basis_transforms = _basis_search(self._equiv_lib, source_basis, target_basis)
        qarg_local_basis_transforms = {}
        for qarg, local_source_basis in qargs_local_source_basis.items():
            expanded_target = set(target_basis)
            # For any multiqubit operation that contains a subset of qubits that
            # has a non-local operation, include that non-local operation in the
            # search. This matches with the check we did above to include those
            # subset non-local operations in the check here.
            if len(qarg) > 1:
                for non_local_qarg, local_basis in self._qargs_with_non_global_operation.items():
                    if qarg.issuperset(non_local_qarg):
                        expanded_target |= local_basis
            else:
                expanded_target |= self._qargs_with_non_global_operation[tuple(qarg)]
            logger.info(
                "Performing BasisTranslator search from source basis %s to target "
                "basis %s on qarg %s.",
                local_source_basis,
                expanded_target,
                qarg,
            )
            local_basis_transforms = _basis_search(
                self._equiv_lib, local_source_basis, expanded_target
            )
            if local_basis_transforms is None:
                raise TranspilerError(
                    "Unable to map source basis {} to target basis {} on qarg {} "
                    "over library {}.".format(
                        local_source_basis, expanded_target, qarg, self._equiv_lib
                    )
                )
            qarg_local_basis_transforms[qarg] = local_basis_transforms
        search_end_time = time.time()
        logger.info(
            "Basis translation path search completed in %.3fs.", search_end_time - search_start_time
        )
        if basis_transforms is None:
            raise TranspilerError(
                "Unable to map source basis {} to target basis {} "
                "over library {}.".format(source_basis, target_basis, self._equiv_lib)
            )
        # Compose found path into a set of instruction substitution rules.
        compose_start_time = time.time()
        instr_map = _compose_transforms(basis_transforms, source_basis, dag)
        extra_instr_map = {
            qarg: _compose_transforms(transforms, qargs_local_source_basis[qarg], dag)
            for qarg, transforms in qarg_local_basis_transforms.items()
        }
        compose_end_time = time.time()
        logger.info(
            "Basis translation paths composed in %.3fs.", compose_end_time - compose_start_time
        )
        # Replace source instructions with target translations.
        replace_start_time = time.time()
        def apply_translation(dag):
            # Returns True if any node in `dag` (or in a nested control-flow
            # block) was rewritten.
            dag_updated = False
            for node in dag.op_nodes():
                node_qargs = tuple(qarg_indices[bit] for bit in node.qargs)
                qubit_set = frozenset(node_qargs)
                if node.name in target_basis:
                    # already in basis, but still recurse into control-flow blocks
                    if isinstance(node.op, ControlFlowOp):
                        flow_blocks = []
                        for block in node.op.blocks:
                            dag_block = circuit_to_dag(block)
                            dag_updated = apply_translation(dag_block)
                            if dag_updated:
                                flow_circ_block = dag_to_circuit(dag_block)
                            else:
                                flow_circ_block = block
                            flow_blocks.append(flow_circ_block)
                        node.op = node.op.replace_blocks(flow_blocks)
                    continue
                # non-global gate valid on exactly these qubits: leave it alone
                if (
                    node_qargs in self._qargs_with_non_global_operation
                    and node.name in self._qargs_with_non_global_operation[node_qargs]
                ):
                    continue
                if dag.has_calibration_for(node):
                    continue
                if qubit_set in extra_instr_map:
                    self._replace_node(dag, node, extra_instr_map[qubit_set])
                elif (node.op.name, node.op.num_qubits) in instr_map:
                    self._replace_node(dag, node, instr_map)
                else:
                    raise TranspilerError(f"BasisTranslator did not map {node.name}.")
                dag_updated = True
            return dag_updated
        apply_translation(dag)
        replace_end_time = time.time()
        logger.info(
            "Basis translation instructions replaced in %.3fs.",
            replace_end_time - replace_start_time,
        )
        return dag
    def _replace_node(self, dag, node, instr_map):
        """Substitute ``node`` in ``dag`` with its translation from ``instr_map``."""
        target_params, target_dag = instr_map[node.op.name, node.op.num_qubits]
        if len(node.op.params) != len(target_params):
            raise TranspilerError(
                "Translation num_params not equal to op num_params."
                "Op: {} {} Translation: {}\n{}".format(
                    node.op.params, node.op.name, target_params, target_dag
                )
            )
        if node.op.params:
            # Convert target to circ and back to assign_parameters, since
            # DAGCircuits won't have a ParameterTable.
            target_circuit = dag_to_circuit(target_dag)
            target_circuit.assign_parameters(
                dict(zip_longest(target_params, node.op.params)), inplace=True
            )
            bound_target_dag = circuit_to_dag(target_circuit)
        else:
            bound_target_dag = target_dag
        # single-op translation on the same qubits can be substituted in place
        if len(bound_target_dag.op_nodes()) == 1 and len(
            bound_target_dag.op_nodes()[0].qargs
        ) == len(node.qargs):
            dag_op = bound_target_dag.op_nodes()[0].op
            # dag_op may be the same instance as other ops in the dag,
            # so if there is a condition, need to copy
            if getattr(node.op, "condition", None):
                dag_op = dag_op.copy()
            dag.substitute_node(node, dag_op, inplace=True)
            if bound_target_dag.global_phase:
                dag.global_phase += bound_target_dag.global_phase
        else:
            dag.substitute_node_with_dag(node, bound_target_dag)
    def _extract_basis_target(
        self, dag, qarg_indices, source_basis=None, qargs_local_source_basis=None
    ):
        """Split the dag's gates into a global source basis and per-qarg local bases."""
        if source_basis is None:
            source_basis = set()
        if qargs_local_source_basis is None:
            qargs_local_source_basis = defaultdict(set)
        for node in dag.op_nodes():
            qargs = tuple(qarg_indices[bit] for bit in node.qargs)
            if dag.has_calibration_for(node):
                continue
            # Treat the instruction as on an incomplete basis if the qargs are in the
            # qargs_with_non_global_operation dictionary or if any of the qubits in qargs
            # are a superset for a non-local operation. For example, if the qargs
            # are (0, 1) and that's a global (ie no non-local operations on (0, 1)
            # operation but there is a non-local operation on (1,) we need to
            # do an extra non-local search for this op to ensure we include any
            # single qubit operation for (1,) as valid. This pattern also holds
            # true for > 2q ops too (so for 4q operations we need to check for 3q, 2q,
            # and 1q operations in the same manner)
            if qargs in self._qargs_with_non_global_operation or any(
                frozenset(qargs).issuperset(incomplete_qargs)
                for incomplete_qargs in self._qargs_with_non_global_operation
            ):
                qargs_local_source_basis[frozenset(qargs)].add((node.name, node.op.num_qubits))
            else:
                source_basis.add((node.name, node.op.num_qubits))
            if isinstance(node.op, ControlFlowOp):
                for block in node.op.blocks:
                    block_dag = circuit_to_dag(block)
                    source_basis, qargs_local_source_basis = self._extract_basis_target(
                        block_dag,
                        qarg_indices,
                        source_basis=source_basis,
                        qargs_local_source_basis=qargs_local_source_basis,
                    )
        return source_basis, qargs_local_source_basis
# this could be singledispatchmethod and included in above class when minimum
# supported python version=3.8.
@singledispatch
def _extract_basis(circuit):
    # base case of the singledispatch: unknown types pass through untouched;
    # the registered overloads below handle DAGCircuit and QuantumCircuit
    return circuit
@_extract_basis.register
def _(dag: DAGCircuit):
    # yield (name, num_qubits) for every op without a calibration,
    # recursing into control-flow blocks
    for node in dag.op_nodes():
        if not dag.has_calibration_for(node):
            yield (node.name, node.op.num_qubits)
        if isinstance(node.op, ControlFlowOp):
            for block in node.op.blocks:
                yield from _extract_basis(block)
@_extract_basis.register
def _(circ: QuantumCircuit):
    # same as the DAGCircuit overload, but iterating raw circuit data
    for instr_context in circ.data:
        instr, _, _ = instr_context
        if not circ.has_calibration_for(instr_context):
            yield (instr.name, instr.num_qubits)
        if isinstance(instr, ControlFlowOp):
            for block in instr.blocks:
                yield from _extract_basis(block)
class StopIfBasisRewritable(Exception):
    """Custom exception that signals `retworkx.dijkstra_search` to stop."""
class BasisSearchVisitor(retworkx.visit.DijkstraVisitor):
    """Handles events emitted during `retworkx.dijkstra_search`."""
    def __init__(self, graph, source_basis, target_basis, num_gates_for_rule):
        self.graph = graph
        self.target_basis = set(target_basis)
        # gates still waiting to be reached by the search
        self._source_gates_remain = set(source_basis)
        # per-rule count of prerequisite gates not yet discovered
        self._num_gates_remain_for_rule = dict(num_gates_for_rule)
        self._basis_transforms = []
        self._predecessors = dict()
        # optimal (Dijkstra) cost of generating each discovered gate
        self._opt_cost_map = dict()
    def discover_vertex(self, v, score):
        gate = self.graph[v]
        self._source_gates_remain.discard(gate)
        self._opt_cost_map[gate] = score
        rule = self._predecessors.get(gate, None)
        if rule is not None:
            logger.debug(
                "Gate %s generated using rule \n%s\n with total cost of %s.",
                gate.name,
                rule.circuit,
                score,
            )
            self._basis_transforms.append((gate.name, gate.num_qubits, rule.params, rule.circuit))
        # we can stop the search if we have found all gates in the original circuit.
        if not self._source_gates_remain:
            # if we start from source gates and apply `basis_transforms` in reverse order, we'll end
            # up with gates in the target basis. Note though that `basis_transforms` may include
            # additional transformations that are not required to map our source gates to the given
            # target basis.
            self._basis_transforms.reverse()
            raise StopIfBasisRewritable
    def examine_edge(self, edge):
        _, target, edata = edge
        if edata is None:
            return
        index = edata["index"]
        self._num_gates_remain_for_rule[index] -= 1
        target = self.graph[target]
        # if there are gates in this `rule` that we have not yet generated, we can't apply
        # this `rule`. if `target` is already in basis, it's not beneficial to use this rule.
        if self._num_gates_remain_for_rule[index] > 0 or target in self.target_basis:
            raise retworkx.visit.PruneSearch
    def edge_relaxed(self, edge):
        _, target, edata = edge
        if edata is not None:
            # remember which rule produced this gate on the cheapest path
            gate = self.graph[target]
            self._predecessors[gate] = edata["rule"]
    def edge_cost(self, edge):
        """Returns the cost of an edge.
        This function computes the cost of this edge rule by summing
        the costs of all gates in the rule equivalence circuit. In the
        end, we need to subtract the cost of the source since `dijkstra`
        will later add it.
        """
        if edge is None:
            # the target of the edge is a gate in the target basis,
            # so we return a default value of 1.
            return 1
        cost_tot = 0
        rule = edge["rule"]
        for instruction in rule.circuit:
            key = Key(name=instruction.operation.name, num_qubits=len(instruction.qubits))
            cost_tot += self._opt_cost_map[key]
        source = edge["source"]
        return cost_tot - self._opt_cost_map[source]
    @property
    def basis_transforms(self):
        """Returns the gate basis transforms."""
        return self._basis_transforms
def _basis_search(equiv_lib, source_basis, target_basis):
    """Search for a set of transformations from source_basis to target_basis.
    Args:
        equiv_lib (EquivalenceLibrary): Source of valid translations
        source_basis (Set[Tuple[gate_name: str, gate_num_qubits: int]]): Starting basis.
        target_basis (Set[gate_name: str]): Target basis.
    Returns:
        Optional[List[Tuple[gate, equiv_params, equiv_circuit]]]: List of (gate,
            equiv_params, equiv_circuit) tuples tuples which, if applied in order
            will map from source_basis to target_basis. Returns None if no path
            was found.
    """
    logger.debug("Begining basis search from %s to %s.", source_basis, target_basis)
    # gates already in the target basis need no translation
    source_basis = {
        (gate_name, gate_num_qubits)
        for gate_name, gate_num_qubits in source_basis
        if gate_name not in target_basis
    }
    # if source basis is empty, no work to be done.
    if not source_basis:
        return []
    all_gates_in_lib = set()
    graph = retworkx.PyDiGraph()
    nodes_to_indices = dict()
    num_gates_for_rule = dict()
    def lazy_setdefault(key):
        # add the gate key as a graph node on first sight
        if key not in nodes_to_indices:
            nodes_to_indices[key] = graph.add_node(key)
        return nodes_to_indices[key]
    rcounter = 0  # running sum of the number of equivalence rules in the library.
    # build the dependency graph: one edge per (prerequisite gate -> produced gate)
    for key in equiv_lib._get_all_keys():
        target = lazy_setdefault(key)
        all_gates_in_lib.add(key)
        for equiv in equiv_lib._get_equivalences(key):
            sources = {
                Key(name=instruction.operation.name, num_qubits=len(instruction.qubits))
                for instruction in equiv.circuit
            }
            all_gates_in_lib |= sources
            edges = [
                (
                    lazy_setdefault(source),
                    target,
                    {"index": rcounter, "rule": equiv, "source": source},
                )
                for source in sources
            ]
            num_gates_for_rule[rcounter] = len(sources)
            graph.add_edges_from(edges)
            rcounter += 1
    # This is only necessary since gates in target basis are currently reported by
    # their names and we need to have in addition the number of qubits they act on.
    target_basis_keys = [
        key
        for gate in target_basis
        for key in filter(lambda key, name=gate: key.name == name, all_gates_in_lib)
    ]
    vis = BasisSearchVisitor(graph, source_basis, target_basis_keys, num_gates_for_rule)
    # we add a dummy node and connect it with gates in the target basis.
    # we'll start the search from this dummy node.
    dummy = graph.add_node("dummy starting node")
    graph.add_edges_from_no_data([(dummy, nodes_to_indices[key]) for key in target_basis_keys])
    rtn = None
    try:
        retworkx.digraph_dijkstra_search(graph, [dummy], vis.edge_cost, vis)
    except StopIfBasisRewritable:
        # the visitor raises once all source gates are reachable
        rtn = vis.basis_transforms
        logger.debug("Transformation path:")
        for gate_name, gate_num_qubits, params, equiv in rtn:
            logger.debug("%s/%s => %s\n%s", gate_name, gate_num_qubits, params, equiv)
    return rtn
def _compose_transforms(basis_transforms, source_basis, source_dag):
    """Compose a set of basis transforms into a set of replacements.
    Args:
        basis_transforms (List[Tuple[gate_name, params, equiv]]): List of
            transforms to compose.
        source_basis (Set[Tuple[gate_name: str, gate_num_qubits: int]]): Names
            of gates which need to be translated.
        source_dag (DAGCircuit): DAG with example gates from source_basis.
            (Used to determine num_params for gate in source_basis.)
    Returns:
        Dict[gate_name, Tuple(params, dag)]: Dictionary mapping between each gate
            in source_basis and a DAGCircuit instance to replace it. Gates in
            source_basis but not affected by basis_transforms will be included
            as a key mapping to itself.
    """
    example_gates = _get_example_gates(source_dag)
    mapped_instrs = {}
    # seed each source gate with an identity mapping: a one-gate DAG holding a
    # parameterized placeholder of itself
    for gate_name, gate_num_qubits in source_basis:
        # Need to grab a gate instance to find num_qubits and num_params.
        # Can be removed following https://github.com/Qiskit/qiskit-terra/pull/3947 .
        example_gate = example_gates[gate_name, gate_num_qubits]
        num_params = len(example_gate.params)
        placeholder_params = ParameterVector(gate_name, num_params)
        placeholder_gate = Gate(gate_name, gate_num_qubits, list(placeholder_params))
        placeholder_gate.params = list(placeholder_params)
        dag = DAGCircuit()
        qr = QuantumRegister(gate_num_qubits)
        dag.add_qreg(qr)
        dag.apply_operation_back(placeholder_gate, qr[:], [])
        mapped_instrs[gate_name, gate_num_qubits] = placeholder_params, dag
    # apply each transform in order, rewriting every mapping that still
    # contains the transformed gate
    for gate_name, gate_num_qubits, equiv_params, equiv in basis_transforms:
        logger.debug(
            "Composing transform step: %s/%s %s =>\n%s",
            gate_name,
            gate_num_qubits,
            equiv_params,
            equiv,
        )
        for mapped_instr_name, (dag_params, dag) in mapped_instrs.items():
            doomed_nodes = [
                node
                for node in dag.op_nodes()
                if (node.op.name, node.op.num_qubits) == (gate_name, gate_num_qubits)
            ]
            if doomed_nodes and logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    "Updating transform for mapped instr %s %s from \n%s",
                    mapped_instr_name,
                    dag_params,
                    dag_to_circuit(dag),
                )
            for node in doomed_nodes:
                replacement = equiv.assign_parameters(
                    dict(zip_longest(equiv_params, node.op.params))
                )
                replacement_dag = circuit_to_dag(replacement)
                dag.substitute_node_with_dag(node, replacement_dag)
            if doomed_nodes and logger.isEnabledFor(logging.DEBUG):
                logger.debug(
                    "Updated transform for mapped instr %s %s to\n%s",
                    mapped_instr_name,
                    dag_params,
                    dag_to_circuit(dag),
                )
    return mapped_instrs
def _get_example_gates(source_dag):
    """Map each (name, num_qubits) in the dag to one example op instance.

    Control-flow blocks are walked depth-first so nested gates are included.
    """
    def _walk(dag, found):
        for node in dag.op_nodes():
            found[(node.op.name, node.op.num_qubits)] = node.op
            if isinstance(node.op, ControlFlowOp):
                for block in node.op.blocks:
                    found = _walk(circuit_to_dag(block), found)
        return found
    return _walk(source_dag, {})
| {
"content_hash": "b3c230ee188008873b9290f66e77bd62",
"timestamp": "",
"source": "github",
"line_count": 611,
"max_line_length": 100,
"avg_line_length": 41.18166939443535,
"alnum_prop": 0.6022573722279628,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "67131298f1b72eed960afba2bf90c0b4f3c5130d",
"size": "25646",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "qiskit/transpiler/passes/basis/basis_translator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
} |
import os
import base64
import json
import copy
from unittest import mock
from Cryptodome.PublicKey import RSA
from Cryptodome.Cipher import AES
from keepercommander import rest_api, api, params, record, shared_folder, team, crypto, utils
# Fake account fixture: fresh random credentials and keys per test run.
_USER_NAME = 'unit.test@company.com'
_USER_PASSWORD = base64.b64encode(os.urandom(8)).decode('utf-8').strip('=')
_USER_ITERATIONS = 1000
_USER_SALT = os.urandom(16)
_USER_DATA_KEY = os.urandom(32)
_SESSION_TOKEN = base64.urlsafe_b64encode(os.urandom(64)).decode('utf-8').strip('=')
_DEVICE_ID = os.urandom(64)
_2FA_ONE_TIME_TOKEN = '123456'
_2FA_DEVICE_TOKEN = base64.urlsafe_b64encode(os.urandom(32)).decode('utf-8').strip('=')
# RSA key pair for the fake user; private key stored AES-wrapped by the data key.
_private_key, _public_key = crypto.generate_rsa_key()
_DER_PRIVATE_KEY = crypto.unload_rsa_private_key(_private_key)
_ENCRYPTED_PRIVATE_KEY = api.encrypt_aes(_DER_PRIVATE_KEY, _USER_DATA_KEY)
_IMPORTED_PUBLIC_KEY = crypto.unload_rsa_public_key(_public_key)
# Data key wrapped with the v2 derived key hash.
_V2_DERIVED_KEY = crypto.derive_keyhash_v2('data_key', _USER_PASSWORD, _USER_SALT, _USER_ITERATIONS)
_dk = rest_api.encrypt_aes(_USER_DATA_KEY, _V2_DERIVED_KEY)
_ENCRYPTED_DATA_KEY = base64.urlsafe_b64encode(_dk).decode('utf-8').strip()
# v1-style "encryption params" blob:
# version byte + 3-byte iteration count + salt + IV + ciphertext of the doubled data key.
_V1_DERIVED_KEY = api.derive_key(_USER_PASSWORD, _USER_SALT, _USER_ITERATIONS)
_enc_iter = int.to_bytes(_USER_ITERATIONS, length=3, byteorder='big', signed=False)
_enc_iv = os.urandom(16)
_cipher = AES.new(_V1_DERIVED_KEY, AES.MODE_CBC, _enc_iv)
_enc_dk = b'\x01' + _enc_iter + _USER_SALT + _enc_iv + _cipher.encrypt(_USER_DATA_KEY + _USER_DATA_KEY)
_ENCRYPTION_PARAMS = base64.urlsafe_b64encode(_enc_dk).decode('utf-8').strip('=')
class VaultEnvironment:
    """Read-only snapshot of the module's fake-vault constants for tests."""
    def __init__(self):
        self.user = _USER_NAME
        self.password = _USER_PASSWORD
        self.iterations = _USER_ITERATIONS
        self.salt = _USER_SALT
        self.data_key = _USER_DATA_KEY
        self.public_key = _public_key
        self.encoded_public_key = utils.base64_url_encode(_IMPORTED_PUBLIC_KEY)
        self.session_token = _SESSION_TOKEN
        self.device_id = _DEVICE_ID
        self.one_time_token = _2FA_ONE_TIME_TOKEN
        self.device_token = _2FA_DEVICE_TOKEN
        self.encrypted_private_key = _ENCRYPTED_PRIVATE_KEY
        self.encrypted_data_key = _ENCRYPTED_DATA_KEY
        self.encryption_params = _ENCRYPTION_PARAMS
        # _REVISION is defined later in the module; fine at instantiation time
        self.revision = _REVISION
def get_user_params():
    """Build KeeperParams pre-populated with the fake user's credentials."""
    keeper_params = params.KeeperParams(
        server='https://test.keepersecurity.com/', device_id=_DEVICE_ID)
    encoded_device = base64.urlsafe_b64encode(_DEVICE_ID).decode('utf-8').rstrip('=')
    keeper_params.config['device_id'] = encoded_device
    keeper_params.user = _USER_NAME
    keeper_params.password = _USER_PASSWORD
    return keeper_params
def get_connected_params():
    """Return KeeperParams extended with derived keys and a session token,
    as they would look right after a successful login."""
    p = get_user_params()
    p.iterations = _USER_ITERATIONS
    p.salt = _USER_SALT
    p.data_key = _USER_DATA_KEY
    p.auth_verifier = api.auth_verifier(_USER_PASSWORD, _USER_SALT, _USER_ITERATIONS)
    p.rsa_key = RSA.importKey(_DER_PRIVATE_KEY)
    p.session_token = _SESSION_TOKEN
    return p
def get_synced_params():
    """Return connected params after a mocked sync_down, with a basic
    'login' record type registered in the record-type cache."""
    p = get_connected_params()
    # sync_down is driven by the canned response instead of the network
    with mock.patch('keepercommander.api.communicate') as mock_comm:
        mock_comm.return_value = get_sync_down_response()
        api.sync_down(p)
    p.record_type_cache[1] = {
        "$id": "login",
        "categories": ["login"],
        "description": "Login template",
        "fields": [
            {"$ref": "login"},
            {"$ref": "password"},
            {"$ref": "url"},
            {"$ref": "fileRef"},
            {"$ref": "oneTimeCode"}
        ]
    }
    return p
_REVISION = 100
# Mutable module-level stores that the register_* helpers append fixtures to.
_RECORDS = []
_RECORD_METADATA = []
_SHARED_FOLDERS = []
_USER_FOLDERS = []
_USER_FOLDER_RECORDS = []
_USER_FOLDER_SHARED_FOLDER = []
_TEAMS = []
def get_sync_down_response():
    """Build a full sync_down payload from the registered fixtures.

    Each table is deep-copied so tests cannot mutate the canonical data.
    """
    payload = {
        'result': 'success',
        'result_code': '',
        'message': '',
        'full_sync': True,
        'revision': _REVISION,
    }
    tables = {
        'records': _RECORDS,
        'record_meta_data': _RECORD_METADATA,
        'shared_folders': _SHARED_FOLDERS,
        'teams': _TEAMS,
        'user_folders': _USER_FOLDERS,
        'user_folder_records': _USER_FOLDER_RECORDS,
        'user_folder_shared_folders': _USER_FOLDER_SHARED_FOLDER,
    }
    for name, table in tables.items():
        payload[name] = copy.deepcopy(table)
    return payload
def register_record(record, key_type=None):
    # type: (record.Record, int or None) -> bytes
    """Encrypt *record*, append it (plus metadata) to the fake vault stores,
    and return the AES record key used.

    key_type semantics (as exercised below): 0 -> v1 record keyed directly by
    the user data key; 1 -> owned record, key AES-wrapped; 2 -> shared record,
    key RSA-wrapped; any other value -> record stored without metadata.
    """
    data = {
        'title': record.title or '',
        'secret1': record.login or '',
        'secret2': record.password or '',
        'link': record.login_url or '',
        'notes': record.notes or '',
        'custom': record.custom_fields or '',
        'folder': record.folder or ''
    }
    extra = None
    udata = None
    if record.attachments:
        extra = {
            'files': record.attachments
        }
        udata = {
            'file_id': [x['id'] for x in record.attachments]
        }
    # v1 records (key_type 0) reuse the user data key instead of a fresh key
    record_key = api.generate_aes_key() if key_type != 0 else _USER_DATA_KEY
    rec_object = {
        'record_uid': record.record_uid,
        # clamp out-of-range revisions to the current vault revision
        'revision': record.revision if (0 < record.revision <= _REVISION) else _REVISION,
        'version': 2 if key_type != 0 else 1,
        'shared': key_type not in [0, 1],
        'data': api.encrypt_aes(json.dumps(data).encode('utf-8'), record_key),
    }
    if extra:
        rec_object['extra'] = api.encrypt_aes(json.dumps(extra).encode('utf-8'), record_key)
    if udata:
        rec_object['udata'] = udata
    _RECORDS.append(rec_object)
    meta_data = {
        'record_uid': record.record_uid,
        'owner': key_type in [0, 1],
        'can_share': key_type == 1,
        'can_edit': key_type == 1,
        'record_key_type': key_type
    }
    if key_type == 0:
        _RECORD_METADATA.append(meta_data)
    if key_type == 1:
        meta_data['record_key'] = utils.base64_url_encode(crypto.encrypt_aes_v1(record_key, _USER_DATA_KEY))
        _RECORD_METADATA.append(meta_data)
    elif key_type == 2:
        meta_data['record_key'] = utils.base64_url_encode(crypto.encrypt_rsa(record_key, _public_key))
        _RECORD_METADATA.append(meta_data)
    return record_key
def register_records_to_folder(folder_uid, record_uids):
    # type: (str or None, list) -> None
    """Link each record uid to folder_uid (or to the vault root when falsy)."""
    extra = {'folder_uid': folder_uid} if folder_uid else {}
    _USER_FOLDER_RECORDS.extend(
        dict(record_uid=record_uid, **extra) for record_uid in record_uids)
def register_shared_folder(shared_folder, records):
    # type: (shared_folder.SharedFolder, dict) -> bytes
    """Append *shared_folder* (with the given {record_uid: record_key} map)
    to the fake vault and return its folder key."""
    shared_folder_key = api.generate_aes_key()
    sf = {
        'shared_folder_uid': shared_folder.shared_folder_uid,
        'key_type': 1,
        # folder key is AES-wrapped with the user data key (key_type 1)
        'shared_folder_key': api.encrypt_aes(shared_folder_key, _USER_DATA_KEY),
        'name': api.encrypt_aes(shared_folder.name.encode('utf-8'), shared_folder_key),
        'is_account_folder': False,
        'manage_records': False,
        'manage_users': False,
        'default_manage_records': True,
        'default_manage_users': True,
        'default_can_edit': True,
        'default_can_share': True,
        'full_sync': True,
        # each record key is re-wrapped with the folder key
        'records': [{
            'record_uid': x[0],
            'record_key': api.encrypt_aes(x[1], shared_folder_key),
            'can_share': False,
            'can_edit': False
        } for x in records.items()],
        'users': [{
            'username': _USER_NAME,
            'manage_records': True,
            'manage_users': True
        }],
        'revision': 5
    }
    _SHARED_FOLDERS.append(sf)
    return shared_folder_key
def register_team(team, key_type, sfs=None):
    # type: (team.Team, int, dict) -> bytes
    """Register a team entry (optionally carrying shared-folder keys) and return its AES key."""
    team_key = api.generate_aes_key()

    # key_type 1 wraps the team key with the user's data key, otherwise RSA
    if key_type == 1:
        wrapped_team_key = api.encrypt_aes(team_key, _USER_DATA_KEY)
    else:
        wrapped_team_key = api.encrypt_rsa(team_key, _IMPORTED_PUBLIC_KEY)

    team_entry = {
        'team_uid': team.team_uid,
        'name': team.name,
        'team_key_type': key_type,
        'team_key': wrapped_team_key,
        'team_private_key': api.encrypt_aes(_DER_PRIVATE_KEY, team_key),
        'restrict_edit': team.restrict_edit,
        'restrict_share': team.restrict_share,
        'restrict_view': team.restrict_view,
    }
    _TEAMS.append(team_entry)

    if sfs:
        team_entry['shared_folder_keys'] = [
            {
                'shared_folder_uid': sf_uid,
                'key_type': 1,
                'shared_folder_key': api.encrypt_aes(sf_key, team_key),
            }
            for sf_uid, sf_key in sfs.items()
        ]
        # Announce team membership on every matching shared folder
        for sf in _SHARED_FOLDERS:
            if sf['shared_folder_uid'] in sfs:
                sf.setdefault('teams', []).append({
                    'team_uid': team.team_uid,
                    'name': team.name,
                    'manage_records': key_type == 1,
                    'manage_users': key_type == 1,
                })
    return team_key
def generate_data():
    """Populate the module-level vault tables with a fixed set of test entities."""
    # Record 1: owned record (key_type 1) with an attachment, in the legacy folder
    rec1 = record.Record()
    rec1.record_uid = api.generate_record_uid()
    rec1.folder = 'Old Folder'
    rec1.title = 'Record 1'
    rec1.login = 'user1@keepersecurity.com'
    rec1.password = 'password1'
    rec1.login_url = 'https://keepersecurity.com/1'
    rec1.set_field('field1', 'value1')
    rec1.notes = 'note1'
    attachment_key = base64.urlsafe_b64encode(api.generate_aes_key()).decode('utf-8').rstrip('=')
    rec1.attachments = [{
        'name': 'Attachment 1',
        'key': attachment_key,
        'id': 'ABCDEFGH',
        'size': 1000,
    }]
    rec1.revision = 1
    register_record(rec1, 1)

    # Record 2: record with a key_type-2 (RSA-wrapped) key
    rec2 = record.Record()
    rec2.record_uid = api.generate_record_uid()
    rec2.title = 'Record 2'
    rec2.login = 'user2@keepersecurity.com'
    rec2.password = 'password2'
    rec2.login_url = 'https://keepersecurity.com/2'
    rec2.set_field('field2', 'value2')
    rec2.notes = 'note2'
    rec2.revision = 2
    register_record(rec2, 2)
    register_records_to_folder(None, [rec1.record_uid, rec2.record_uid])

    # Record 3: reached only via the shared folder below (no direct key)
    rec3 = record.Record()
    rec3.record_uid = api.generate_record_uid()
    rec3.title = 'Record 3'
    rec3.login = 'user3@keepersecurity.com'
    rec3.password = 'password3'
    rec3.login_url = 'https://keepersecurity.com/3'
    rec3.revision = 3
    rec3_key = register_record(rec3)

    # Shared folder holding record 3
    folder1 = shared_folder.SharedFolder()
    folder1.shared_folder_uid = api.generate_record_uid()
    folder1.default_manage_records = False
    folder1.default_manage_users = False
    folder1.default_can_edit = False
    folder1.default_can_share = False
    folder1.name = 'Shared Folder 1'
    folder1_key = register_shared_folder(folder1, {rec3.record_uid: rec3_key})
    register_records_to_folder(folder1.shared_folder_uid, [rec3.record_uid])
    _USER_FOLDER_SHARED_FOLDER.append({'shared_folder_uid': folder1.shared_folder_uid})

    # Team holding the shared-folder key
    team1 = team.Team()
    team1.team_uid = api.generate_record_uid()
    team1.name = 'Team 1'
    team1.restrict_edit = True
    team1.restrict_share = True
    team1.restrict_view = False
    register_team(team1, 1, {folder1.shared_folder_uid: folder1_key})

    # A plain (encrypted) user folder
    user_folder_key = api.generate_aes_key()
    _USER_FOLDERS.append({
        'folder_uid': api.generate_record_uid(),
        'key_type': 1,
        'user_folder_key': api.encrypt_aes(user_folder_key, _USER_DATA_KEY),
        'revision': 200,
        'type': 'user_folder',
        'data': api.encrypt_aes(json.dumps({'name': 'User Folder 1'}).encode('utf-8'), user_folder_key),
    })

generate_data()
| {
"content_hash": "c55f5ad0a70414398c7c00a503799c98",
"timestamp": "",
"source": "github",
"line_count": 348,
"max_line_length": 132,
"avg_line_length": 32.439655172413794,
"alnum_prop": 0.5941181681282665,
"repo_name": "Keeper-Security/Commander",
"id": "938eba44f7a41b72ea122686fbd4d4e42064ae58",
"size": "11289",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit-tests/data_vault.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2274231"
},
{
"name": "Shell",
"bytes": "3388"
}
],
"symlink_target": ""
} |
import ursgal
import importlib
import os
import sys
import csv
import os.path
# csv.field_size_limit(sys.maxsize)
class venndiagram_1_0_0(ursgal.UNode):
    """Venn Diagram uNode"""

    META_INFO = {
        "edit_version": 1.00,
        "name": "Venndiagram",
        "version": "1.0.0",
        "release_date": None,
        "engine_type": {
            "visualizer": True,
        },
        "input_extensions": [".csv"],
        "output_extensions": [".svg"],
        "output_suffix": "venndiagram",
        "include_in_git": True,
        "in_development": False,
        "distributable": True,
        "utranslation_style": "venndiagram_style_1",
        "engine": {
            "platform_independent": {
                "arc_independent": {
                    "exe": "venndiagram_1_0_0.py",
                },
            },
        },
        "citation": "Kremer, L. P. M., Leufken, J., Oyunchimeg, P., Schulze, S. & "
        "Fufezan, C. (2016) Ursgal, Universal Python Module Combining "
        "Common Bottom-Up Proteomics Tools for Large-Scale Analysis. J. "
        "Proteome res. 15, 788-794.",
    }

    def __init__(self, *args, **kwargs):
        super(venndiagram_1_0_0, self).__init__(*args, **kwargs)

    def _execute(self):
        """
        Plot a Venn diagram for a list of .csv result files (2-5).

        Arguments are set in uparams.py but passed to the engine by the
        self.params attribute.

        Keyword arguments (via self.params):
            Input files (self.params['input_file_dicts']): list of
                dictionaries created by multi_run
            column_names (list): column names (str) used for the comparison;
                columns are merged if more than one column name is given
            header (str): header of the produced Venn diagram
            label_list (list): labels in the same order as the input files;
                names of last_search_engine are used if no label_list given
            output_file_name (str): created by self.magic_name_generator
                if None
            opacity (float)

        Returns:
            dict: results for the different areas e.g. dict['C-(A|B|D)']['results']

        Output file is written to the common_top_level_dir
        """
        print("[ -ENGINE- ] Plotting Venn diagram ..")
        venndiagram_main = self.import_engine_as_python_function()
        venn_params = {}
        translations = self.params["translations"]["_grouped_by_translated_key"]

        # BUGFIX: fetch the input file list up-front - the fallback branch
        # below previously referenced it before it was assigned (NameError).
        input_file_dicts = self.params["input_file_dicts"]

        output_file_name = os.path.join(
            self.params["output_dir_path"], self.params["output_file"]
        )
        if output_file_name is None:
            head, tail = self.determine_common_common_head_tail_part_of_names(
                input_file_dicts=input_file_dicts
            )
            output_file_name = head + tail
        translations["output_file"]["output_file_incl_path"] = output_file_name

        # Map the grouped translations onto the kwargs the engine expects
        for translated_key, translation_dict in translations.items():
            if translated_key in [
                "visualization_column_names",
                # 'visualization_label_list',
            ]:
                continue
            elif translated_key == "visualization_font":
                font_dict = translation_dict["visualization_font"]
                venn_params["font"] = font_dict["font_type"]
                venn_params["label font-size header"] = font_dict["font_size_header"]
                venn_params["label font-size major"] = font_dict["font_size_major"]
                venn_params["label font-size minor"] = font_dict["font_size_minor"]
                venn_params["label font-size venn"] = font_dict["font_size_venn"]
            elif translated_key == "visualization_scaling_factors":
                scaling = translation_dict["visualization_scaling_factors"]
                venn_params["cx"] = scaling["x_axis"]
                venn_params["cy"] = scaling["y_axis"]
            elif translated_key == "visualization_size":
                size = translation_dict["visualization_size"]
                venn_params["width"] = size["width"]
                venn_params["height"] = size["height"]
            elif len(translation_dict) == 1:
                venn_params[translated_key] = list(translation_dict.values())[0]
            else:
                # BUGFIX: fixed 'translatd' typo in the user-facing message
                print(
                    "The translated key ",
                    translated_key,
                    " maps on more than one ukey, but no special rules have been defined",
                )
                print(translation_dict)
                sys.exit(1)

        column_sets = {}
        default_label = [
            "label_A",
            "label_B",
            "label_C",
            "label_D",
            "label_E",
            "label_F",
        ]

        # Pair each input file with a display label (engine name if known)
        data = []
        for result_pos, result_file in enumerate(input_file_dicts):
            has_last_engine = result_file.get("last_engine", False)
            if has_last_engine is False:
                label_for_venn = "{0}".format(result_pos)
            else:
                label_for_venn = "{0} ({1})".format(has_last_engine, result_pos)
            data_field = (
                label_for_venn,
                os.path.join(result_file["dir"], result_file["file"]),
            )
            data.append(data_field)

        assert all(
            f[1].upper().endswith(".CSV") for f in data
        ), "VennDiagram input files all have to be .csv"
        assert (
            len(data) <= 5
        ), """
    ERROR: input_file_list can only contain two to five result files,
    you can merge files before, if you need.
    Current number of files: {0}""".format(
            len(data)
        )

        used_labels = []
        lookup_dict = {}
        fieldnames_list = []
        for n, (engine, file_path) in enumerate(data):
            label_positions = self.params["translations"][
                "visualization_label_positions"
            ]
            if label_positions == {}:
                label = engine
            else:
                label = label_positions[str(n)]
            venn_params[default_label[n]] = label
            column_sets[label] = set()
            used_labels.append(label)
            # BUGFIX: the original format string repeated the set index and
            # printed the file path in the 'file #' slot
            print("[ Reading ] Venn set {0} / file #{0} : {1}".format(n, file_path))
            # BUGFIX: close the input file when done (context manager);
            # skip comment lines via startswith (robust to empty strings)
            with open(file_path, "r") as file_object:
                csv_input = csv.DictReader(
                    filter(lambda row: not row.startswith("#"), file_object)
                )
                # collect fieldnames (first-seen order, de-duplicated)
                for f_name in csv_input.fieldnames:
                    if f_name not in fieldnames_list:
                        fieldnames_list.append(f_name)
                for line_dict in csv_input:
                    # A row's identity is the concatenation of the compared columns
                    unique_identifier = ""
                    for column_name in self.params["translations"][
                        "visualization_column_names"
                    ]:
                        unique_identifier += "||{0}".format(line_dict[column_name])
                    column_sets[label].add(unique_identifier)
                    if unique_identifier not in lookup_dict:
                        lookup_dict[unique_identifier] = []
                    line_dict["original_input_label"] = default_label[n].split("_")[1]
                    line_dict["original_input_actual_name"] = label
                    lookup_dict[unique_identifier].append(line_dict)

        in_sets = [column_sets[label] for label in used_labels]
        return_dict = venndiagram_main(
            *in_sets,
            **venn_params,
        )

        # retrieve files corresponding to each set, only if the user wants them
        # NOTE(review): '== True' kept deliberately - the param value may be a
        # non-bool truthy; confirm before relaxing to a truthiness test.
        if self.params["translations"]["extract_venndiagram_file"] == True:
            # maps venn area letters (A..E) back onto input file positions
            translation_dict_label = {
                "A": "0",
                "B": "1",
                "C": "2",
                "D": "3",
                "E": "4",
            }
            # adding new columns for venn diagram output
            fieldnames_list.extend(
                [
                    "return_dict_nomenclature",
                    "actual_name",
                    "original_input_label",
                    "original_input_actual_name",
                ]
            )
            print("CREATING CSV FILE FROM VENN DIAGRAM ...")
            with open(
                output_file_name.replace(".svg", ".csv"), "w", newline=""
            ) as new_csvfile:
                writer = csv.DictWriter(new_csvfile, fieldnames=fieldnames_list)
                writer.writeheader()
                for key in return_dict.keys():
                    # create output file name for this venn area
                    output_name = ""
                    for character in key:
                        if character in translation_dict_label.keys():
                            name_by_user = self.params["translations"][
                                "visualization_label_positions"
                            ][translation_dict_label[character]]
                            # BUGFIX: pass the message string directly as the
                            # assert message - the original wrapped it in
                            # print(), producing an AssertionError with no text
                            assert (
                                "_[" not in name_by_user
                            ), 'ERROR MESSAGE: your label should not contain "_["'
                            assert (
                                "]_" not in name_by_user
                            ), 'ERROR MESSAGE: your label should not contain "]_"'
                            assert (
                                "(" not in name_by_user
                            ), 'ERROR MESSAGE: your label should not contain "("'
                            assert (
                                ")" not in name_by_user
                            ), 'ERROR MESSAGE: your label should not contain ")"'
                            output_name = output_name + name_by_user
                        else:
                            if character != "(" and character != ")":
                                output_name = output_name + "_[" + character + "]_"
                            else:
                                output_name = output_name + character
                    results = return_dict[key]["results"]
                    for unique_id in results:
                        line_dict_list = lookup_dict[unique_id]
                        for line_dict in line_dict_list:
                            line_dict["return_dict_nomenclature"] = key
                            line_dict["actual_name"] = output_name
                            writer.writerow(line_dict)
        return return_dict
| {
"content_hash": "9615efe0343ce8d7148816077a8b930a",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 103,
"avg_line_length": 43.09803921568628,
"alnum_prop": 0.4873521383075523,
"repo_name": "ursgal/ursgal",
"id": "d83097acc67a0b7d2bd3e5467f8519bbe131fcd4",
"size": "11012",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "ursgal/wrappers/venndiagram_1_0_0.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2330138"
},
{
"name": "Shell",
"bytes": "780"
}
],
"symlink_target": ""
} |
import cbor
import hashlib
import json
import time
import logging
import collections
import collections.abc
import traceback
import random
import sys
# JadeError
from .jade_error import JadeError
# Low-level comms backends
from .jade_serial import JadeSerialImpl
from .jade_tcp import JadeTCPImpl
# 'jade' logger
logger = logging.getLogger('jade')
device_logger = logging.getLogger('jade-device')

# BLE comms backend is optional
# It relies on the BLE dependencies being available
try:
    from .jade_ble import JadeBleImpl
except ImportError as e:
    # BUGFIX: Logger.warn() is a deprecated alias - use warning()
    logger.warning(e)
    logger.warning('BLE scanning/connectivity will not be available')

# Default serial connection
DEFAULT_SERIAL_DEVICE = '/dev/ttyUSB0'
DEFAULT_BAUD_RATE = 115200
DEFAULT_SERIAL_TIMEOUT = 120

# Default BLE connection
DEFAULT_BLE_DEVICE_NAME = 'Jade'
DEFAULT_BLE_SERIAL_NUMBER = None
DEFAULT_BLE_SCAN_TIMEOUT = 60
def _hexlify(data):
"""
Helper to map bytes-like types into hex-strings
to make for prettier message-logging.
Parameters
----------
data : any
The object to hexlify.
- bytes or bytearrays have 'hex()' method invoked
- list and dicts (values) have this function mapped over them
- Otherwise the input is returned unchanged
"""
if data is None:
return None
elif isinstance(data, bytes) or isinstance(data, bytearray):
return data.hex()
elif isinstance(data, list):
return [_hexlify(item) for item in data]
elif isinstance(data, dict):
return {k: _hexlify(v) for k, v in data.items()}
else:
return data
# NOTE: Removed entirely for electrum - so it is not used silently as a fallback.
# (hard error preferred in that case)
# Jade repo api will be improved to make enabling this function more explicit
# try:
# import requests
#
# def _http_request(params):
# """
# Simple http request function which can be used when a Jade response
# requires an external http call.
# The default implementation used in JadeAPI._jadeRpc() below.
# NOTE: Only available if the 'requests' dependency is available.
#
#     Callers can supply their own implementation of this call where it is required.
#
# Parameters
# ----------
# data : dict
# A dictionary structure describing the http call to make
#
# Returns
# -------
# dict
# with single key 'body', whose value is the json returned from the call
#
# """
# logger.debug('_http_request: {}'.format(params))
#
# # Use the first non-onion url
# url = [url for url in params['urls'] if not url.endswith('.onion')][0]
# if params['method'] == 'GET':
# assert 'data' not in params, 'Cannot pass body to requests.get'
# f = requests.get(url)
# elif params['method'] == 'POST':
# data = json.dumps(params['data'])
# f = requests.post(url, data)
#
# logger.debug("http_request received reply: {}".format(f.text))
#
# if f.status_code != 200:
# logger.error("http error {} : {}".format(f.status_code, f.text))
# raise ValueError(f.status_code)
#
# assert params['accept'] == 'json'
# f = f.json()
#
# return {'body': f}
#
# except ImportError as e:
# logger.warn(e)
# logger.warn('Default _http_requests() function will not be available')
#
class JadeAPI:
"""
High-Level Jade Client API
Builds on a JadeInterface to provide a meaningful API
Either:
a) use with JadeAPI.create_[serial|ble]() as jade:
(recommended)
or:
b) use JadeAPI.create_[serial|ble], then call connect() before
using, and disconnect() when finished
(caveat cranium)
or:
c) use ctor to wrap existing JadeInterface instance
(caveat cranium)
"""
    def __init__(self, jade):
        # Wrap an existing JadeInterface instance - see create_serial() /
        # create_ble() for the usual construction paths.
        assert jade is not None
        self.jade = jade
    def __enter__(self):
        # Context-manager entry: open the underlying transport connection
        # and hand back this api instance for use inside the 'with' block.
        self.connect()
        return self
def __exit__(self, exc_type, exc, tb):
if (exc_type):
logger.error("Exception causing JadeAPI context exit.")
logger.error(exc_type)
logger.error(exc)
traceback.print_tb(tb)
self.disconnect(exc_type is not None)
@staticmethod
def create_serial(device=None, baud=None, timeout=None):
"""
Create a JadeAPI object using the serial interface described.
Parameters
----------
device : str, optional
The device identifier for the serial device.
Underlying implementation will default (to /dev/ttyUSB0)
baud : int, optional
The communication baud rate.
Underlying implementation will default (to 115200)
timeout : int, optional
The serial read timeout when awaiting messages.
Underlying implementation will default (to 120s)
Returns
-------
JadeAPI
API object configured to use given serial parameters.
NOTE: the api instance has not yet tried to contact the hw
- caller must call 'connect()' before trying to use the Jade.
"""
impl = JadeInterface.create_serial(device, baud, timeout)
return JadeAPI(impl)
@staticmethod
def create_ble(device_name=None, serial_number=None,
scan_timeout=None, loop=None):
"""
Create a JadeAPI object using the BLE interface described.
NOTE: raises JadeError if BLE dependencies not installed.
Parameters
----------
device_name : str, optional
The device name of the desired BLE device.
Underlying implementation will default (to 'Jade')
serial_number : int, optional
The serial number of the desired BLE device
- used to disambiguate multiple beacons with the same 'device name'
Underlying implementation will connect to the first beacon it scans
with the matching 'device name'.
scan_timeout : int, optional
The timeout when scanning for devices which match the device name/serial number.
Underlying implementation will default (to 60s)
loop : optional
The asynchio event loop to use, if required.
Underlying implementation will default (to asyncio.get_event_loop())
Returns
-------
JadeAPI
API object configured to use given BLE parameters.
NOTE: the api instance has not yet tried to contact the hw
- caller must call 'connect()' before trying to use the Jade.
Raises
------
JadeError if BLE backend not available (ie. BLE dependencies not installed)
"""
impl = JadeInterface.create_ble(device_name, serial_number,
scan_timeout, loop)
return JadeAPI(impl)
def connect(self):
"""
Try to connect the underlying transport interface (eg. serial, ble, etc.)
Raises an exception on failure.
"""
self.jade.connect()
def disconnect(self, drain=False):
"""
Disconnect the underlying transport (eg. serial, ble, etc.)
Parameters
----------
drain : bool, optional
When true log any/all remaining messages/data, otherwise silently discard.
NOTE: can prevent disconnection if data is arriving constantly.
Defaults to False.
"""
self.jade.disconnect(drain)
def drain(self):
"""
Log any/all outstanding messages/data.
NOTE: can run indefinitely if data is arriving constantly.
"""
self.jade.drain()
@staticmethod
def _get_result_or_raise_error(reply):
"""
Raise any error message returned from a Jade rpc call as an exception.
Parameters
----------
reply : dict
Dictionary representing a reply from a Jade rpc call.
Returns
-------
dict
Any nested 'result' structure, if the reply is not an error.
Raises
------
JadeError
If the reply represented an error, including all details received.
"""
if 'error' in reply:
e = reply['error']
raise JadeError(e.get('code'), e.get('message'), e.get('data'))
return reply['result']
    def _jadeRpc(self, method, params=None, inputid=None, http_request_fn=None, long_timeout=False):
        """
        Helper to make a request/reply rpc call over the underlying transport interface.
        NOTE: interface must be 'connected'.

        If the call returns an 'http_request' structure, this is handled here and the http
        call is made, and the result is passed into the rpc method given in 'on-reply', by
        calling this function recursively.

        Parameters
        ----------
        method : str
            rpc method to invoke
        params : dict, optional
            any parameters to pass to the rpc method
            Defaults to None.
        inputid : str, optional
            Any specific 'id' to use in the rpc message.
            Defaults to using a pseudo-random id generated in-situ.
        http_request_fn : function, optional
            A function which accepts a dict (containing a description of the http request), makes
            the described http call, and returns the body data in an element called 'body'.
            Defaults to the module-level _http_request() function, if available.
        long_timeout : bool, optional
            Whether the rpc call should use an indefinitely long timeout, rather than that set on
            construction.
            (Useful if the call involves a non-trivial user interaction with the device.)
            Defaults to False.

        Returns
        -------
        dict
            The reply from the rpc call.
            NOTE: will return the last/final reply after a sequence of calls, where 'http_request'
            was returned and remote data was fetched and passed into a subsequent call.
        """
        newid = inputid if inputid else str(random.randint(100000, 999999))
        request = self.jade.build_request(newid, method, params)
        reply = self.jade.make_rpc_call(request, long_timeout)
        result = self._get_result_or_raise_error(reply)
        # The Jade can respond with a request for interaction with a remote
        # http server. This is used for interaction with the pinserver but the
        # code below acts as a dumb proxy and simply makes the http request and
        # forwards the response back to the Jade.
        # Note: the function called to make the http-request can be passed in,
        # or it can default to the simple _http_request() function above, if available.
        # (getattr is used because that default is deliberately not defined in
        # some deployments - see the commented-out block above.)
        if isinstance(result, collections.abc.Mapping) and 'http_request' in result:
            this_module = sys.modules[__name__]
            make_http_request = http_request_fn or getattr(this_module, '_http_request', None)
            assert make_http_request, 'Default _http_request() function not available'
            http_request = result['http_request']
            http_response = make_http_request(http_request['params'])
            return self._jadeRpc(
                http_request['on-reply'],
                http_response['body'],
                http_request_fn=make_http_request,
                long_timeout=long_timeout)
        return result
def get_version_info(self):
"""
RPC call to fetch summary details pertaining to the hardware unit and running firmware.
Returns
-------
dict
Contains keys for various info describing the hw and running fw
"""
return self._jadeRpc('get_version_info')
def add_entropy(self, entropy):
"""
RPC call to add client entropy into the unit RNG entropy pool.
Parameters
----------
entropy : bytes
Bytes to fold into the hw entropy pool.
Returns
-------
bool
True on success
"""
params = {'entropy': entropy}
return self._jadeRpc('add_entropy', params)
def set_epoch(self, epoch=None):
"""
RPC call to set the current time epoch value, required for TOTP use.
NOTE: The time is lost on each power-down and must be reset on restart/reconnect before
TOTP can be used.
Parameters
----------
epoch : int, optional
Current epoch value, in seconds. Defaults to int(time.time()) value.
Returns
-------
bool
True on success
"""
params = {'epoch': epoch if epoch is not None else int(time.time())}
return self._jadeRpc('set_epoch', params)
def ota_update(self, fwcmp, fwlen, chunksize, patchlen=None, cb=None):
"""
RPC call to attempt to update the unit's firmware.
Parameters
----------
fwcmp : bytes
The compressed firmware image to upload to the Jade unit. Can be a full firmware or
and incremental diff to be applied to the currently running firmware image.
fwlen : int
The size of the new complete (uncompressed) firmware image (after any delta is applied).
chunksize : int
The size of the chunks used to upload the compressed firmware. Each chunk is uploaded
and ack'd by the hw unit.
The maximum supported chunk size is given in the version info data, under the key
'JADE_OTA_MAX_CHUNK'.
patchlen: int, optional
If the compressed firmware bytes are an incremental diff to be applied to the running
firmware image, this is the size of that patch when uncompressed.
Defaults to None, implying the compressed data is a full firmware image upload.
(Compare with fwlen - the size of the final fw image.)
cb : function, optional
Callback function accepting two integers - the amount of compressed firmware sent thus
far, and the total length of the compressed firmware to send.
If passed, this function is invoked each time a fw chunk is successfully uploaded and
ack'd by the hw, to notify of upload progress.
Defaults to None, and nothing is called to report upload progress.
Returns
-------
bool
True if no errors were reported - on next restart the hw unit will attempt to boot the
new firmware.
"""
# Compute the sha256 hash of the compressed file being uploaded
cmphasher = hashlib.sha256()
cmphasher.update(fwcmp)
cmphash = cmphasher.digest()
cmplen = len(fwcmp)
# Initiate OTA
ota_method = 'ota'
params = {'fwsize': fwlen,
'cmpsize': cmplen,
'cmphash': cmphash}
if patchlen is not None:
ota_method = 'ota_delta'
params['patchsize'] = patchlen
result = self._jadeRpc(ota_method, params)
assert result is True
# Write binary chunks
written = 0
while written < cmplen:
remaining = cmplen - written
length = min(remaining, chunksize)
chunk = bytes(fwcmp[written:written + length])
result = self._jadeRpc('ota_data', chunk)
assert result is True
written += length
if (cb):
cb(written, cmplen)
# All binary data uploaded
return self._jadeRpc('ota_complete')
def run_remote_selfcheck(self):
"""
RPC call to run in-built tests.
NOTE: Only available in a DEBUG build of the firmware.
Returns
-------
bool
True on success.
"""
return self._jadeRpc('debug_selfcheck', long_timeout=True)
def clean_reset(self):
"""
RPC call to clean/reset memory and storage, as much as is practical.
NOTE: Only available in a DEBUG build of the firmware.
Returns
-------
bool
True on success.
"""
return self._jadeRpc('debug_clean_reset')
def set_mnemonic(self, mnemonic, passphrase=None, temporary_wallet=False):
"""
RPC call to set the wallet mnemonic (in RAM only - flash storage is untouched).
NOTE: Only available in a DEBUG build of the firmware.
Parameters
----------
mnemonic : str
The wallet mnemonic to set.
passphrase : str, optional
Any bip39 passphrase to apply.
Defaults to None.
temporary_wallet : bool, optional
Whether to treat this wallet/mnemonic as an 'Emergency Restore' temporary wallet, as
opposed to one successfully loaded from the flash storage.
NOTE: in either case the wallet is only set in RAM, and flash storage is not affected.
Defaults to False.
Returns
-------
bool
True on success.
"""
params = {'mnemonic': mnemonic, 'passphrase': passphrase,
'temporary_wallet': temporary_wallet}
return self._jadeRpc('debug_set_mnemonic', params)
def set_seed(self, seed):
"""
RPC call to set the wallet seed.
NOTE: Only available in a DEBUG build of the firmware.
NOTE: Setting a seed always sets a 'temporary' wallet.
Parameters
----------
seed : bytes
The wallet seed to set as a temporary wallet (cannot be persisted in flash).
Returns
-------
bool
True on success.
"""
params = {'seed': seed}
return self._jadeRpc('debug_set_mnemonic', params)
def set_pinserver(self, urlA=None, urlB=None, pubkey=None, cert=None):
"""
RPC call to explicitly set (override) the details of the blind pinserver used to
authenticate the PIN entered on the Jade unit.
This data is recorded in the hw flash, and returned to the caller when authenticating
(in auth_user(), below).
Parameters
----------
urlA : str, optional
The primary url of the pinserver to use.
urlB : str, optional
Any secondary url of the pinserver to use.
pubkey : bytes, optional
The public key used to verify pinserver signed payloads.
cert : bytes, optional
Any additional certificate required to verify the pinserver identity.
Returns
-------
bool
True on success.
"""
params = {}
if urlA is not None or urlB is not None:
params['urlA'] = urlA
params['urlB'] = urlB
if pubkey is not None:
params['pubkey'] = pubkey
if cert is not None:
params['certificate'] = cert
return self._jadeRpc('update_pinserver', params)
def reset_pinserver(self, reset_details, reset_certificate):
"""
RPC call to reset any formerly overidden pinserver details to their defauts.
Parameters
----------
reset_details : bool, optional
If set, any overidden urls and pubkey are reset to their defaults.
reset_certificate : bool, optional
If set, any additional certificate is reset (to None).
Returns
-------
bool
True on success.
"""
params = {'reset_details': reset_details,
'reset_certificate': reset_certificate}
return self._jadeRpc('update_pinserver', params)
def auth_user(self, network, http_request_fn=None, epoch=None):
"""
RPC call to authenticate the user on the hw device, for using with the network provided.
Parameters
----------
network : str
The name of the network intended for use - eg. 'mainnet', 'liquid', 'testnet' etc.
This is verified against the networks allowed on the hardware.
http_request_fn : function, optional
Optional http-request function to pass http requests to the Jade pinserver.
Default behaviour is to use the '_http_request()' function which defers to the
'requests' module.
If the 'reqests' module is not available, no default http-request function is created,
and one must be supplied here.
epoch : int, optional
Current epoch value, in seconds. Defaults to int(time.time()) value.
Returns
-------
bool
True is returned immediately if the hw is already unlocked for use on the given network.
True if the PIN is entered and verified with the remote blind pinserver.
False if the PIN entered was incorrect.
"""
params = {'network': network, 'epoch': epoch if epoch is not None else int(time.time())}
return self._jadeRpc('auth_user', params,
http_request_fn=http_request_fn,
long_timeout=True)
def register_otp(self, otp_name, otp_uri):
"""
RPC call to register a new OTP record on the hw device.
Parameters
----------
otp_name : str
An identifying name for this OTP record
otp_uri : str
The uri of this OTP record - must begin 'otpauth://'
Returns
-------
bool
True if the OTP uri was validated and persisted on the hw
"""
params = {'name': otp_name, 'uri': otp_uri}
return self._jadeRpc('register_otp', params)
def get_otp_code(self, otp_name, value_override=None):
"""
RPC call to fetch a new OTP code from the hw device.
Parameters
----------
otp_name : str
An identifying name for the OTP record to use
value_override : int
An overriding HOTP counter or TOTP timestamp to use.
NOTE: Only available in a DEBUG build of the firmware.
Returns
-------
bool
True if the OTP uri was validated and persisted on the hw
"""
params = {'name': otp_name}
if value_override is not None:
params['override'] = value_override
return self._jadeRpc('get_otp_code', params)
def get_xpub(self, network, path):
"""
RPC call to fetch an xpub for the given bip32 path for the given network.
Parameters
----------
network : str
Network to which the xpub applies - eg. 'mainnet', 'liquid', 'testnet', etc.
path : [int]
bip32 path for which the xpub should be generated.
Returns
-------
str
base58 encoded xpub
"""
params = {'network': network, 'path': path}
return self._jadeRpc('get_xpub', params)
def get_registered_multisigs(self):
"""
RPC call to fetch brief summaries of any multisig wallets registered to this signer.
Returns
-------
dict
Brief description of registered multisigs, keyed by registration name.
Each entry contains keys:
variant - str, script type, eg. 'sh(wsh(multi(k)))'
sorted - boolean, whether bip67 key sorting is applied
threshold - int, number of signers required,N
num_signers - total number of signatories, M
master_blinding_key - 32-bytes, any liquid master blinding key for this wallet
"""
return self._jadeRpc('get_registered_multisigs')
def register_multisig(self, network, multisig_name, variant, sorted_keys, threshold, signers,
master_blinding_key=None):
"""
RPC call to register a new multisig wallet, which must contain the hw signer.
A registration name is provided - if it already exists that record is overwritten.
Parameters
----------
network : string
Network to which the multisig should apply - eg. 'mainnet', 'liquid', 'testnet', etc.
multisig_name : string
Name to use to identify this multisig wallet registration record.
If a registration record exists with the name given, that record is overwritten.
variant : str
The script type - one of 'sh(multi(k))', 'wsh(multi(k))', 'sh(wsh(multi(k)))'
sorted_keys : bool
Whether this is a 'sortedmulti()' wallet - ie. whether to apply bip67 sorting to the
pubkeys when generating redeem scripts.
threshold : int
Number of signers required.
signers : [dict]
Description of signers - should include keys:
- 'fingerprint' - 4 bytes, origin fingerprint
- 'derivation' - [int], bip32 path from origin to signer xpub provided
- 'xpub' - str, base58 xpub of signer - will be verified for hw unit signer
- 'path' - [int], any fixed path to always apply after the xpub - usually empty.
master_blinding_key : 32-bytes, optional
The master blinding key to use for this multisig wallet on liquid.
Optional, defaults to None.
Logically mandatory when 'network' indicates a liquid network and the Jade is to be
used to generate confidential addresses, blinding keys, blinding nonces, asset blinding
factors or output commitments.
Returns
-------
bool
True on success, implying the mutisig wallet can now be used.
"""
params = {'network': network, 'multisig_name': multisig_name,
'descriptor': {'variant': variant, 'sorted': sorted_keys,
'threshold': threshold, 'signers': signers,
'master_blinding_key': master_blinding_key}}
return self._jadeRpc('register_multisig', params)
def get_receive_address(self, *args, recovery_xpub=None, csv_blocks=0,
                        variant=None, multisig_name=None, confidential=None):
    """
    RPC call to generate, show, and return an address for the given path.

    The call has three forms, selected by the keyword arguments supplied.

    Parameters
    ----------
    network: str
        Network to which the address should apply - eg. 'mainnet', 'liquid', 'testnet', etc.

    Then either:

    1. Blockstream Green (multisig shield) addresses
    subaccount : int
        Blockstream Green subaccount
    branch : int
        Blockstream Green derivation branch
    pointer : int
        Blockstream Green address pointer
    recovery_xpub : str, optional
        xpub of recovery key for 2of3 subaccounts. Otherwise should be omitted.
        Defaults to None (ie. not a 2of3 subaccount).
    csv_blocks : int, optional
        Number of blocks to include in csv redeem script, if this is a csv-enabled account.
        Otherwise should be omitted.
        Defaults to 0 (ie. does not apply/not a csv-enabled account.)

    2. Generic single-sig addresses
    path: [int]
        bip32 path for which the address should be generated.
    variant: str
        The script type - one of 'pkh(k)', 'wpkh(k)', 'sh(wpkh(k))'

    3. Generic multisig addresses
    paths: [[int]]
        bip32 path suffixes, one for each signer, applied as a suffix to the registered
        signer path. Usually these path suffixes will all be identical.
    multisig_name : str
        The name of the registered multisig wallet record used to generate the address.

    Returns
    -------
    str
        The address generated for the given parameters.
    """
    # Each form takes a fixed number of positional args; unpack them by name.
    if multisig_name is not None:
        assert len(args) == 2
        network, paths = args
        params = {'network': network, 'paths': paths, 'multisig_name': multisig_name}
    elif variant is not None:
        assert len(args) == 2
        network, path = args
        params = {'network': network, 'path': path, 'variant': variant}
    else:
        assert len(args) == 4
        network, subaccount, branch, pointer = args
        params = {'network': network, 'subaccount': subaccount, 'branch': branch,
                  'pointer': pointer, 'recovery_xpub': recovery_xpub,
                  'csv_blocks': csv_blocks}

    # Only pass 'confidential' when explicitly set by the caller
    if confidential is not None:
        params['confidential'] = confidential

    return self._jadeRpc('get_receive_address', params)
def sign_message(self, path, message, use_ae_signatures=False,
                 ae_host_commitment=None, ae_host_entropy=None):
    """
    RPC call to format and sign the given message, using the given bip32 path.

    Supports RFC6979 and Anti-Exfil signatures.

    Parameters
    ----------
    path : [int]
        bip32 path for which the signature should be generated.
    message : str
        Message string to format and sign.
    use_ae_signatures : bool, optional
        Whether to use the Anti-Exfil protocol. Defaults to False.
    ae_host_commitment : 32-bytes, optional
        The host-commitment to use for Anti-Exfil signatures
    ae_host_entropy : 32-bytes, optional
        The host-entropy to use for Anti-Exfil signatures

    Returns
    -------
    1. Legacy/RFC6979 signatures
    str
        base64-encoded signature

    2. Anti-Exfil signatures
    (bytes, str)
        signer-commitment, base64-encoded signature
    """
    if not use_ae_signatures:
        # Standard EC signature, simple case - a single rpc round-trip
        return self._jadeRpc('sign_message', {'path': path, 'message': message})

    # Anti-exfil protocol:
    # We send the signing request and receive the signer-commitment in
    # reply once the user confirms.
    # We can then request the actual signature passing the ae-entropy.
    commit_params = {'path': path,
                     'message': message,
                     'ae_host_commitment': ae_host_commitment}
    signer_commitment = self._jadeRpc('sign_message', commit_params)
    signature = self._jadeRpc('get_signature', {'ae_host_entropy': ae_host_entropy})
    return signer_commitment, signature
def get_identity_pubkey(self, identity, curve, key_type, index=0):
    """
    RPC call to fetch a pubkey for the given identity (slip13/slip17).

    NOTE: this api returns an uncompressed public key

    Parameters
    ----------
    identity : str
        Identity string to format and sign. For example ssh://satoshi@bitcoin.org
    curve : str
        Name of curve to use - currently only 'nist256p1' is supported
    key_type : str
        Key derivation type - must be either 'slip-0013' for an identity pubkey, or
        'slip-0017' for an ecdh pubkey.
    index : int, optional
        Index number (if multiple keys/sigs are required per identity)
        Defaults to 0

    Returns
    -------
    65-bytes
        Uncompressed public key for the given identity and index.
        Consistent with 'sign_identity' or 'get_identity_shared_key', depending on the
        'key_type'.
    """
    # NOTE: the rpc field is named 'type', distinct from the python parameter name
    return self._jadeRpc('get_identity_pubkey', {'identity': identity,
                                                 'curve': curve,
                                                 'type': key_type,
                                                 'index': index})
def get_identity_shared_key(self, identity, curve, their_pubkey, index=0):
    """
    RPC call to fetch a SLIP-0017 shared ecdh key for the identity and counterparty public key.

    NOTE: this api takes an uncompressed public key

    Parameters
    ----------
    identity : str
        Identity string to format and sign. For example ssh://satoshi@bitcoin.org
    curve : str
        Name of curve to use - currently only 'nist256p1' is supported
    their_pubkey : 65-bytes
        The counterparty's uncompressed public key
    index : int, optional
        Index number (if multiple keys/sigs are required per identity)
        Defaults to 0

    Returns
    -------
    32-bytes
        The shared ecdh key for the given identity and cpty public key
        Consistent with 'get_identity_pubkey' with 'key_type=slip-0017'
    """
    return self._jadeRpc('get_identity_shared_key', {'identity': identity,
                                                     'curve': curve,
                                                     'index': index,
                                                     'their_pubkey': their_pubkey})
def sign_identity(self, identity, curve, challenge, index=0):
    """
    RPC call to authenticate the given identity through a challenge.

    Supports RFC6979.
    Returns the signature and the associated SLIP-0013 pubkey.
    NOTE: this api returns an uncompressed public key

    Parameters
    ----------
    identity : str
        Identity string to format and sign. For example ssh://satoshi@bitcoin.org
    curve : str
        Name of curve to use - currently only 'nist256p1' is supported
    challenge : bytes
        Challenge bytes to sign
    index : int, optional
        Index number (if multiple keys/sigs are required per identity)
        Defaults to 0

    Returns
    -------
    dict
        Contains keys:
        pubkey - 65-bytes, the uncompressed SLIP-0013 public key, consistent with
        'get_identity_pubkey' with 'key_type=slip-0013'
        signature - 65-bytes, RFC6979 deterministic signature, prefixed with 0x00
    """
    return self._jadeRpc('sign_identity', {'identity': identity,
                                           'curve': curve,
                                           'index': index,
                                           'challenge': challenge})
def get_master_blinding_key(self):
    """
    RPC call to fetch the master (SLIP-077) blinding key for the hw signer.

    NOTE: the master blinding key of any registered multisig wallets can be obtained from
    the result of `get_registered_multisigs()` - this call returns only the hw signer's
    own key.

    Returns
    -------
    32-bytes
        SLIP-077 master blinding key
    """
    # No parameters - the key is intrinsic to the wallet on the device
    return self._jadeRpc('get_master_blinding_key')
def get_blinding_key(self, script, multisig_name=None):
    """
    RPC call to fetch the public blinding key for the hw signer.

    Parameters
    ----------
    script : bytes
        The script for which the public blinding key is required.
    multisig_name : str, optional
        The name of any registered multisig wallet for which to fetch the blinding key.
        Defaults to None

    Returns
    -------
    33-bytes
        Public blinding key for the passed script.
    """
    return self._jadeRpc('get_blinding_key', {'script': script,
                                              'multisig_name': multisig_name})
def get_shared_nonce(self, script, their_pubkey, include_pubkey=False, multisig_name=None):
    """
    RPC call to get the shared secret to unblind a tx, given the receiving script and
    the pubkey of the sender (sometimes called "blinding nonce" in Liquid).

    Optionally fetch the hw signer's public blinding key also.

    Parameters
    ----------
    script : bytes
        The script for which the blinding nonce is required.
    their_pubkey : 33-bytes
        The counterparty public key.
    include_pubkey : bool, optional
        Whether to also return the wallet's public blinding key.
        Defaults to False.
    multisig_name : str, optional
        The name of any registered multisig wallet for which to fetch the blinding nonce.
        Defaults to None

    Returns
    -------
    1. include_pubkey is False
    33-bytes
        Public blinding nonce for the passed script and counterparty public key.

    2. include_pubkey is True
    dict
        Contains keys:
        shared_nonce - 32-bytes, public blinding nonce for the passed script as above.
        blinding_key - 33-bytes, public blinding key for the passed script.
    """
    params = {'script': script,
              'their_pubkey': their_pubkey,
              'include_pubkey': include_pubkey,
              'multisig_name': multisig_name}
    return self._jadeRpc('get_shared_nonce', params)
def get_blinding_factor(self, hash_prevouts, output_index, bftype, multisig_name=None):
    """
    RPC call to get a deterministic "trusted" blinding factor to blind an output.

    Normally the blinding factors are generated and returned in the `get_commitments`
    call, but for the last output the vbf must be generated on the host, so this call
    allows the host to get a valid abf to compute the generator and then the "final" vbf.
    Nonetheless, this call is kept generic, and can also generate vbfs, hence the
    "bftype" parameter.

    Parameters
    ----------
    hash_prevouts : 32-bytes
        This value is computed as specified in bip143.
        It is not verified immediately since at this point Jade doesn't have the tx in
        question; it will be checked later during `sign_liquid_tx()`.
    output_index : int
        The index of the output we are trying to blind
    bftype : str
        Can be either "ASSET" or "VALUE", to generate abfs or vbfs.
    multisig_name : str, optional
        The name of any registered multisig wallet for which to fetch the blinding factor.
        Defaults to None

    Returns
    -------
    32-bytes
        The requested blinding factor
    """
    # NOTE: the rpc field is named 'type', distinct from the python parameter name
    return self._jadeRpc('get_blinding_factor', {'hash_prevouts': hash_prevouts,
                                                 'output_index': output_index,
                                                 'type': bftype,
                                                 'multisig_name': multisig_name})
def get_commitments(self,
                    asset_id,
                    value,
                    hash_prevouts,
                    output_index,
                    vbf=None,
                    multisig_name=None):
    """
    RPC call to generate deterministic blinding factors and commitments for a given output.

    Can optionally get a "custom" VBF, normally used for the last input where the vbf is
    not computed here, but generated on the host according to all the other values.
    The commitments generated here should be passed back into `sign_liquid_tx()`.

    Parameters
    ----------
    asset_id : 32-bytes
        asset_id as usually displayed - ie. reversed compared to network/consensus order
    value : int
        value in 'satoshi' or equivalent atomic integral unit
    hash_prevouts : 32-bytes
        This value is computed as specified in bip143.
        It is not verified immediately since at this point Jade doesn't have the tx in
        question; it will be checked later during `sign_liquid_tx()`.
    output_index : int
        The index of the output we are trying to blind
    vbf : 32-bytes, optional
        The vbf to use, in preference to deterministically generating one in this call.
    multisig_name : str, optional
        The name of any registered multisig wallet for which to fetch the blinding factor.
        Defaults to None

    Returns
    -------
    dict
        Containing the blinding factors and output commitments.
    """
    params = dict(asset_id=asset_id,
                  value=value,
                  hash_prevouts=hash_prevouts,
                  output_index=output_index,
                  vbf=vbf,
                  multisig_name=multisig_name)
    return self._jadeRpc('get_commitments', params)
def _send_tx_inputs(self, base_id, inputs, use_ae_signatures):
    """
    Helper call to send the tx inputs to Jade for signing.

    Handles legacy RFC6979 signatures, as well as the Anti-Exfil protocol.

    Parameters
    ----------
    base_id : int
        The ids of the messages sent will be increments from this base id.
    inputs : [dict]
        The tx inputs - see `sign_tx()` / `sign_liquid_tx()` for details.
    use_ae_signatures : bool
        Whether to use the anti-exfil protocol to generate the signatures

    Returns
    -------
    1. if use_ae_signatures is False
    [bytes]
        An array of signatures corresponding to the array of inputs passed.
        The signatures are in DER format with the sighash appended.
        'None' placeholder elements are used for inputs not requiring a signature.

    2. if use_ae_signatures is True
    [(32-bytes, bytes)]
        An array of pairs of signer-commitments and signatures corresponding to the inputs.
        The signatures are in DER format with the sighash appended.
        (None, None) placeholder elements are used for inputs not requiring a signature.
    """
    if use_ae_signatures:
        # Anti-exfil protocol:
        # We send one message per input (which includes host-commitment *but
        # not* the host entropy) and receive the signer-commitment in reply.
        # Once all n input messages are sent, we can request the actual signatures
        # (as the user has a chance to confirm/cancel at this point).
        # We request the signatures passing the ae-entropy for each one.

        # Send inputs one at a time, receiving 'signer-commitment' in reply
        signer_commitments = []
        host_ae_entropy_values = []
        for txinput in inputs:
            # ae-protocol - do not send the host entropy immediately.
            # Shallow copy so the caller's input dicts are not mutated.
            txinput = txinput.copy()
            host_ae_entropy_values.append(txinput.pop('ae_host_entropy', None))

            base_id += 1
            input_id = str(base_id)
            reply = self._jadeRpc('tx_input', txinput, input_id)
            signer_commitments.append(reply)

        # Request the signatures one at a time, sending the entropy
        signatures = []
        for host_ae_entropy in host_ae_entropy_values:
            base_id += 1
            sig_id = str(base_id)
            params = {'ae_host_entropy': host_ae_entropy}
            reply = self._jadeRpc('get_signature', params, sig_id)
            signatures.append(reply)

        assert len(signatures) == len(inputs)
        return list(zip(signer_commitments, signatures))
    else:
        # Legacy protocol:
        # We send one message per input - without expecting replies.
        # Once all n input messages are sent, the hw then sends all n replies
        # (as the user has a chance to confirm/cancel at this point).
        # Then receive all n replies for the n signatures.
        # NOTE: *NOT* a sequence of n blocking rpc calls.
        # NOTE: at some point this flow should be removed in favour of the one
        # above, albeit without passing anti-exfil entropy or commitment data.

        # Send all n inputs
        requests = []
        for txinput in inputs:
            base_id += 1
            msg_id = str(base_id)
            request = self.jade.build_request(msg_id, 'tx_input', txinput)
            self.jade.write_request(request)
            requests.append(request)
            # Brief pause between writes - presumably to avoid flooding the
            # device's input buffer (TODO confirm rationale)
            time.sleep(0.1)

        # Receive all n signatures
        signatures = []
        for request in requests:
            reply = self.jade.read_response()
            self.jade.validate_reply(request, reply)
            signature = self._get_result_or_raise_error(reply)
            signatures.append(signature)

        assert len(signatures) == len(inputs)
        return signatures
def sign_liquid_tx(self, network, txn, inputs, commitments, change, use_ae_signatures=False,
                   asset_info=None):
    """
    RPC call to sign a liquid transaction.

    Parameters
    ----------
    network : str
        Network to which the address should apply - eg. 'liquid', 'liquid-testnet', etc.
    txn : bytes
        The transaction to sign
    inputs : [dict]
        The tx inputs. Should contain keys:
        is_witness, bool - whether this is a segwit input
        value_commitment, 33-bytes - The value commitment of this input

        These are only required if signing this input:
        script, bytes - the redeem script
        path, [int] - the bip32 path to sign with

        These are only required for Anti-Exfil signatures:
        ae_host_commitment, 32-bytes - The host-commitment for Anti-Exfil signatures
        ae_host_entropy, 32-bytes - The host-entropy for Anti-Exfil signatures
    commitments : [dict]
        An array sized for the number of outputs.
        Unblinded outputs should have a 'null' placeholder element.
        The commitments as retrieved from `get_commitments()`, with the addition of:
        'blinding_key', <bytes> - the output's public blinding key
        (as retrieved from `get_blinding_key()`)
    change : [dict]
        An array sized for the number of outputs.
        Outputs which are not change should have a 'null' placeholder element.
        Change elements with data will be automatically verified by Jade, and not by
        the user. Populated elements should contain sufficient data to generate the
        change address. See `get_receive_address()`
    use_ae_signatures : bool, optional
        Whether to use the anti-exfil protocol to generate the signatures.
        Defaults to False.
    asset_info : [dict]
        Any asset-registry data relevant to the assets being transacted, such that Jade
        can display a meaningful name, issuer, ticker etc. rather than just asset-id.
        At the very least must contain 'asset_id', 'contract' and 'issuance_prevout'
        items, exactly as in the registry data. NOTE: asset_info for the network
        policy-asset is not required.
        Defaults to None.

    Returns
    -------
    1. if use_ae_signatures is False
    [bytes]
        An array of signatures corresponding to the array of inputs passed.
        The signatures are in DER format with the sighash appended.
        'None' placeholder elements are used for inputs not requiring a signature.

    2. if use_ae_signatures is True
    [(32-bytes, bytes)]
        An array of pairs of signer-commitments and signatures corresponding to the inputs.
        The signatures are in DER format with the sighash appended.
        (None, None) placeholder elements are used for inputs not requiring a signature.
    """
    # 1st message contains txn and number of inputs we are going to send.
    # Reply ok if that corresponds to the expected number of inputs (n).
    # A random base-id keeps message ids unique across concurrent/successive calls.
    base_id = random.randint(1000, 9999) * 100
    initial_msg = dict(network=network,
                       txn=txn,
                       num_inputs=len(inputs),
                       trusted_commitments=commitments,
                       use_ae_signatures=use_ae_signatures,
                       change=change,
                       asset_info=asset_info)
    reply = self._jadeRpc('sign_liquid_tx', initial_msg, str(base_id))
    assert reply

    # Send inputs and receive signatures
    return self._send_tx_inputs(base_id, inputs, use_ae_signatures)
def sign_tx(self, network, txn, inputs, change, use_ae_signatures=False):
    """
    RPC call to sign a btc transaction.

    Parameters
    ----------
    network : str
        Network to which the address should apply - eg. 'mainnet', 'testnet', etc.
    txn : bytes
        The transaction to sign
    inputs : [dict]
        The tx inputs. Should contain keys:
        is_witness, bool - whether this is a segwit input

        These are only required if signing this input:
        script, bytes - the redeem script
        path, [int] - the bip32 path to sign with

        One of these is required:
        input_tx, bytes - The prior transaction which created the utxo of this input
        satoshi, int - The satoshi amount of this input - can be used in place of
        'input_tx' for a tx with a single segwit input

        These are only required for Anti-Exfil signatures:
        ae_host_commitment, 32-bytes - The host-commitment for Anti-Exfil signatures
        ae_host_entropy, 32-bytes - The host-entropy for Anti-Exfil signatures
    change : [dict]
        An array sized for the number of outputs.
        Outputs which are not change should have a 'null' placeholder element.
        Change elements with data will be automatically verified by Jade, and not by
        the user. Populated elements should contain sufficient data to generate the
        change address. See `get_receive_address()`
    use_ae_signatures : bool
        Whether to use the anti-exfil protocol to generate the signatures

    Returns
    -------
    1. if use_ae_signatures is False
    [bytes]
        An array of signatures corresponding to the array of inputs passed.
        The signatures are in DER format with the sighash appended.
        'None' placeholder elements are used for inputs not requiring a signature.

    2. if use_ae_signatures is True
    [(32-bytes, bytes)]
        An array of pairs of signer-commitments and signatures corresponding to the inputs.
        The signatures are in DER format with the sighash appended.
        (None, None) placeholder elements are used for inputs not requiring a signature.
    """
    # 1st message contains txn and number of inputs we are going to send.
    # Reply ok if that corresponds to the expected number of inputs (n).
    # A random base-id keeps message ids unique across concurrent/successive calls.
    base_id = random.randint(1000, 9999) * 100
    initial_msg = dict(network=network,
                       txn=txn,
                       num_inputs=len(inputs),
                       use_ae_signatures=use_ae_signatures,
                       change=change)
    reply = self._jadeRpc('sign_tx', initial_msg, str(base_id))
    assert reply

    # Send inputs and receive signatures
    return self._send_tx_inputs(base_id, inputs, use_ae_signatures)
class JadeInterface:
    """
    Mid-level interface to Jade.
    Wraps either a serial or a ble connection.
    Calls to send and receive bytes and cbor messages over the interface.

    Either:
     a) use wrapped with JadeAPI
     (recommended)
    or:
     b) use with JadeInterface.create_[serial|ble]() as jade:
          ...
    or:
     c) use JadeInterface.create_[serial|ble], then call connect() before
        using, and disconnect() when finished
     (caveat cranium)
    or:
     d) use ctor to wrap existing low-level implementation instance
     (caveat cranium)
    """

    def __init__(self, impl):
        # Wrap the low-level transport implementation (eg. serial, ble, tcp)
        assert impl is not None
        self.impl = impl

    def __enter__(self):
        self.connect()
        return self

    def __exit__(self, exc_type, exc, tb):
        if exc_type:
            logger.error("Exception causing JadeInterface context exit.")
            logger.error(exc_type)
            logger.error(exc)
            traceback.print_tb(tb)
        # On abnormal exit, drain (and log) any outstanding device output
        self.disconnect(exc_type is not None)

    @staticmethod
    def create_serial(device=None, baud=None, timeout=None):
        """
        Create a JadeInterface object using the serial interface described.

        Parameters
        ----------
        device : str, optional
            The device identifier for the serial device.
            Underlying implementation will default (to /dev/ttyUSB0)
        baud : int, optional
            The communication baud rate.
            Underlying implementation will default (to 115200)
        timeout : int, optional
            The serial read timeout when awaiting messages.
            Underlying implementation will default (to 120s)

        Returns
        -------
        JadeInterface
            Interface object configured to use given serial parameters.
            NOTE: the instance has not yet tried to contact the hw
            - caller must call 'connect()' before trying to use the Jade.
        """
        # A 'device' string recognised by the tcp impl (eg. a socket address)
        # is routed there rather than to the serial impl
        if device and JadeTCPImpl.isSupportedDevice(device):
            impl = JadeTCPImpl(device)
        else:
            impl = JadeSerialImpl(device or DEFAULT_SERIAL_DEVICE,
                                  baud or DEFAULT_BAUD_RATE,
                                  timeout or DEFAULT_SERIAL_TIMEOUT)
        return JadeInterface(impl)

    @staticmethod
    def create_ble(device_name=None, serial_number=None,
                   scan_timeout=None, loop=None):
        """
        Create a JadeInterface object using the BLE interface described.
        NOTE: raises JadeError if BLE dependencies not installed.

        Parameters
        ----------
        device_name : str, optional
            The device name of the desired BLE device.
            Underlying implementation will default (to 'Jade')
        serial_number : int, optional
            The serial number of the desired BLE device
            - used to disambiguate multiple beacons with the same 'device name'
            Underlying implementation will connect to the first beacon it scans
            with the matching 'device name'.
        scan_timeout : int, optional
            The timeout when scanning for devices which match the device name/serial number.
            Underlying implementation will default (to 60s)
        loop : optional
            The asyncio event loop to use, if required.
            Underlying implementation will default (to asyncio.get_event_loop())

        Returns
        -------
        JadeInterface
            Interface object configured to use given BLE parameters.
            NOTE: the instance has not yet tried to contact the hw
            - caller must call 'connect()' before trying to use the Jade.

        Raises
        ------
        JadeError if BLE backend not available (ie. BLE dependencies not installed)
        """
        # JadeBleImpl is only defined if the optional BLE dependencies imported ok
        this_module = sys.modules[__name__]
        if not hasattr(this_module, "JadeBleImpl"):
            raise JadeError(1, "BLE support not installed", None)

        impl = JadeBleImpl(device_name or DEFAULT_BLE_DEVICE_NAME,
                           serial_number or DEFAULT_BLE_SERIAL_NUMBER,
                           scan_timeout or DEFAULT_BLE_SCAN_TIMEOUT,
                           loop=loop)
        return JadeInterface(impl)

    def connect(self):
        """
        Try to connect the underlying transport interface (eg. serial, ble, etc.)
        Raises an exception on failure.
        """
        self.impl.connect()

    def disconnect(self, drain=False):
        """
        Disconnect the underlying transport (eg. serial, ble, etc.)

        Parameters
        ----------
        drain : bool, optional
            When true log any/all remaining messages/data, otherwise silently discard.
            NOTE: can prevent disconnection if data is arriving constantly.
            Defaults to False.
        """
        if drain:
            self.drain()
        self.impl.disconnect()

    def drain(self):
        """
        Log any/all outstanding messages/data.
        NOTE: can run indefinitely if data is arriving constantly.
        """
        # NOTE: 'warning' used rather than the deprecated 'warn' alias
        logger.warning("Draining interface...")
        drained = bytearray()
        finished = False

        while not finished:
            byte_ = self.impl.read(1)
            drained.extend(byte_)
            finished = byte_ == b''

            # Flush the collected bytes on eof, newline, or when the buffer grows large
            if finished or byte_ == b'\n' or len(drained) > 256:
                try:
                    device_logger.warning(drained.decode('utf-8'))
                except Exception:
                    # Dump the bytes raw and as hex if decoding as utf-8 failed
                    device_logger.warning("Raw:")
                    device_logger.warning(drained)
                    device_logger.warning("----")
                    device_logger.warning("Hex dump:")
                    device_logger.warning(drained.hex())

                # Clear and loop to continue collecting
                drained.clear()

    @staticmethod
    def build_request(input_id, method, params=None):
        """
        Build a request dict from passed parameters

        Parameters
        ----------
        input_id : str
            The id of the request message to construct
        method : str
            rpc method to invoke
        params : dict, optional
            any parameters to pass to the rpc method
            Defaults to None.

        Returns
        -------
        dict
            The request object as a dict
        """
        request = {"method": method, "id": input_id}
        if params is not None:
            request["params"] = params
        return request

    @staticmethod
    def serialise_cbor_request(request):
        """
        Method to format a request dict as a cbor message

        Parameters
        ----------
        request : dict
            The request dict

        Returns
        -------
        bytes
            The request formatted as cbor message bytes
        """
        dump = cbor.dumps(request)
        len_dump = len(dump)
        # ota_data payloads are large binary chunks - log only their size,
        # not their (hexlified) content
        if 'method' in request and 'ota_data' in request['method']:
            msg = 'Sending ota_data message {} as cbor of size {}'.format(request['id'], len_dump)
            logger.info(msg)
        else:
            logger.info('Sending: {} as cbor of size {}'.format(_hexlify(request), len_dump))
        return dump

    def write(self, bytes_):
        """
        Write bytes over the underlying interface

        Parameters
        ----------
        bytes_ : bytes
            The bytes to write

        Returns
        -------
        int
            The number of bytes written
        """
        logger.debug("Sending: {} bytes".format(len(bytes_)))
        wrote = self.impl.write(bytes_)
        logger.debug("Sent: {} bytes".format(len(bytes_)))
        return wrote

    def write_request(self, request):
        """
        Write a request dict over the underlying interface, formatted as cbor.

        Parameters
        ----------
        request : dict
            The request dict to write
        """
        msg = self.serialise_cbor_request(request)
        # The underlying impl may write fewer bytes than requested - loop until
        # the entire serialised message has been sent
        written = 0
        while written < len(msg):
            written += self.write(msg[written:])

    def read(self, n):
        """
        Try to read bytes from the underlying interface.

        Parameters
        ----------
        n : int
            The number of bytes to read

        Returns
        -------
        bytes
            The bytes received
        """
        logger.debug("Reading {} bytes...".format(n))
        bytes_ = self.impl.read(n)
        logger.debug("Received: {} bytes".format(len(bytes_)))
        return bytes_

    def read_cbor_message(self):
        """
        Try to read a single cbor (response) message from the underlying interface.
        Respects any read timeout.

        If any 'log' messages are received, logs them locally at the nearest corresponding
        level and awaits the next message. Returns when it receives what appears to be a
        reply message.

        Returns
        -------
        dict
            The message received, as a dict
        """
        while True:
            # 'self' is sufficiently 'file-like' to act as a load source.
            # Throws EOFError on end of stream/timeout/lost-connection etc.
            message = cbor.load(self)

            if isinstance(message, collections.abc.Mapping):
                # A message response (to a prior request)
                if 'id' in message:
                    logger.info("Received msg: {}".format(_hexlify(message)))
                    return message

                # A log message - handle as normal
                if 'log' in message:
                    response = message['log']
                    log_method = device_logger.error
                    try:
                        response = message['log'].decode("utf-8")
                        # Device log lines are prefixed '<level> ' - map that
                        # single character to the corresponding local log level
                        log_methods = {
                            'E': device_logger.error,
                            'W': device_logger.warning,
                            'I': device_logger.info,
                            'D': device_logger.debug,
                            'V': device_logger.debug,
                        }
                        if len(response) > 1 and response[1] == ' ':
                            lvl = response[0]
                            log_method = log_methods.get(lvl, device_logger.error)
                    except Exception as e:
                        logger.error('Error processing log message: {}'.format(e))
                    log_method('>> {}'.format(response))
                    continue

            # Unknown/unhandled/unexpected message
            logger.error("Unhandled message received")
            device_logger.error(message)

    def read_response(self, long_timeout=False):
        """
        Try to read a single cbor (response) message from the underlying interface.

        If any 'log' messages are received, logs them locally at the nearest corresponding
        level and awaits the next message. Returns when it receives what appears to be a
        reply message.
        If `long_timeout` is false, any read-timeout is respected. If True, the call will
        block indefinitely awaiting a response message.

        Parameters
        ----------
        long_timeout : bool
            Whether to wait indefinitely for the next (response) message.

        Returns
        -------
        dict
            The message received, as a dict
        """
        while True:
            try:
                return self.read_cbor_message()
            except EOFError:
                # EOFError indicates a read timeout - retry forever if the caller
                # asked for a long timeout, otherwise propagate
                if not long_timeout:
                    raise

    @staticmethod
    def validate_reply(request, reply):
        """
        Helper to minimally validate a reply message, in the context of a request.
        Asserts if the reply does not contain the expected minimal fields.
        """
        assert isinstance(reply, dict) and 'id' in reply
        # A reply carries exactly one of 'result' or 'error'
        assert ('result' in reply) != ('error' in reply)
        # The reply id must match the request - except for the '00' id which the
        # device uses for errors it cannot attribute to a specific request
        assert reply['id'] == request['id'] or \
            reply['id'] == '00' and 'error' in reply

    def make_rpc_call(self, request, long_timeout=False):
        """
        Method to send a request over the underlying interface, and await a response.

        The request is minimally validated before it is sent, and the response is
        similarly validated before being returned.
        Any read-timeout is respected unless 'long_timeout' is passed, in which case the
        call blocks indefinitely awaiting a response.

        Parameters
        ----------
        request : dict
            The request message to send.
        long_timeout : bool
            Whether to wait indefinitely for the response.

        Returns
        -------
        dict
            The (minimally validated) response message received, as a dict
        """
        # Write outgoing request message
        assert isinstance(request, dict)
        assert 'id' in request and len(request['id']) > 0
        assert 'method' in request and len(request['method']) > 0
        assert len(request['id']) < 16 and len(request['method']) < 32
        self.write_request(request)

        # Read and validate incoming message
        reply = self.read_response(long_timeout)
        self.validate_reply(request, reply)

        return reply
| {
"content_hash": "f58d2ac5106b248a7ddd05c7759073c0",
"timestamp": "",
"source": "github",
"line_count": 1747,
"max_line_length": 100,
"avg_line_length": 36.88151116199199,
"alnum_prop": 0.582583188477775,
"repo_name": "spesmilo/electrum",
"id": "4b31f06e3a031e731d81224658eaf21d8fb29c5d",
"size": "64432",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "electrum/plugins/jade/jadepy/jade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13136"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2185"
},
{
"name": "NSIS",
"bytes": "7681"
},
{
"name": "Python",
"bytes": "5400804"
},
{
"name": "QML",
"bytes": "355804"
},
{
"name": "Ruby",
"bytes": "16748"
},
{
"name": "Shell",
"bytes": "105118"
},
{
"name": "kvlang",
"bytes": "67438"
}
],
"symlink_target": ""
} |
import os
from django.conf import settings
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import Permission, User
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from django.test import TestCase, Client
from django.test import override_settings, modify_settings
from glitter.forms import MoveBlockForm
from glitter.blocks.html.models import HTML
from glitter.models import Version, ContentBlock
from glitter.pages.admin import PageAdmin
from glitter.pages.models import Page
from glitter.tests.sample.models import Book
# True when the optional sample-blocks test app is not installed - presumably used
# to skip tests which depend on it (TODO confirm against the skip decorators below).
SAMPLE_BLOCK_MISSING = 'glitter.tests.sampleblocks' not in settings.INSTALLED_APPS
@modify_settings(
    INSTALLED_APPS={
        'append': 'glitter.tests.sample',
    },
)
@override_settings(
    TEMPLATE_DIRS=(os.path.join(os.path.dirname(__file__), 'templates'),),
    GLITTER_SHOW_LOGIN_REQUIRED=True,
)
class TestAdmin(TestCase):
    """
    Tests for the glitter pages admin (PageAdmin).

    setUp builds a page with one unsaved version, plus three logged-in clients:
    a superuser, an editor with the 'edit_page' permission, and a staff user
    with no permissions.
    """

    def setUp(self):
        """Create the page, users, clients and admin urls shared by the tests."""
        # Permissions
        self.edit_permissions = Permission.objects.get_by_natural_key(
            'edit_page', 'glitter_pages', 'page'
        )

        # Page
        self.page = Page.objects.create(url='/test/', title='Test page')

        # Information about model
        self.info = self.page._meta.app_label, self.page._meta.model_name

        # Superuser
        self.super_user = User.objects.create_superuser('test', 'test@test.com', 'test')
        self.super_user_client = Client()
        self.super_user_client.login(username='test', password='test')

        # Editor with editing permissions
        self.editor = User.objects.create_user('editor', 'editor@test.com', 'editor')
        self.editor.is_staff = True
        self.editor.user_permissions.add(self.edit_permissions)
        self.editor.save()
        self.editor_client = Client()
        self.editor_client.login(username='editor', password='editor')

        # Editor with not editing permissions
        self.editor_no_permissions = User.objects.create_user(
            'editor_no_perm', 'editor_no_perm@test.com', 'editor_no_perm'
        )
        self.editor_no_permissions.is_staff = True
        self.editor_no_permissions.save()
        self.editor_no_permissions_client = Client()
        self.editor_no_permissions_client.login(
            username='editor_no_perm', password='editor_no_perm'
        )

        # Page version.
        self.page_version = Version.objects.create(
            content_type=ContentType.objects.get_for_model(Page), object_id=self.page.id,
            template_name='glitter/sample.html', owner=self.editor
        )

        self.page_admin = PageAdmin(Page, AdminSite())

        # Admin urls for the test page
        self.change_obj_url = reverse('admin:glitter_pages_page_change', args=(self.page.id,))
        self.add_obj_url = reverse('admin:glitter_pages_page_add')
        self.page_redirect_url = reverse('admin:glitter_pages_page_redirect', args=(self.page.id,))

    def test_data_for_change_and_add_response(self):
        """
        Build the POST payloads used when exercising the admin change/add views.

        NOTE(review): despite the test_* name this method only returns fixture
        data (so the runner collects it as a no-assertion test) - consider
        renaming it to a _-prefixed helper.
        """
        response_data = {'url': '/testing2/', 'title': 'Testing2'}
        response_save_and_edit_data = {
            'url': '/testing223/', 'title': 'Testing2', '_saveandedit': True
        }
        response_save_and_continue_edit = {
            'url': '/testing223/', 'title': 'Testing2', '_continue': True
        }
        return response_data, response_save_and_edit_data, response_save_and_continue_edit

    def test_admin_url(self):
        """Smoke test: view_url runs without raising (return value not checked)."""
        self.page_admin.view_url(self.page)

    def test_unpublish_count(self):
        """Smoke test: admin_unpublished_count runs without raising (value not checked)."""
        self.page_admin.admin_unpublished_count(self.page)
@modify_settings(
    INSTALLED_APPS={
        'append': 'glitter.tests.sample',
    },
)
class TestPermissions(TestCase):
    """Tests for the admin ``has_edit_permission``/``has_publish_permission`` checks.

    Exercises users with no glitter permissions, edit-only permissions, and
    edit+publish permissions, against both the Page model and a plain
    sample model (Book).
    """
    def setUp(self):
        # Permissions for objects we're testing
        self.edit_page = Permission.objects.get_by_natural_key(
            'edit_page', 'glitter_pages', 'page'
        )
        self.publish_page = Permission.objects.get_by_natural_key(
            'publish_page', 'glitter_pages', 'page'
        )
        self.edit_book = Permission.objects.get_by_natural_key(
            'edit_book', 'sample', 'book'
        )
        self.publish_book = Permission.objects.get_by_natural_key(
            'publish_book', 'sample', 'book'
        )
        # Superuser
        self.superuser = User.objects.create_superuser(
            username='superuser', email='', password=None
        )
        # Editor with editing permissions
        self.editor = User.objects.create_user(username='editor', email='', password=None)
        self.editor.is_staff = True
        self.editor.save()
        self.editor.user_permissions.add(self.edit_page, self.edit_book)
        # Publisher with edit and publish permissions
        self.publisher = User.objects.create_user(username='publisher', email='', password=None)
        self.publisher.is_staff = True
        self.publisher.save()
        self.publisher.user_permissions.add(
            self.edit_page, self.publish_page, self.edit_book, self.publish_book
        )
        # Staff with no editing permissions
        self.staff = User.objects.create_user(username='staff', email='', password=None)
        self.staff.is_staff = True
        self.staff.save()
        # Page with an unsaved page version
        self.page = Page.objects.create(url='/test/', title='Test page')
        self.page_version = Version.objects.create(
            content_type=ContentType.objects.get_for_model(Page),
            object_id=self.page.id,
            template_name='glitter/sample.html',
            owner=self.editor,
        )
        self.page_admin = PageAdmin(Page, AdminSite())
        # Sample model
        self.book = Book.objects.create(title='Hello')
        # NOTE(review): PageAdmin is reused for the Book model here --
        # presumably only the shared permission helpers are exercised; confirm.
        self.book_admin = PageAdmin(Book, AdminSite())
    def test_edit_permission(self):
        # Only people with glitter_pages.edit_page have edit permission
        request = HttpRequest()
        request.user = self.editor
        self.assertTrue(self.page_admin.has_edit_permission(request=request))
        request.user = self.staff
        self.assertFalse(self.page_admin.has_edit_permission(request=request))
    def test_edit_version(self):
        # Only the creator of an unsaved version can edit it
        request = HttpRequest()
        request.user = self.superuser
        self.assertFalse(self.page_admin.has_edit_permission(
            request=request, version=self.page_version
        ))
        request.user = self.editor
        self.assertTrue(self.page_admin.has_edit_permission(
            request=request, version=self.page_version
        ))
    def test_publish_permission(self):
        # Only people with glitter_pages.publish_page have publish permission
        request = HttpRequest()
        request.user = self.publisher
        self.assertTrue(self.page_admin.has_publish_permission(request=request))
        request.user = self.staff
        self.assertFalse(self.page_admin.has_publish_permission(request=request))
    def test_book_model(self):
        # Test that permissions work with different types of models
        request = HttpRequest()
        request.user = self.editor
        self.assertTrue(self.book_admin.has_edit_permission(request=request))
        request.user = self.publisher
        self.assertTrue(self.book_admin.has_publish_permission(request=request))
        request.user = self.staff
        self.assertFalse(self.book_admin.has_edit_permission(request=request))
        self.assertFalse(self.book_admin.has_publish_permission(request=request))
class BaseViewsCase(TestAdmin):
    """TestAdmin fixtures plus three HTML content blocks on the draft version.

    Blocks 1 and 2 sit in the ``main_content`` column (positions 1 and 3);
    block 3 sits in the ``side`` column (position 4).
    """
    def setUp(self):
        super().setUp()
        # First main_content block (position 1).
        self.html1_block = HTML.objects.create(content='<p>HTML Block</p>')
        self.html1_content_block = ContentBlock.objects.create(
            obj_version=self.page_version,
            column='main_content',
            position=1,
            content_type=ContentType.objects.get_for_model(self.html1_block),
            object_id=self.html1_block.id
        )
        self.html1_block.content_block = self.html1_content_block
        self.html1_block.save()
        # Second main_content block (position 3 -- deliberately non-contiguous).
        self.html2_block = HTML.objects.create(content='<p>HTML Block</p>')
        self.html2_content_block = ContentBlock.objects.create(
            obj_version=self.page_version,
            column='main_content',
            position=3,
            content_type=ContentType.objects.get_for_model(self.html2_block),
            object_id=self.html2_block.id
        )
        self.html2_block.content_block = self.html2_content_block
        self.html2_block.save()
        # Side-column block (position 4).
        self.html3_block = HTML.objects.create(content='<p>HTML Block</p>')
        self.html3_content_block = ContentBlock.objects.create(
            obj_version=self.page_version,
            column='side',
            position=4,
            content_type=ContentType.objects.get_for_model(self.html3_block),
            object_id=self.html3_block.id
        )
        self.html3_block.content_block = self.html3_content_block
        self.html3_block.save()
    def change_page_version(self):
        """Mark the draft as a saved version; editor requests then get 403."""
        self.page_version.version_number = 1
        self.page_version.save()
class TestPageChangeTemplateView(BaseViewsCase):
    """Tests for the admin view that changes a page version's template."""
    def setUp(self):
        super().setUp()
        self.change_template_url = reverse(
            'admin:%s_%s_changetemplate' % self.info, args=(self.page_version.id,)
        )
    def test_permissions(self):
        # Permission denied as user doesn't have permissions
        response = self.editor_no_permissions_client.get(self.change_template_url)
        self.assertEqual(response.status_code, 403)
        # Editor with permissions
        response = self.editor_client.get(self.change_template_url)
        self.assertEqual(response.status_code, 302)
    def test_post(self):
        # Check POST
        response = self.editor_client.post(self.change_template_url, {
            'template_name': 'glitter/sample2.html',
        })
        self.assertEqual(response.status_code, 302)
        # Check template gets changed
        page_version = Version.objects.get(id=self.page_version.id)
        self.assertEqual(page_version.template_name, 'glitter/sample2.html')
        # These blocks get moved to a new column and re-numbered contiguously.
        html1_content_block = ContentBlock.objects.get(id=self.html1_content_block.id)
        html2_content_block = ContentBlock.objects.get(id=self.html2_content_block.id)
        self.assertEqual(html1_content_block.column, 'content')
        self.assertEqual(html2_content_block.column, 'content')
        self.assertEqual(html1_content_block.position, 1)
        self.assertEqual(html2_content_block.position, 2)
        # Index error if ContentBlock doesn't exist
        self.editor_client.post(self.change_template_url, {
            'template_name': 'glitter/sample.html',
        })
    def test_page_owner(self):
        """ A version owned by someone else cannot be edited: expect 403. """
        self.page_version.owner = self.super_user
        self.page_version.save()
        response = self.editor_client.get(self.change_template_url)
        self.assertEqual(response.status_code, 403)
    def test_page_version(self):
        """ A saved version cannot be edited: expect 403. """
        self.change_page_version()
        self.editor.user_permissions.add(self.edit_permissions)
        response = self.editor_client.get(self.change_template_url)
        self.assertEqual(response.status_code, 403)
class TestPageBlockAddView(BaseViewsCase):
    """Tests for adding an HTML block to a column of a page version."""
    def setUp(self):
        super().setUp()
        # Add-block URL targeting the main_content column of the draft version.
        self.page_block_add_view_url = '{}?column=main_content'.format(reverse(
            'block_admin:%s_%s_add' % (HTML._meta.app_label, HTML._meta.model_name),
            kwargs={
                'version_id': self.page_version.id,
            }
        ))
    def test_permissions(self):
        # Permission denied as user doesn't have permissions
        response = self.editor_no_permissions_client.get(self.page_block_add_view_url)
        self.assertEqual(response.status_code, 403)
        # Editor with permissions
        response = self.editor_client.get(self.page_block_add_view_url)
        self.assertEqual(response.status_code, 200)
    def test_add_block(self):
        """Adding a block re-renders the column template."""
        response = self.editor_client.post(self.page_block_add_view_url, {
            'content': '<p>Test</p>',
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'admin/glitter/update_column.html')
    def test_add_block_top(self):
        """Adding with top=true also re-renders the column template."""
        response = self.editor_client.post(self.page_block_add_view_url + '&top=true', {
            'content': '<p>Test</p>',
        })
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'admin/glitter/update_column.html')
    def test_add_continue(self):
        """_continue redirects to the block-admin continue page."""
        response = self.editor_client.post(self.page_block_add_view_url, {
            'content': '<p>Test</p>',
            '_continue': '',
        }, follow=True)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'blockadmin/continue.html')
    def test_page_version(self):
        """ A saved version cannot be edited: expect 403. """
        self.change_page_version()
        self.editor.user_permissions.add(self.edit_permissions)
        response = self.editor_client.get(self.page_block_add_view_url)
        self.assertEqual(response.status_code, 403)
class TestPageBlockDeleteView(BaseViewsCase):
    """Tests for deleting a content block from a page version."""
    def setUp(self):
        super().setUp()
        self.page_block_delete_view_url = reverse(
            'admin:%s_%s_block_delete' % self.info, args=(self.html1_content_block.id,)
        )
    def test_permissions(self):
        """ 403 without edit permission; 200 for an editor. """
        response = self.editor_no_permissions_client.get(self.page_block_delete_view_url)
        self.assertEqual(response.status_code, 403)
        # Editor with permissions
        response = self.editor_client.get(self.page_block_delete_view_url)
        self.assertEqual(response.status_code, 200)
    def test_post(self):
        """Deleting the block succeeds for an editor."""
        response = self.editor_client.post(self.page_block_delete_view_url, {
            'column': 'main_content',
            'block_type': 'glitter_html_block.HTML',
        })
        self.assertEqual(response.status_code, 200)
    def test_page_version(self):
        """ A saved version cannot be edited: expect 403. """
        self.change_page_version()
        self.editor.user_permissions.add(self.edit_permissions)
        response = self.editor_client.get(self.page_block_delete_view_url)
        self.assertEqual(response.status_code, 403)
class TestPageBlockMoveView(BaseViewsCase):
    """Tests for reordering a content block within its column.

    Uses the three blocks from BaseViewsCase: html1/html2 in
    ``main_content`` (positions 1 and 3), html3 in ``side`` (position 4).
    """
    def setUp(self):
        super().setUp()
        self.html1_block_move_view_url = reverse(
            'admin:%s_%s_block_move' % self.info, args=(self.html1_content_block.id,)
        )
        self.html2_block_move_view_url = reverse(
            'admin:%s_%s_block_move' % self.info, args=(self.html2_content_block.id,)
        )
        self.html3_block_move_view_url = reverse(
            'admin:%s_%s_block_move' % self.info, args=(self.html3_content_block.id,)
        )
    def test_permissions(self):
        """ 403 without edit permission; 200 for an editor. """
        response = self.editor_no_permissions_client.get(self.html1_block_move_view_url)
        self.assertEqual(response.status_code, 403)
        # Editor with permissions
        response = self.editor_client.get(self.html1_block_move_view_url)
        self.assertEqual(response.status_code, 200)
    def test_html1_move_block_bottom(self):
        """ Move the first main_content block to the bottom. """
        response = self.editor_client.post(self.html1_block_move_view_url, {
            'move': MoveBlockForm.MOVE_BOTTOM,
        })
        self.assertEqual(response.status_code, 200)
    def test_html2_move_block_bottom(self):
        """ Move the second main_content block to the bottom. """
        response = self.editor_client.post(self.html2_block_move_view_url, {
            'move': MoveBlockForm.MOVE_BOTTOM,
        })
        self.assertEqual(response.status_code, 200)
    def test_html3_move_block_bottom(self):
        """ Move the side-column block to the bottom. """
        response = self.editor_client.post(self.html3_block_move_view_url, {
            'move': MoveBlockForm.MOVE_BOTTOM
        })
        self.assertEqual(response.status_code, 200)
    def test_move_html1_block_top(self):
        # Move block to the top
        response = self.editor_client.post(self.html1_block_move_view_url, {
            'move': MoveBlockForm.MOVE_TOP
        })
        self.assertEqual(response.status_code, 200)
    def test_move_html2_block_top(self):
        # Move block to the top
        response = self.editor_client.post(self.html2_block_move_view_url, {
            'move': MoveBlockForm.MOVE_TOP
        })
        self.assertEqual(response.status_code, 200)
    def test_move_html1_block_up(self):
        # Move block up one position
        response = self.editor_client.post(self.html1_block_move_view_url, {
            'move': MoveBlockForm.MOVE_UP
        })
        self.assertEqual(response.status_code, 200)
    def test_move_html1_block_down(self):
        # Move block down one position
        response = self.editor_client.post(self.html1_block_move_view_url, {
            'move': MoveBlockForm.MOVE_DOWN
        })
        self.assertEqual(response.status_code, 200)
    def test_page_version(self):
        """ A saved version cannot be edited: expect 403. """
        self.change_page_version()
        self.editor.user_permissions.add(self.edit_permissions)
        response = self.editor_client.get(self.html1_block_move_view_url)
        self.assertEqual(response.status_code, 403)
class TestPageBlockColumnView(BaseViewsCase):
    """Tests for moving a content block into a different column."""
    def setUp(self):
        super().setUp()
        self.html1_block_column_url = reverse(
            'admin:%s_%s_block_column' % self.info, args=(self.html1_content_block.id,)
        )
    def test_permissions(self):
        """ 403 without edit permission; 200 for an editor. """
        response = self.editor_no_permissions_client.get(self.html1_block_column_url)
        self.assertEqual(response.status_code, 403)
        # Editor with permissions
        response = self.editor_client.get(self.html1_block_column_url)
        self.assertEqual(response.status_code, 200)
    def test_move_block_side(self):
        """ Move the block into the side column. """
        response = self.editor_client.post(self.html1_block_column_url, {
            'move': 'side',
        })
        self.assertEqual(response.status_code, 200)
    def test_get_last_block(self):
        """Moving into an emptied column (target block deleted) still works."""
        self.html3_block.delete()
        self.html3_content_block.delete()
        response = self.editor_client.post(self.html1_block_column_url, {
            'move': 'side',
        })
        self.assertEqual(response.status_code, 200)
    def test_page_version(self):
        """ A saved version cannot be edited: expect 403. """
        self.change_page_version()
        self.editor.user_permissions.add(self.edit_permissions)
        response = self.editor_client.get(self.html1_block_column_url)
        self.assertEqual(response.status_code, 403)
| {
"content_hash": "a17760b704481ebc84e7cd06f8af8c8e",
"timestamp": "",
"source": "github",
"line_count": 506,
"max_line_length": 99,
"avg_line_length": 37.444664031620555,
"alnum_prop": 0.6384124135747083,
"repo_name": "blancltd/django-glitter",
"id": "e47f22de8d13ade8e6a5050eb2096b7c8af15762",
"size": "18947",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glitter/tests/test_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "89413"
},
{
"name": "HTML",
"bytes": "49583"
},
{
"name": "JavaScript",
"bytes": "229974"
},
{
"name": "Python",
"bytes": "309345"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from models import Item
class ItemAdmin(admin.ModelAdmin):
    """Admin options for the Item model: drill down by timestamp, list by
    timestamp/object string, filter by content type, search text fields."""
    date_hierarchy = 'timestamp'
    list_display = ('timestamp', 'object_str')
    list_filter = ('content_type', 'timestamp')
    search_fields = ('object_str', 'tags')
#admin.site.register(Item, ItemAdmin) | {
"content_hash": "bb5baf40746df1c435f3d3de4be47bc9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 47,
"avg_line_length": 23.76923076923077,
"alnum_prop": 0.6893203883495146,
"repo_name": "twatson/django-plugables",
"id": "bf2f0409dee86ecda7963f9fa08a27408f9dbf9f",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "applications/core/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "148563"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class vpnglobal_vpnintranetapplication_binding(base_resource) :
	""" Binding class showing the vpnintranetapplication that can be bound to vpnglobal.
	"""
	def __init__(self) :
		# Name of the bound intranet application (see the property below).
		self._intranetapplication = ""
		# Record count for count()/count_filtered() queries.
		# NOTE(review): the triple-underscore name is mangled inside the class
		# body, yet count() reads the unmangled key from __dict__ -- the
		# response parser presumably sets that key dynamically; confirm.
		self.___count = 0
	@property
	def intranetapplication(self) :
		"""The intranet vpn application.
		"""
		try :
			return self._intranetapplication
		except Exception as e:
			raise e
	@intranetapplication.setter
	def intranetapplication(self, intranetapplication) :
		"""The intranet vpn application.
		"""
		try :
			self._intranetapplication = intranetapplication
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(vpnglobal_vpnintranetapplication_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444: session no longer valid -- drop it.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.vpnglobal_vpnintranetapplication_binding
		except Exception as e :
			raise e
	def _get_object_name(self) :
		""" Returns the value of object identifier argument
		"""
		try :
			# Global bindings have no object identifier.
			return None
		except Exception as e :
			raise e
	@classmethod
	def add(cls, client, resource) :
		""" Bind the given resource (or list of resources) to vpnglobal. """
		try :
			if resource and type(resource) is not list :
				updateresource = vpnglobal_vpnintranetapplication_binding()
				updateresource.intranetapplication = resource.intranetapplication
				return updateresource.update_resource(client)
			else :
				# Bulk form: one binding object per input resource.
				if resource and len(resource) > 0 :
					updateresources = [vpnglobal_vpnintranetapplication_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						updateresources[i].intranetapplication = resource[i].intranetapplication
					return cls.update_bulk_request(client, updateresources)
		except Exception as e :
			raise e
	@classmethod
	def delete(cls, client, resource) :
		""" Unbind the given resource (or list of resources) from vpnglobal. """
		try :
			if resource and type(resource) is not list :
				deleteresource = vpnglobal_vpnintranetapplication_binding()
				deleteresource.intranetapplication = resource.intranetapplication
				return deleteresource.delete_resource(client)
			else :
				# Bulk form: one binding object per input resource.
				if resource and len(resource) > 0 :
					deleteresources = [vpnglobal_vpnintranetapplication_binding() for _ in range(len(resource))]
					for i in range(len(resource)) :
						deleteresources[i].intranetapplication = resource[i].intranetapplication
					return cls.delete_bulk_request(client, deleteresources)
		except Exception as e :
			raise e
	@classmethod
	def get(cls, service) :
		""" Use this API to fetch a vpnglobal_vpnintranetapplication_binding resources.
		"""
		try :
			obj = vpnglobal_vpnintranetapplication_binding()
			response = obj.get_resources(service)
			return response
		except Exception as e:
			raise e
	@classmethod
	def get_filtered(cls, service, filter_) :
		""" Use this API to fetch filtered set of vpnglobal_vpnintranetapplication_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = vpnglobal_vpnintranetapplication_binding()
			option_ = options()
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			return response
		except Exception as e:
			raise e
	@classmethod
	def count(cls, service) :
		""" Use this API to count vpnglobal_vpnintranetapplication_binding resources configured on NetScaler.
		"""
		try :
			obj = vpnglobal_vpnintranetapplication_binding()
			option_ = options()
			option_.count = True
			response = obj.get_resources(service, option_)
			# The count comes back on the first element of the response array.
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
	@classmethod
	def count_filtered(cls, service, filter_) :
		""" Use this API to count the filtered set of vpnglobal_vpnintranetapplication_binding resources.
		Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
		"""
		try :
			obj = vpnglobal_vpnintranetapplication_binding()
			option_ = options()
			option_.count = True
			option_.filter = filter_
			response = obj.getfiltered(service, option_)
			# The count comes back on the first element of the response array.
			if response :
				return response[0].__dict__['___count']
			return 0
		except Exception as e:
			raise e
class vpnglobal_vpnintranetapplication_binding_response(base_response) :
	""" Response holder for vpnglobal_vpnintranetapplication_binding requests. """
	def __init__(self, length=1) :
		# Standard nitro response metadata.
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-size the result list with empty binding objects; the payload
		# formatter fills these in from the raw response.
		self.vpnglobal_vpnintranetapplication_binding = [
			vpnglobal_vpnintranetapplication_binding() for _ in range(length)
		]
| {
"content_hash": "ef63216ca44c8d975755f49c51e996c2",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 142,
"avg_line_length": 33.554140127388536,
"alnum_prop": 0.7285497342444951,
"repo_name": "mahabs/nitro",
"id": "16a4e07dc041847b79a0d0c3fa7d1cf9f180d4e8",
"size": "5882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/vpn/vpnglobal_vpnintranetapplication_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
HTTP_METHODS = ['POST', 'PUT', 'GET', 'DELETE', 'PATCH']
BOOT_DEVICES = ['pxe', 'disk', 'cdrom', 'bios', 'safe', 'wanboot']
# Polling intervals in seconds.
_LONG_ACTION_POLL_INTERVAL = 10
_SHORT_ACTION_POLL_INTERVAL = 2
# (action, expected final provision state, poll interval) triples; the
# state/interval pair becomes the arguments for wait_for_provision_state.
# Order matters: PROVISION_STATES below preserves it.
_ACTION_TABLE = (
    ('active', 'active', _LONG_ACTION_POLL_INTERVAL),
    ('deleted', 'available', _LONG_ACTION_POLL_INTERVAL),
    ('rebuild', 'active', _LONG_ACTION_POLL_INTERVAL),
    # A short interval is suboptimal for in-band inspection, but it's
    # probably not worth making people wait 10 seconds for OOB inspection.
    ('inspect', 'manageable', _SHORT_ACTION_POLL_INTERVAL),
    # This assumes cleaning is in place.
    ('provide', 'available', _LONG_ACTION_POLL_INTERVAL),
    ('manage', 'manageable', _SHORT_ACTION_POLL_INTERVAL),
    ('clean', 'manageable', _LONG_ACTION_POLL_INTERVAL),
    ('adopt', 'active', _SHORT_ACTION_POLL_INTERVAL),
    # abort has no expected state: no support for --wait.
    ('abort', None, None),
    ('rescue', 'rescue', _LONG_ACTION_POLL_INTERVAL),
    ('unrescue', 'active', _LONG_ACTION_POLL_INTERVAL),
)
# This dict acts as both list of possible provision actions and arguments
# for wait_for_provision_state invocation.
PROVISION_ACTIONS = {
    action: (None if state is None
             else {'expected_state': state, 'poll_interval': interval})
    for action, state, interval in _ACTION_TABLE
}
PROVISION_STATES = list(PROVISION_ACTIONS)
| {
"content_hash": "de0867c82b21f113c64e4f54de58b023",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 46.945945945945944,
"alnum_prop": 0.6108232584916523,
"repo_name": "openstack/python-ironicclient",
"id": "4c0719306ffb382d5b2716d95116858a73d57de5",
"size": "2370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ironicclient/v1/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1240609"
},
{
"name": "Shell",
"bytes": "218"
}
],
"symlink_target": ""
} |
"""Embedding layer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras import backend as K
from tensorflow.contrib.keras.python.keras import constraints
from tensorflow.contrib.keras.python.keras import initializers
from tensorflow.contrib.keras.python.keras import regularizers
from tensorflow.contrib.keras.python.keras.engine import Layer
from tensorflow.python.framework import tensor_shape
class Embedding(Layer):
  """Turns positive integers (indexes) into dense vectors of fixed size.
  eg. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]]
  This layer can only be used as the first layer in a model.
  Example:
  ```python
    model = Sequential()
    model.add(Embedding(1000, 64, input_length=10))
    # the model will take as input an integer matrix of size (batch,
    input_length).
    # the largest integer (i.e. word index) in the input should be no larger
    than 999 (vocabulary size).
    # now model.output_shape == (None, 10, 64), where None is the batch
    dimension.
    input_array = np.random.randint(1000, size=(32, 10))
    model.compile('rmsprop', 'mse')
    output_array = model.predict(input_array)
    assert output_array.shape == (32, 10, 64)
  ```
  Arguments:
    input_dim: int > 0. Size of the vocabulary,
        i.e. maximum integer index + 1.
    output_dim: int >= 0. Dimension of the dense embedding.
    embeddings_initializer: Initializer for the `embeddings` matrix.
    embeddings_regularizer: Regularizer function applied to
          the `embeddings` matrix.
    embeddings_constraint: Constraint function applied to
          the `embeddings` matrix.
    mask_zero: Whether or not the input value 0 is a special "padding"
        value that should be masked out.
        This is useful when using recurrent layers,
        which may take variable length inputs.
        If this is `True` then all subsequent layers
        in the model need to support masking or an exception will be raised.
        If mask_zero is set to True, as a consequence, index 0 cannot be
        used in the vocabulary (input_dim should equal size of
        vocabulary + 1).
    input_length: Length of input sequences, when it is constant.
        This argument is required if you are going to connect
        `Flatten` then `Dense` layers upstream
        (without it, the shape of the dense outputs cannot be computed).
  Input shape:
      2D tensor with shape: `(batch_size, sequence_length)`.
  Output shape:
      3D tensor with shape: `(batch_size, sequence_length, output_dim)`.
  References:
      - [A Theoretically Grounded Application of Dropout in Recurrent Neural
        Networks](http://arxiv.org/abs/1512.05287)
  """

  def __init__(self,
               input_dim,
               output_dim,
               embeddings_initializer='uniform',
               embeddings_regularizer=None,
               activity_regularizer=None,
               embeddings_constraint=None,
               mask_zero=False,
               input_length=None,
               **kwargs):
    # Embedding lookups take integer indices; force the layer's dtype.
    kwargs['dtype'] = 'int32'
    # Derive input_shape from input_length when the caller didn't supply one.
    # NOTE(review): `if input_length:` treats input_length=0 as unset.
    if 'input_shape' not in kwargs:
      if input_length:
        kwargs['input_shape'] = (input_length,)
      else:
        kwargs['input_shape'] = (None,)
    super(Embedding, self).__init__(**kwargs)
    self.input_dim = input_dim
    self.output_dim = output_dim
    # Resolve string/config identifiers into concrete objects.
    self.embeddings_initializer = initializers.get(embeddings_initializer)
    self.embeddings_regularizer = regularizers.get(embeddings_regularizer)
    self.activity_regularizer = regularizers.get(activity_regularizer)
    self.embeddings_constraint = constraints.get(embeddings_constraint)
    self.mask_zero = mask_zero
    self.input_length = input_length

  def build(self, input_shape):
    """Create the `(input_dim, output_dim)` embeddings weight matrix."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    self.embeddings = self.add_weight(
        (self.input_dim, self.output_dim),
        initializer=self.embeddings_initializer,
        name='embeddings',
        regularizer=self.embeddings_regularizer,
        constraint=self.embeddings_constraint)
    self.built = True

  def compute_mask(self, inputs, mask=None):
    """Mask positions whose index is 0 when `mask_zero` is enabled."""
    if not self.mask_zero:
      return None
    else:
      return K.not_equal(inputs, 0)

  def _compute_output_shape(self, input_shape):
    """Output shape is `(batch, input_length, output_dim)`."""
    input_shape = tensor_shape.TensorShape(input_shape).as_list()
    # Fall back to the incoming sequence length when input_length is unset.
    if not self.input_length:
      input_length = input_shape[1]
    else:
      input_length = self.input_length
    return tensor_shape.TensorShape(
        [input_shape[0], input_length, self.output_dim])

  def call(self, inputs):
    """Gather embedding rows for the (int32-cast) input indices."""
    if K.dtype(inputs) != 'int32':
      inputs = K.cast(inputs, 'int32')
    out = K.gather(self.embeddings, inputs)
    return out

  def get_config(self):
    """Return the serializable config, merged with the base layer config."""
    config = {
        'input_dim':
            self.input_dim,
        'output_dim':
            self.output_dim,
        'embeddings_initializer':
            initializers.serialize(self.embeddings_initializer),
        'embeddings_regularizer':
            regularizers.serialize(self.embeddings_regularizer),
        'activity_regularizer':
            regularizers.serialize(self.activity_regularizer),
        'embeddings_constraint':
            constraints.serialize(self.embeddings_constraint),
        'mask_zero':
            self.mask_zero,
        'input_length':
            self.input_length
    }
    base_config = super(Embedding, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
| {
"content_hash": "89cb2e71365d3e69b8575c78d0f713a5",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 76,
"avg_line_length": 36.1764705882353,
"alnum_prop": 0.6612466124661247,
"repo_name": "whn09/tensorflow",
"id": "12a2ce39ebdc8bc900e9c373e50562803cfa6524",
"size": "6224",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/keras/python/keras/layers/embeddings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "182478"
},
{
"name": "C++",
"bytes": "23440224"
},
{
"name": "CMake",
"bytes": "158302"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "804382"
},
{
"name": "HTML",
"bytes": "654838"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "14005"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37302"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "213841"
},
{
"name": "Python",
"bytes": "20372706"
},
{
"name": "Shell",
"bytes": "335987"
},
{
"name": "TypeScript",
"bytes": "1108203"
}
],
"symlink_target": ""
} |
"""API tests which do not interact with a site."""
#
# (C) Pywikibot team, 2012-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import os
import datetime
import pywikibot
from pywikibot.data.api import (
CachedRequest,
ParamInfo,
Request,
QueryGenerator,
)
from pywikibot.family import Family
from tests import _images_dir
from tests.utils import DummySiteinfo
from tests.aspects import (
unittest, TestCase, DefaultDrySiteTestCase, SiteAttributeTestCase,
)
class DryCachedRequestTests(SiteAttributeTestCase):
    """Test CachedRequest using real site objects."""

    sites = {
        'basesite': {
            'family': 'wikipedia',
            'code': 'en',
        },
        'altsite': {
            'family': 'wikipedia',
            'code': 'de',
        },
    }
    dry = True

    def setUp(self):
        super(DryCachedRequestTests, self).setUp()
        # Base request parameters shared by the fixtures below.
        self.parms = {'site': self.basesite,
                      'action': 'query',
                      'meta': 'userinfo'}
        self.req = CachedRequest(expiry=1, **self.parms)
        # Same request with zero expiry.
        self.expreq = CachedRequest(expiry=0, **self.parms)
        # Same site, different query meta.
        self.diffreq = CachedRequest(expiry=1, site=self.basesite, action='query', meta='siteinfo')
        # Same query, different site.
        self.diffsite = CachedRequest(expiry=1, site=self.altsite, action='query', meta='userinfo')

    def test_expiry_formats(self):
        """An integer expiry equals the same number of days as a timedelta."""
        self.assertEqual(self.req.expiry,
                         CachedRequest(datetime.timedelta(days=1), **self.parms).expiry)

    def test_expired(self):
        """With a 1-day expiry, 'now' is fresh and two days ago is expired."""
        self.assertFalse(self.req._expired(datetime.datetime.now()))
        self.assertTrue(self.req._expired(datetime.datetime.now() - datetime.timedelta(days=2)))

    def test_get_cache_dir(self):
        """The cache directory path contains 'apicache'."""
        retval = self.req._get_cache_dir()
        self.assertIn('apicache', retval)

    def test_create_file_name(self):
        """File name depends on request parameters but not on expiry."""
        self.assertEqual(self.req._create_file_name(), self.req._create_file_name())
        self.assertEqual(self.req._create_file_name(), self.expreq._create_file_name())
        self.assertNotEqual(self.req._create_file_name(), self.diffreq._create_file_name())

    def test_cachefile_path(self):
        """Cache path differs for different request parameters or sites."""
        self.assertEqual(self.req._cachefile_path(), self.req._cachefile_path())
        self.assertEqual(self.req._cachefile_path(), self.expreq._cachefile_path())
        self.assertNotEqual(self.req._cachefile_path(), self.diffreq._cachefile_path())
        self.assertNotEqual(self.req._cachefile_path(), self.diffsite._cachefile_path())
class MockCachedRequestKeyTests(TestCase):
    """Test CachedRequest cache keys using mock site objects."""

    net = False

    def setUp(self):
        # Minimal stand-ins for a family/site so no network access is needed.
        class MockFamily(Family):

            @property
            def name(self):
                return 'mock'

        class MockSite(pywikibot.site.APISite):

            _loginstatus = pywikibot.site.LoginStatus.NOT_ATTEMPTED
            _namespaces = {2: ['User']}

            def __init__(self):
                self._user = 'anon'
                pywikibot.site.BaseSite.__init__(self, 'mock', MockFamily())
                self._siteinfo = DummySiteinfo({'case': 'first-letter'})

            def version(self):
                return '1.13'  # pre 1.14

            def protocol(self):
                return 'http'

            def languages(self):
                return ['mock']

            def user(self):
                return self._user

            def encoding(self):
                return 'utf-8'

            def encodings(self):
                return []

            @property
            def siteinfo(self):
                return self._siteinfo

            def __repr__(self):
                return "MockSite()"

            def __getattr__(self, attr):
                # Fail loudly if a test touches an unstubbed attribute.
                raise Exception("Attribute %r not defined" % attr)

        self.mocksite = MockSite()
        super(MockCachedRequestKeyTests, self).setUp()

    def test_cachefile_path_different_users(self):
        """Cache paths differ between anonymous, user and sysop logins."""
        req = CachedRequest(expiry=1, site=self.mocksite,
                            action='query', meta='siteinfo')
        anonpath = req._cachefile_path()
        self.mocksite._userinfo = {'name': u'MyUser'}
        self.mocksite._loginstatus = 0
        req = CachedRequest(expiry=1, site=self.mocksite,
                            action='query', meta='siteinfo')
        userpath = req._cachefile_path()
        self.assertNotEqual(anonpath, userpath)
        self.mocksite._userinfo = {'name': u'MySysop'}
        self.mocksite._loginstatus = 1
        req = CachedRequest(expiry=1, site=self.mocksite,
                            action='query', meta='siteinfo')
        sysoppath = req._cachefile_path()
        self.assertNotEqual(anonpath, sysoppath)
        self.assertNotEqual(userpath, sysoppath)

    def test_unicode(self):
        """Cache key is stable across localized namespace names."""
        self.mocksite._userinfo = {'name': u'محمد الفلسطيني'}
        self.mocksite._loginstatus = 0
        req = CachedRequest(expiry=1, site=self.mocksite,
                            action='query', meta='siteinfo')
        en_user_path = req._cachefile_path()
        # Switching the User namespace to its Arabic name must not change
        # the cache key.
        self.mocksite._namespaces = {2: [u'مستخدم']}
        req = CachedRequest(expiry=1, site=self.mocksite,
                            action='query', meta='siteinfo')
        expect = (u'MockSite()User(User:محمد الفلسطيني)' +
                  "[('action', 'query'), ('meta', 'siteinfo')]")
        self.assertEqual(repr(req._uniquedescriptionstr()), repr(expect))
        self.assertEqual(req._uniquedescriptionstr().encode('utf-8'),
                         expect.encode('utf-8'))
        ar_user_path = req._cachefile_path()
        self.assertEqual(en_user_path, ar_user_path)
class DryWriteAssertTests(DefaultDrySiteTestCase):

    """Test client site write assert."""

    def test_no_user(self):
        """Test Request object when not a user."""
        site = self.get_site()
        del site._userinfo
        self.assertRaisesRegex(pywikibot.Error, ' without userinfo',
                               Request, site=site, action='edit')
        # Explicitly using str as the test expects it to be str (without the
        # u-prefix) in Python 2 and this module is using unicode_literals
        site._userinfo = {'name': str('1.2.3.4'), 'groups': []}
        self.assertRaisesRegex(pywikibot.Error, " as IP '1.2.3.4'",
                               Request, site=site, action='edit')

    def test_unexpected_user(self):
        """Test Request object when username is not correct."""
        # NOTE(review): this body is identical to test_normal and asserts
        # nothing about the username mismatch — confirm whether a warning or
        # error was meant to be checked here.
        site = self.get_site()
        site._userinfo = {'name': 'other_username', 'groups': []}
        site._username[0] = 'myusername'
        Request(site=site, action='edit')

    def test_normal(self):
        """Test Request object when username is correct."""
        site = self.get_site()
        site._userinfo = {'name': 'myusername', 'groups': []}
        site._username[0] = 'myusername'
        Request(site=site, action='edit')
class DryMimeTests(TestCase):

    """Test MIME request handling without a real site."""

    # No network access required.
    net = False

    def test_mime_file_payload(self):
        """Test Request._generate_MIME_part loads binary as binary."""
        local_filename = os.path.join(_images_dir, 'MP_sounds.png')
        with open(local_filename, 'rb') as f:
            file_content = f.read()
        submsg = Request._generate_MIME_part(
            'file', file_content, ('image', 'png'),
            {'filename': local_filename})
        # Round-trip: decoding the MIME payload must yield the raw bytes.
        self.assertEqual(file_content, submsg.get_payload(decode=True))

    def test_mime_file_container(self):
        """Test Request._build_mime_request encodes binary."""
        local_filename = os.path.join(_images_dir, 'MP_sounds.png')
        with open(local_filename, 'rb') as f:
            file_content = f.read()
        body = Request._build_mime_request({}, {
            'file': (file_content, ('image', 'png'),
                     {'filename': local_filename})
        })[1]
        # The raw file bytes must appear verbatim inside the request body.
        self.assertNotEqual(body.find(file_content), -1)
class MimeTests(DefaultDrySiteTestCase):

    """Test MIME request handling with a dry site."""

    def test_upload_object(self):
        """Test Request object prepared to upload."""
        # fake write test needs the config username
        site = self.get_site()
        site._username[0] = 'myusername'
        site._userinfo = {'name': 'myusername', 'groups': []}
        req = Request(site=site, action="upload",
                      file='MP_sounds.png', mime=True,
                      filename=os.path.join(_images_dir, 'MP_sounds.png'))
        self.assertEqual(req.mime, True)
class ParamInfoDictTests(DefaultDrySiteTestCase):

    """Test extracting data from the ParamInfo."""

    # Sample paraminfo response for the query 'info' module.
    prop_info_param_data = {  # data from 1.25
        "name": "info",
        "classname": "ApiQueryInfo",
        "path": "query+info",
        "group": "prop",
        "prefix": "in",
        "parameters": [
            {
                "name": "prop",
                "multi": "",
                "limit": 500,
                "lowlimit": 50,
                "highlimit": 500,
                "type": [
                    "protection",
                    "talkid",
                    "watched",
                    "watchers",
                    "notificationtimestamp",
                    "subjectid",
                    "url",
                    "readable",
                    "preload",
                    "displaytitle"
                ]
            },
            {
                "name": "token",
                "deprecated": "",
                "multi": "",
                "limit": 500,
                "lowlimit": 50,
                "highlimit": 500,
                "type": [
                    "edit",
                    "delete",
                    "protect",
                    "move",
                    "block",
                    "unblock",
                    "email",
                    "import",
                    "watch"
                ]
            },
            {
                "name": "continue",
                "type": "string"
            }
        ],
        "querytype": "prop"
    }

    # Minimal paraminfo data for the 'edit' action module.
    edit_action_param_data = {
        'name': 'edit',
        'path': 'edit'
    }

    def setUp(self):
        """Add a real ParamInfo to the DrySite."""
        super(ParamInfoDictTests, self).setUp()
        site = self.get_site()
        site._paraminfo = ParamInfo(site)
        # Pretend that paraminfo has been loaded
        for mod in site._paraminfo.init_modules:
            site._paraminfo._paraminfo[mod] = {}
        site._paraminfo._action_modules = frozenset(['edit'])
        site._paraminfo._modules = {'query': frozenset(['info'])}

    def test_new_format(self):
        """Normalize a response that only uses the 'modules' key."""
        pi = self.get_site()._paraminfo
        # Set it to the new limited set of keys.
        pi.paraminfo_keys = frozenset(['modules'])
        data = pi.normalize_paraminfo({
            'paraminfo': {
                'modules': [
                    self.prop_info_param_data,
                    self.edit_action_param_data,
                ]
            }
        })
        pi._paraminfo.update(data)
        self.assertIn('edit', pi._paraminfo)
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('edit', pi)
        self.assertIn('info', pi)

    def test_old_format(self):
        """Normalize a response using the legacy 'querymodules' key."""
        pi = self.get_site()._paraminfo
        # Reset it to the complete set of possible keys defined in the class
        pi.paraminfo_keys = ParamInfo.paraminfo_keys
        data = pi.normalize_paraminfo({
            'paraminfo': {
                'querymodules': [self.prop_info_param_data],
                'modules': [self.edit_action_param_data],
            }
        })
        pi._paraminfo.update(data)
        self.assertIn('edit', pi._paraminfo)
        self.assertIn('query+info', pi._paraminfo)
        self.assertIn('edit', pi)
        self.assertIn('info', pi)

    def test_attribute(self):
        """Module attributes are reachable via path and short name."""
        pi = self.get_site()._paraminfo
        # Reset it to the complete set of possible keys defined in the class
        pi.paraminfo_keys = ParamInfo.paraminfo_keys
        data = pi.normalize_paraminfo({
            'paraminfo': {
                'querymodules': [self.prop_info_param_data],
            }
        })
        pi._paraminfo.update(data)
        self.assertEqual(pi._paraminfo['query+info']['querytype'], 'prop')
        self.assertEqual(pi['info']['prefix'], 'in')

    def test_parameter(self):
        """parameter() returns the parameter dict of a loaded module."""
        pi = self.get_site()._paraminfo
        # Reset it to the complete set of possible keys defined in the class
        pi.paraminfo_keys = ParamInfo.paraminfo_keys
        data = pi.normalize_paraminfo({
            'paraminfo': {
                'querymodules': [self.prop_info_param_data],
            }
        })
        pi._paraminfo.update(data)
        param = pi.parameter('info', 'token')
        self.assertIsInstance(param, dict)
        self.assertEqual(param['name'], 'token')
        self.assertIn('deprecated', param)
        self.assertIsInstance(param['type'], list)
        self.assertIn('email', param['type'])
class QueryGenTests(DefaultDrySiteTestCase):

    """Test QueryGenerator with a dry site."""

    def test_query_constructor(self):
        """Test QueryGenerator constructor."""
        qGen1 = QueryGenerator(site=self.get_site(), action="query", meta="siteinfo")
        qGen2 = QueryGenerator(site=self.get_site(), meta="siteinfo")
        # action="query" is implied, so both must build identical parameters.
        self.assertCountEqual(qGen1.request._params.items(), qGen2.request._params.items())
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "986eca4b11b010939d432394addf7193",
"timestamp": "",
"source": "github",
"line_count": 427,
"max_line_length": 99,
"avg_line_length": 31.91334894613583,
"alnum_prop": 0.5478094958538197,
"repo_name": "valhallasw/pywikibot-core",
"id": "5266256e3e62da17c0685f8e859c2032c6772da3",
"size": "13684",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/dry_api_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3763739"
}
],
"symlink_target": ""
} |
# Python 2 deployment hook: rotates the default Django admin password in the
# app's SQLite database after an OpenShift deploy.
import hashlib, imp, os, sqlite3

# Load the OpenShift helper library
lib_path = os.environ['OPENSHIFT_REPO_DIR'] + 'wsgi/openshift/'
modinfo = imp.find_module('openshiftlibs', [lib_path])
openshiftlibs = imp.load_module('openshiftlibs', modinfo[0], modinfo[1], modinfo[2])

# Open the database
conn = sqlite3.connect(os.environ['OPENSHIFT_DATA_DIR'] + '/sqlite3.db')
c = conn.cursor()

# Grab the default security info
c.execute('SELECT password FROM AUTH_USER WHERE id = 1')
pw_info = c.fetchone()[0]

# The password is stored as [hashtype]$[salt]$[hashed]
pw_fields = pw_info.split("$")
hashtype = pw_fields[0]
old_salt = pw_fields[1]
old_pass = pw_fields[2]  # NOTE(review): never read below — confirm if needed

# Randomly generate a new password and a new salt
# The PASSWORD value below just sets the length (12)
# for the real new password.
old_keys = { 'SALT': old_salt, 'PASS': '123456789ABC' }
use_keys = openshiftlibs.openshift_secure(old_keys)

# Encrypt the new password
# NOTE(review): SHA-1 matches Django's legacy 'sha1' hasher format but is a
# weak password hash by modern standards — confirm the target Django version.
new_salt = use_keys['SALT']
new_pass = use_keys['PASS']
new_hashed = hashlib.sha1(new_salt + new_pass).hexdigest()
new_pw_info = "$".join([hashtype,new_salt,new_hashed])

# Update the database
c.execute('UPDATE AUTH_USER SET password = ? WHERE id = 1', [new_pw_info])
conn.commit()
c.close()
conn.close()

# Print the new password info
print "Django application credentials:\n\tuser: admin\n\t" + new_pass
| {
"content_hash": "5d6168d167378a0564e436e2852779c5",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 84,
"avg_line_length": 33.19512195121951,
"alnum_prop": 0.6994856722997795,
"repo_name": "grafuls/openshiftdjango",
"id": "b48de05613be6aa63e5b0865b12255a37d391243",
"size": "1383",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": ".openshift/action_hooks/secure_db.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "62"
},
{
"name": "Python",
"bytes": "13347"
},
{
"name": "Shell",
"bytes": "1806"
}
],
"symlink_target": ""
} |
''' Provides a Server which instantiates Application instances as clients connect
'''
from __future__ import absolute_import, print_function
import atexit
import logging
log = logging.getLogger(__name__)
import signal
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado import netutil
from .tornado import BokehTornado
from bokeh import __version__
from bokeh.application import Application
from bokeh.resources import DEFAULT_SERVER_PORT
def _create_hosts_whitelist(host_list, port):
if not host_list:
return ['localhost:' + str(port)]
hosts = []
for host in host_list:
if '*' in host:
log.warning('Host wildcard %r can expose the application to HTTP '
'host header attacks. Host wildcard should only be '
'used for testing purpose.', host)
if host == '*':
# do not append the :80 port suffix in that case: any port is
# accepted
hosts.append(host)
continue
parts = host.split(':')
if len(parts) == 1:
if parts[0] == "":
raise ValueError("Empty host value")
hosts.append(host+":80")
elif len(parts) == 2:
try:
int(parts[1])
except ValueError:
raise ValueError("Invalid port in host value: %s" % host)
if parts[0] == "":
raise ValueError("Empty host value")
hosts.append(host)
else:
raise ValueError("Invalid host value: %s" % host)
return hosts
def _bind_sockets(address, port):
    '''Bind listening sockets via Tornado and report the port actually used.

    Passing a falsy *port* (e.g. ``None`` or ``0``) asks the OS for an
    ephemeral port; the single port shared by every returned socket is
    reported back alongside the sockets.
    '''
    bound = netutil.bind_sockets(port=port or 0, address=address)
    assert bound
    distinct_ports = {sock.getsockname()[1] for sock in bound}
    assert len(distinct_ports) == 1, "Multiple ports assigned??"
    chosen_port = distinct_ports.pop()
    if port:
        # When an explicit port was requested, the OS must have honored it.
        assert chosen_port == port
    return bound, chosen_port
class Server(object):
    ''' A Server which creates a new Session for each connection, using an Application to initialize each Session.

    Args:
        applications (dict of str: bokeh.application.Application) or bokeh.application.Application:
            mapping from URL paths to Application instances, or a single Application to put at the root URL
            The Application is a factory for Document, with a new Document initialized for each Session.
            Each application should be identified by a path meant to go in a URL, like "/" or "/foo"

    Kwargs:
        num_procs (str):
            Number of worker processes for an app. Default to one. Using 0 will autodetect number of cores
    '''
    def __init__(self, applications, io_loop=None, **kwargs):
        """Bind sockets, build the Tornado app, and start the HTTP server."""
        log.info("Starting Bokeh server version %s" % __version__)
        # A bare Application is mounted at the root path.
        if isinstance(applications, Application):
            self._applications = { '/' : applications }
        else:
            self._applications = applications
        # Only forward the kwargs that BokehTornado understands.
        tornado_kwargs = { key: kwargs[key] for key in ['extra_patterns',
                                                        'secret_key',
                                                        'sign_sessions',
                                                        'generate_session_ids',
                                                        'keep_alive_milliseconds',
                                                        'check_unused_sessions_milliseconds',
                                                        'unused_session_lifetime_milliseconds',
                                                        'stats_log_frequency_milliseconds']
                           if key in kwargs }
        # Normalize the URL prefix to "" or "/some/prefix" (no trailing slash).
        prefix = kwargs.get('prefix')
        if prefix is None:
            prefix = ""
        prefix = prefix.strip("/")
        if prefix:
            prefix = "/" + prefix
        self._prefix = prefix
        self._started = False
        self._stopped = False
        port = kwargs.get('port', DEFAULT_SERVER_PORT)
        self._address = kwargs.get('address') or None
        self._num_procs = kwargs.get('num_procs', 1)
        if self._num_procs != 1:
            # Forking after user code ran could duplicate its side effects.
            assert all(app.safe_to_fork for app in self._applications.values()), (
                'User code has ran before attempting to run multiple '
                'processes. This is considered an unsafe operation.')
        sockets, self._port = _bind_sockets(self._address, port)
        try:
            # The whitelists use the *actual* bound port, which may differ
            # from the requested one when port=0 was used.
            tornado_kwargs['hosts'] = _create_hosts_whitelist(kwargs.get('host'), self._port)
            tornado_kwargs['extra_websocket_origins'] = _create_hosts_whitelist(kwargs.get('allow_websocket_origin'), self._port)
            tornado_kwargs['use_index'] = kwargs.get('use_index', True)
            tornado_kwargs['redirect_root'] = kwargs.get('redirect_root', True)
            self._tornado = BokehTornado(self._applications, self.prefix, **tornado_kwargs)
            self._http = HTTPServer(self._tornado, xheaders=kwargs.get('use_xheaders', False))
            self._http.start(self._num_procs)
            self._http.add_sockets(sockets)
        except Exception:
            # Don't leak the bound sockets if startup fails part-way.
            for s in sockets:
                s.close()
            raise
        # Can only instantiate the IO loop after HTTPServer.start() was
        # called because of `num_procs`, see issue #5524
        if io_loop is None:
            io_loop = IOLoop.current()
        self._loop = io_loop
        # NOTE(review): tornado_kwargs is passed both to BokehTornado above
        # and to initialize() here — confirm this double pass is intended.
        self._tornado.initialize(io_loop=io_loop, **tornado_kwargs)

    @property
    def port(self):
        '''The actual port number the server is listening on for HTTP
        requests.
        '''
        return self._port

    @property
    def address(self):
        '''The address the server is listening on for HTTP requests
        (may be empty or None).
        '''
        return self._address

    @property
    def prefix(self):
        '''The normalized URL prefix ("" or "/some/prefix").'''
        return self._prefix

    @property
    def io_loop(self):
        '''The Tornado IOLoop this server is running on.'''
        return self._loop

    def start(self):
        ''' Start the Bokeh Server and its background tasks.

        Notes:
            This method does not block and does not affect the state of
            the Tornado I/O loop. You must start and stop the loop yourself.
        '''
        assert not self._started, "Already started"
        self._started = True
        self._tornado.start()

    def stop(self, wait=True):
        ''' Stop the Bokeh Server.

        Args:
            wait (boolean): whether to wait for orderly cleanup (default: True)

        Returns:
            None
        '''
        assert not self._stopped, "Already stopped"
        self._stopped = True
        self._tornado.stop(wait)

    def run_until_shutdown(self):
        ''' Run the Bokeh Server until shutdown is requested by the user,
        either via a Keyboard interrupt (Ctrl-C) or SIGTERM.
        '''
        if not self._started:
            self.start()
        # Install shutdown hooks
        atexit.register(self._atexit)
        signal.signal(signal.SIGTERM, self._sigterm)
        try:
            self._loop.start()
        except KeyboardInterrupt:
            print("\nInterrupted, shutting down")
        self.stop()

    # Guards against running the atexit cleanup twice.
    _atexit_ran = False

    def _atexit(self):
        if self._atexit_ran:
            return
        self._atexit_ran = True
        log.debug("Shutdown: cleaning up")
        if not self._stopped:
            self.stop(wait=False)

    def _sigterm(self, signum, frame):
        print("Received signal %d, shutting down" % (signum,))
        # Tell self._loop.start() to return.
        self._loop.add_callback_from_signal(self._loop.stop)

    def unlisten(self):
        '''Stop listening on ports (Server will no longer be usable after calling this)

        Returns:
            None
        '''
        self._http.close_all_connections()
        self._http.stop()

    def get_session(self, app_path, session_id):
        '''Gets a session by name (session must already exist)'''
        return self._tornado.get_session(app_path, session_id)

    def get_sessions(self, app_path):
        '''Gets all live sessions for an application.'''
        return self._tornado.get_sessions(app_path)

    def show(self, app_path, browser=None, new='tab'):
        ''' Opens an app in a browser window or tab.

        Useful for testing server applications on your local desktop but
        should not call when running bokeh-server on an actual server.

        Args:
            app_path (str) : the app path to open
                The part of the URL after the hostname:port, with leading slash.
            browser (str, optional) : browser to show with (default: None)
                For systems that support it, the **browser** argument allows
                specifying which browser to display in, e.g. "safari", "firefox",
                "opera", "windows-default" (see the ``webbrowser`` module
                documentation in the standard lib for more details).
            new (str, optional) : window or tab (default: "tab")
                If ``new`` is 'tab', then opens a new tab.
                If ``new`` is 'window', then opens a new window.

        Returns:
            None
        '''
        if not app_path.startswith("/"):
            raise ValueError("app_path must start with a /")
        from bokeh.util.browser import view
        url = "http://localhost:%d%s%s" % (self.port, self.prefix, app_path)
        view(url, browser=browser, new=new)
| {
"content_hash": "f337b59777c5e6b2f09aef61b7292a7a",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 129,
"avg_line_length": 35.80898876404494,
"alnum_prop": 0.5692919150716452,
"repo_name": "draperjames/bokeh",
"id": "bdcbb0d956c7dda324464bfe54fd70458f098dd8",
"size": "9561",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bokeh/server/server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "93011"
},
{
"name": "CoffeeScript",
"bytes": "1072438"
},
{
"name": "HTML",
"bytes": "46812"
},
{
"name": "JavaScript",
"bytes": "31782"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "2332463"
},
{
"name": "Shell",
"bytes": "3660"
},
{
"name": "TypeScript",
"bytes": "105726"
}
],
"symlink_target": ""
} |
import json
import io
import csv
import numpy as np
from django.shortcuts import render
from django.http import HttpResponse
from .forms import ReferencePointsForm
from .fit import *
# utility
def json_response(data, status=200):
    """Serialize *data* to JSON and wrap it in an UTF-8 HTTP response.

    ``ensure_ascii=False`` keeps non-ASCII text readable in the payload.
    """
    payload = json.dumps(data, ensure_ascii=False)
    return HttpResponse(payload,
                        content_type="application/json; charset=utf-8",
                        status=status)
def index(request):
    """Render the landing page with an empty reference-points upload form."""
    context = {'form': ReferencePointsForm()}
    return render(request, 'procrustes/index.html', context)
def _read_reference_points(fp, field_names):
dialect = csv.Sniffer().sniff(fp.read(1024), delimiters=";, \t")
fp.seek(0)
reader = csv.DictReader(fp, fieldnames=field_names, delimiter=dialect.delimiter, skipinitialspace=True, strict=True)
data = np.array([(float(p['xs']), float(p['ys']), float(p['xt']), float(p['yt'])) for p in reader])
source_coords = data[:, 0:2]
target_coords = data[:, 2:4]
return (source_coords, target_coords)
def upload_reference(request):
    """Fit a coordinate transformation from uploaded reference points.

    On a valid POST: parses the reference (and optional validation) point
    files, fits the selected 2D transformation, optionally applies a
    residual correction, and returns the fit results as JSON.  Any other
    request falls through to rendering the index page.
    """
    if request.method == 'POST':
        form_data = ReferencePointsForm(request.POST, request.FILES)
        if form_data.is_valid():
            # Column layout of the uploaded CSVs, e.g. "xs,ys,xt,yt".
            field_names = form_data.cleaned_data['points_format'].split(',')
            f = io.TextIOWrapper(form_data.cleaned_data['reference_points'], encoding='utf-8')
            source_coords, target_coords = _read_reference_points(f, field_names)
            # Select and fit the transformation model.
            transf_type = TransformationType(int(form_data.cleaned_data['transformation_type']))
            if transf_type == TransformationType.Similarity:
                tr = SimilarityTransformation2D(source_coords, target_coords)
            elif transf_type == TransformationType.Affine:
                tr = AffineTransformation2D(source_coords, target_coords)
            else:
                tr = PolynomialTransformation2D(source_coords, target_coords)
            tr_stats = ResidualStatistics(source_coords, target_coords, tr)
            # Optional residual correction on top of the fitted model.
            rescor_type = ResidualCorrectionType(int(form_data.cleaned_data['residual_correction_type']))
            has_residual_correction = rescor_type != ResidualCorrectionType.NoCorrection
            if rescor_type == ResidualCorrectionType.Collocation:
                cov_function_type = CovarianceFunctionType(int(form_data.cleaned_data['cov_function_type']))
                rescor = Collocation(source_coords, target_coords, tr, cov_function_type)
            elif rescor_type == ResidualCorrectionType.Hausbrandt:
                rescor = HausbrandtCorrection(source_coords, target_coords, tr)
            # Optional independent validation point set.
            has_validation = form_data.cleaned_data['validation_points'] is not None
            if has_validation:
                f = io.TextIOWrapper(form_data.cleaned_data['validation_points'], encoding='utf-8')
                val_source_coords, val_target_coords = _read_reference_points(f, field_names)
                val_stats = ResidualStatistics(val_source_coords, val_target_coords, tr)
                if has_residual_correction:
                    val_stats_rescor = ResidualStatistics(val_source_coords, val_target_coords, rescor)
            else:
                # NOTE(review): 'validation_statistics' is assigned but never
                # read anywhere below — confirm whether this branch is dead.
                validation_statistics = None
            result = {
                "input": {
                    "cs_source": {
                        "coords": source_coords.tolist(),
                        "units": "meters",
                    },
                    "cs_target": {
                        "coords": target_coords.tolist(),
                        "units": "meters",
                    },
                },
                "transformation": {
                    "type": transf_type.name,
                    "fitted_parameters": tr.get_parameters().tolist(),
                    "statistics": tr_stats.__dict__,
                },
            }
            if has_residual_correction and rescor_type == ResidualCorrectionType.Collocation:
                collocation = rescor
                result["collocation"] = {
                    "type": collocation.cov_func.type.name.lower(),  # CovarianceFunctionType
                    "distance_intervals": collocation.cov_func.distance_intervals.tolist(),
                    "empirical_cov": collocation.cov_func.empirical_cov.tolist(),
                    "fitted_cov": collocation.cov_func.fitted_cov.tolist(),
                }
            if has_validation:
                result["transformation"]["validation_statistics"] = val_stats.__dict__
                if has_residual_correction:
                    result["residual_correction"] = {
                        "validation_statistics": val_stats_rescor.__dict__
                    }
            return json_response(result)
    return index(request)
| {
"content_hash": "381979a3d65f5ce38c43f93579fa097a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 121,
"avg_line_length": 37.57692307692308,
"alnum_prop": 0.7090583418628454,
"repo_name": "georgeouzou/survgr",
"id": "c478d2868e5c2da8f227e2a101cfa98d802e4921",
"size": "3908",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "procrustes/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "461"
},
{
"name": "HTML",
"bytes": "27928"
},
{
"name": "JavaScript",
"bytes": "29738"
},
{
"name": "Python",
"bytes": "85411"
}
],
"symlink_target": ""
} |
"""Webhook handlers for mobile_app."""
import asyncio
from functools import wraps
import logging
import secrets
from aiohttp.web import HTTPBadRequest, Request, Response, json_response
from nacl.secret import SecretBox
import voluptuous as vol
from homeassistant.components import notify as hass_notify, tag
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES as BINARY_SENSOR_CLASSES,
)
from homeassistant.components.camera import SUPPORT_STREAM as CAMERA_SUPPORT_STREAM
from homeassistant.components.device_tracker import (
ATTR_BATTERY,
ATTR_GPS,
ATTR_GPS_ACCURACY,
ATTR_LOCATION_NAME,
)
from homeassistant.components.frontend import MANIFEST_JSON
from homeassistant.components.sensor import DEVICE_CLASSES as SENSOR_CLASSES
from homeassistant.components.zone.const import DOMAIN as ZONE_DOMAIN
from homeassistant.const import (
ATTR_DOMAIN,
ATTR_SERVICE,
ATTR_SERVICE_DATA,
ATTR_SUPPORTED_FEATURES,
CONF_WEBHOOK_ID,
HTTP_BAD_REQUEST,
HTTP_CREATED,
)
from homeassistant.core import EventOrigin
from homeassistant.exceptions import HomeAssistantError, ServiceNotFound
from homeassistant.helpers import (
config_validation as cv,
device_registry as dr,
template,
)
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.decorator import Registry
from .const import (
ATTR_ALTITUDE,
ATTR_APP_DATA,
ATTR_APP_VERSION,
ATTR_CAMERA_ENTITY_ID,
ATTR_COURSE,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_EVENT_DATA,
ATTR_EVENT_TYPE,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_VERSION,
ATTR_SENSOR_ATTRIBUTES,
ATTR_SENSOR_DEVICE_CLASS,
ATTR_SENSOR_ICON,
ATTR_SENSOR_NAME,
ATTR_SENSOR_STATE,
ATTR_SENSOR_TYPE,
ATTR_SENSOR_TYPE_BINARY_SENSOR,
ATTR_SENSOR_TYPE_SENSOR,
ATTR_SENSOR_UNIQUE_ID,
ATTR_SENSOR_UOM,
ATTR_SPEED,
ATTR_SUPPORTS_ENCRYPTION,
ATTR_TEMPLATE,
ATTR_TEMPLATE_VARIABLES,
ATTR_VERTICAL_ACCURACY,
ATTR_WEBHOOK_DATA,
ATTR_WEBHOOK_ENCRYPTED,
ATTR_WEBHOOK_ENCRYPTED_DATA,
ATTR_WEBHOOK_TYPE,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
DATA_CONFIG_ENTRIES,
DATA_DELETED_IDS,
DATA_STORE,
DOMAIN,
ERR_ENCRYPTION_ALREADY_ENABLED,
ERR_ENCRYPTION_NOT_AVAILABLE,
ERR_ENCRYPTION_REQUIRED,
ERR_INVALID_FORMAT,
ERR_SENSOR_NOT_REGISTERED,
SIGNAL_LOCATION_UPDATE,
SIGNAL_SENSOR_UPDATE,
)
from .helpers import (
_decrypt_payload,
empty_okay_response,
error_response,
registration_context,
safe_registration,
savable_state,
supports_encryption,
webhook_response,
)
_LOGGER = logging.getLogger(__name__)

# Debounce interval for delayed saves (consumed elsewhere in this module).
DELAY_SAVE = 10

# Maps webhook type names to their handler coroutines (see @register below).
WEBHOOK_COMMANDS = Registry()

# Every device class a mobile_app sensor may declare.
COMBINED_CLASSES = set(BINARY_SENSOR_CLASSES + SENSOR_CLASSES)
SENSOR_TYPES = [ATTR_SENSOR_TYPE_BINARY_SENSOR, ATTR_SENSOR_TYPE_SENSOR]

# Outer envelope every webhook payload must match before dispatch.
WEBHOOK_PAYLOAD_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_WEBHOOK_TYPE): cv.string,
        vol.Required(ATTR_WEBHOOK_DATA, default={}): vol.Any(dict, list),
        vol.Optional(ATTR_WEBHOOK_ENCRYPTED, default=False): cv.boolean,
        vol.Optional(ATTR_WEBHOOK_ENCRYPTED_DATA): cv.string,
    }
)
def validate_schema(schema):
    """Decorate a webhook handler so its payload is validated first.

    *schema* may be a plain dict (wrapped in a voluptuous Schema here) or a
    ready-made Schema.  Invalid payloads are logged and answered with an
    empty-but-OK response instead of propagating the error.
    """
    if isinstance(schema, dict):
        schema = vol.Schema(schema)

    def wrapper(func):
        """Wrap *func* with payload validation."""

        @wraps(func)
        async def validate_and_run(hass, config_entry, data):
            """Validate input and call handler."""
            try:
                validated = schema(data)
            except vol.Invalid as ex:
                err = vol.humanize.humanize_error(data, ex)
                _LOGGER.error("Received invalid webhook payload: %s", err)
                return empty_okay_response()
            return await func(hass, config_entry, validated)

        return validate_and_run

    return wrapper
async def handle_webhook(
    hass: HomeAssistantType, webhook_id: str, request: Request
) -> Response:
    """Handle webhook callback.

    Validates, optionally decrypts, and dispatches the payload to the
    registered handler for its webhook type.
    """
    if webhook_id in hass.data[DOMAIN][DATA_DELETED_IDS]:
        # 410 Gone tells the app its registration no longer exists.
        return Response(status=410)
    config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES][webhook_id]
    device_name = config_entry.data[ATTR_DEVICE_NAME]
    try:
        req_data = await request.json()
    except ValueError:
        _LOGGER.warning("Received invalid JSON from mobile_app device: %s", device_name)
        return empty_okay_response(status=HTTP_BAD_REQUEST)
    # A registration that negotiated encryption must never downgrade.
    if (
        ATTR_WEBHOOK_ENCRYPTED not in req_data
        and config_entry.data[ATTR_SUPPORTS_ENCRYPTION]
    ):
        _LOGGER.warning(
            "Refusing to accept unencrypted webhook from %s",
            device_name,
        )
        return error_response(ERR_ENCRYPTION_REQUIRED, "Encryption required")
    try:
        req_data = WEBHOOK_PAYLOAD_SCHEMA(req_data)
    except vol.Invalid as ex:
        err = vol.humanize.humanize_error(req_data, ex)
        _LOGGER.error(
            "Received invalid webhook from %s with payload: %s", device_name, err
        )
        return empty_okay_response()
    webhook_type = req_data[ATTR_WEBHOOK_TYPE]
    webhook_payload = req_data.get(ATTR_WEBHOOK_DATA, {})
    if req_data[ATTR_WEBHOOK_ENCRYPTED]:
        enc_data = req_data[ATTR_WEBHOOK_ENCRYPTED_DATA]
        webhook_payload = _decrypt_payload(config_entry.data[CONF_SECRET], enc_data)
    if webhook_type not in WEBHOOK_COMMANDS:
        _LOGGER.error(
            "Received invalid webhook from %s of type: %s", device_name, webhook_type
        )
        return empty_okay_response()
    _LOGGER.debug(
        "Received webhook payload from %s for type %s: %s",
        device_name,
        webhook_type,
        webhook_payload,
    )
    # Shield so we make sure we finish the webhook, even if sender hangs up.
    return await asyncio.shield(
        WEBHOOK_COMMANDS[webhook_type](hass, config_entry, webhook_payload)
    )
@WEBHOOK_COMMANDS.register("call_service")
@validate_schema(
    {
        vol.Required(ATTR_DOMAIN): cv.string,
        vol.Required(ATTR_SERVICE): cv.string,
        vol.Optional(ATTR_SERVICE_DATA, default={}): dict,
    }
)
async def webhook_call_service(hass, config_entry, data):
    """Handle a call service webhook.

    Calls the requested service synchronously (blocking=True) so failures
    surface here; any failure is logged and reported to the app as HTTP 400.
    """
    try:
        await hass.services.async_call(
            data[ATTR_DOMAIN],
            data[ATTR_SERVICE],
            data[ATTR_SERVICE_DATA],
            blocking=True,
            context=registration_context(config_entry.data),
        )
    # ``Exception`` already covers vol.Invalid and ServiceNotFound, which the
    # previous tuple listed redundantly — any service failure becomes a 400.
    except Exception as ex:  # pylint: disable=broad-except
        _LOGGER.error(
            "Error when calling service during mobile_app "
            "webhook (device name: %s): %s",
            config_entry.data[ATTR_DEVICE_NAME],
            ex,
        )
        raise HTTPBadRequest() from ex
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("fire_event")
@validate_schema(
    {
        vol.Required(ATTR_EVENT_TYPE): cv.string,
        vol.Optional(ATTR_EVENT_DATA, default={}): dict,
    }
)
async def webhook_fire_event(hass, config_entry, data):
    """Fire an event on the bus on behalf of the registered mobile device.

    The event is tagged with EventOrigin.remote and the registration's
    context so its source is traceable.
    """
    hass.bus.async_fire(
        data[ATTR_EVENT_TYPE],
        data[ATTR_EVENT_DATA],
        EventOrigin.remote,
        context=registration_context(config_entry.data),
    )
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("stream_camera")
@validate_schema({vol.Required(ATTR_CAMERA_ENTITY_ID): cv.string})
async def webhook_stream_camera(hass, config_entry, data):
    """Handle a request to HLS-stream a camera.

    Always returns an MJPEG proxy path; "hls_path" is set only when the
    camera advertises stream support and the stream can be started.
    """
    camera = hass.states.get(data[ATTR_CAMERA_ENTITY_ID])
    if camera is None:
        # Unknown entity: report failure with HTTP 400.
        return webhook_response(
            {"success": False},
            registration=config_entry.data,
            status=HTTP_BAD_REQUEST,
        )
    resp = {"mjpeg_path": "/api/camera_proxy_stream/%s" % (camera.entity_id)}
    if camera.attributes[ATTR_SUPPORTED_FEATURES] & CAMERA_SUPPORT_STREAM:
        try:
            resp["hls_path"] = await hass.components.camera.async_request_stream(
                camera.entity_id, "hls"
            )
        except HomeAssistantError:
            # Stream could not be started; the app can fall back to MJPEG.
            resp["hls_path"] = None
    else:
        resp["hls_path"] = None
    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("render_template")
@validate_schema(
    {
        str: {
            vol.Required(ATTR_TEMPLATE): cv.string,
            vol.Optional(ATTR_TEMPLATE_VARIABLES, default={}): dict,
        }
    }
)
async def webhook_render_template(hass, config_entry, data):
    """Handle a render template webhook.

    Renders each requested template independently; a failing template is
    reported per-key as {"error": ...} without affecting the others.
    """
    resp = {}
    for key, item in data.items():
        try:
            tpl = template.Template(item[ATTR_TEMPLATE], hass)
            resp[key] = tpl.async_render(item.get(ATTR_TEMPLATE_VARIABLES))
        except template.TemplateError as ex:
            resp[key] = {"error": str(ex)}
    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("update_location")
@validate_schema(
    {
        vol.Optional(ATTR_LOCATION_NAME): cv.string,
        vol.Required(ATTR_GPS): cv.gps,
        vol.Required(ATTR_GPS_ACCURACY): cv.positive_int,
        vol.Optional(ATTR_BATTERY): cv.positive_int,
        vol.Optional(ATTR_SPEED): cv.positive_int,
        vol.Optional(ATTR_ALTITUDE): vol.Coerce(float),
        vol.Optional(ATTR_COURSE): cv.positive_int,
        vol.Optional(ATTR_VERTICAL_ACCURACY): cv.positive_int,
    }
)
async def webhook_update_location(hass, config_entry, data):
    """Handle an update location webhook.

    Forwards the validated location data to listeners of this entry's
    location-update dispatcher signal.
    """
    hass.helpers.dispatcher.async_dispatcher_send(
        SIGNAL_LOCATION_UPDATE.format(config_entry.entry_id), data
    )
    return empty_okay_response()
@WEBHOOK_COMMANDS.register("update_registration")
@validate_schema(
    {
        vol.Optional(ATTR_APP_DATA, default={}): dict,
        vol.Required(ATTR_APP_VERSION): cv.string,
        vol.Required(ATTR_DEVICE_NAME): cv.string,
        vol.Required(ATTR_MANUFACTURER): cv.string,
        vol.Required(ATTR_MODEL): cv.string,
        vol.Optional(ATTR_OS_VERSION): cv.string,
    }
)
async def webhook_update_registration(hass, config_entry, data):
    """Handle an update registration webhook.

    Merges the new registration data over the stored config entry, syncs
    the device registry entry, and reloads the mobile_app notify platform.
    """
    new_registration = {**config_entry.data, **data}
    device_registry = await dr.async_get_registry(hass)
    device_registry.async_get_or_create(
        config_entry_id=config_entry.entry_id,
        identifiers={(DOMAIN, config_entry.data[ATTR_DEVICE_ID])},
        manufacturer=new_registration[ATTR_MANUFACTURER],
        model=new_registration[ATTR_MODEL],
        name=new_registration[ATTR_DEVICE_NAME],
        # NOTE(review): ATTR_OS_VERSION is optional in the schema — a
        # KeyError is possible if it was never supplied; confirm.
        sw_version=new_registration[ATTR_OS_VERSION],
    )
    hass.config_entries.async_update_entry(config_entry, data=new_registration)
    await hass_notify.async_reload(hass, DOMAIN)
    return webhook_response(
        # safe_registration strips fields before echoing back to the app.
        safe_registration(new_registration),
        registration=new_registration,
    )
@WEBHOOK_COMMANDS.register("enable_encryption")
async def webhook_enable_encryption(hass, config_entry, data):
    """Handle an encryption enable webhook.

    Generates a new SecretBox key, stores it on the config entry, and
    returns it to the app.  Fails if encryption is already enabled or if
    libsodium support is unavailable.
    """
    if config_entry.data[ATTR_SUPPORTS_ENCRYPTION]:
        _LOGGER.warning(
            "Refusing to enable encryption for %s because it is already enabled!",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(
            ERR_ENCRYPTION_ALREADY_ENABLED, "Encryption already enabled"
        )
    if not supports_encryption():
        _LOGGER.warning(
            "Unable to enable encryption for %s because libsodium is unavailable!",
            config_entry.data[ATTR_DEVICE_NAME],
        )
        return error_response(ERR_ENCRYPTION_NOT_AVAILABLE, "Encryption is unavailable")
    # secrets (not random) yields a cryptographically strong key.
    secret = secrets.token_hex(SecretBox.KEY_SIZE)
    data = {**config_entry.data, ATTR_SUPPORTS_ENCRYPTION: True, CONF_SECRET: secret}
    hass.config_entries.async_update_entry(config_entry, data=data)
    return json_response({"secret": secret})
@WEBHOOK_COMMANDS.register("register_sensor")
@validate_schema(
    {
        vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
        vol.Optional(ATTR_SENSOR_DEVICE_CLASS): vol.All(
            vol.Lower, vol.In(COMBINED_CLASSES)
        ),
        vol.Required(ATTR_SENSOR_NAME): cv.string,
        vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
        vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
        vol.Optional(ATTR_SENSOR_UOM): cv.string,
        vol.Optional(ATTR_SENSOR_STATE, default=None): vol.Any(
            None, bool, str, int, float
        ),
        vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
    }
)
async def webhook_register_sensor(hass, config_entry, data):
    """Handle a register sensor webhook."""
    entity_type = data[ATTR_SENSOR_TYPE]
    unique_id = data[ATTR_SENSOR_UNIQUE_ID]
    device_name = config_entry.data[ATTR_DEVICE_NAME]

    # Stored sensors are keyed per webhook, so unique ids only need to be
    # unique per device registration.
    store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"
    already_registered = store_key in hass.data[DOMAIN][entity_type]

    data[CONF_WEBHOOK_ID] = config_entry.data[CONF_WEBHOOK_ID]

    if already_registered:
        # Re-registration: merge the fresh payload over the stored config.
        _LOGGER.debug(
            "Re-register for %s of existing sensor %s", device_name, unique_id
        )
        data = {**hass.data[DOMAIN][entity_type][store_key], **data}

    hass.data[DOMAIN][entity_type][store_key] = data

    # Persist lazily; DELAY_SAVE batches rapid registrations.
    hass.data[DOMAIN][DATA_STORE].async_delay_save(
        lambda: savable_state(hass), DELAY_SAVE
    )

    if already_registered:
        async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, data)
    else:
        async_dispatcher_send(hass, f"{DOMAIN}_{data[ATTR_SENSOR_TYPE]}_register", data)

    return webhook_response(
        {"success": True},
        registration=config_entry.data,
        status=HTTP_CREATED,
    )
@WEBHOOK_COMMANDS.register("update_sensor_states")
@validate_schema(
    vol.All(
        cv.ensure_list,
        [
            # Partial schema, enough to identify schema.
            # We don't validate everything because otherwise 1 invalid sensor
            # will invalidate all sensors.
            vol.Schema(
                {
                    vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
                    vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
                },
                extra=vol.ALLOW_EXTRA,
            )
        ],
    )
)
async def webhook_update_sensor_states(hass, config_entry, data):
    """Handle an update sensor states webhook.

    ``data`` is a list of per-sensor payloads.  Each item is validated
    individually against the full schema inside the loop so one bad
    sensor cannot invalidate the rest; per-sensor success/failure is
    reported in the response, keyed by the sensor's unique id.
    """
    # Full per-item schema (the decorator above only checked the keys
    # needed to identify each sensor).
    sensor_schema_full = vol.Schema(
        {
            vol.Optional(ATTR_SENSOR_ATTRIBUTES, default={}): dict,
            vol.Optional(ATTR_SENSOR_ICON, default="mdi:cellphone"): cv.icon,
            vol.Required(ATTR_SENSOR_STATE): vol.Any(None, bool, str, int, float),
            vol.Required(ATTR_SENSOR_TYPE): vol.In(SENSOR_TYPES),
            vol.Required(ATTR_SENSOR_UNIQUE_ID): cv.string,
        }
    )

    device_name = config_entry.data[ATTR_DEVICE_NAME]
    resp = {}

    for sensor in data:
        entity_type = sensor[ATTR_SENSOR_TYPE]
        unique_id = sensor[ATTR_SENSOR_UNIQUE_ID]

        # Stored sensors are keyed by "<webhook_id>_<unique_id>".
        unique_store_key = f"{config_entry.data[CONF_WEBHOOK_ID]}_{unique_id}"

        if unique_store_key not in hass.data[DOMAIN][entity_type]:
            # Updates are only accepted for sensors previously created via
            # the register_sensor webhook.
            _LOGGER.error(
                "Refusing to update %s non-registered sensor: %s",
                device_name,
                unique_store_key,
            )
            err_msg = f"{entity_type} {unique_id} is not registered"
            resp[unique_id] = {
                "success": False,
                "error": {"code": ERR_SENSOR_NOT_REGISTERED, "message": err_msg},
            }
            continue

        entry = hass.data[DOMAIN][entity_type][unique_store_key]

        try:
            # NB: rebinds the loop variable to the validated/defaulted copy.
            sensor = sensor_schema_full(sensor)
        except vol.Invalid as err:
            err_msg = vol.humanize.humanize_error(sensor, err)
            _LOGGER.error(
                "Received invalid sensor payload from %s for %s: %s",
                device_name,
                unique_id,
                err_msg,
            )
            resp[unique_id] = {
                "success": False,
                "error": {"code": ERR_INVALID_FORMAT, "message": err_msg},
            }
            continue

        # Merge the update over the stored config and publish it to the
        # matching entity through the dispatcher.
        new_state = {**entry, **sensor}
        hass.data[DOMAIN][entity_type][unique_store_key] = new_state
        async_dispatcher_send(hass, SIGNAL_SENSOR_UPDATE, new_state)
        resp[unique_id] = {"success": True}

    # Persist the (possibly) updated store once, after processing all items.
    hass.data[DOMAIN][DATA_STORE].async_delay_save(
        lambda: savable_state(hass), DELAY_SAVE
    )

    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_zones")
async def webhook_get_zones(hass, config_entry, data):
    """Handle a get zones webhook.

    Returns the state objects of all zone entities, ordered by entity id.
    """
    zone_ids = sorted(hass.states.async_entity_ids(ZONE_DOMAIN))
    zones = list(map(hass.states.get, zone_ids))
    return webhook_response(zones, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("get_config")
async def webhook_get_config(hass, config_entry, data):
    """Handle a get config webhook.

    Returns the subset of the Home Assistant configuration a mobile app
    needs, plus cloud URLs when they are available.
    """
    hass_config = hass.config.as_dict()

    resp = {
        key: hass_config[key]
        for key in (
            "latitude",
            "longitude",
            "elevation",
            "unit_system",
            "location_name",
            "time_zone",
            "components",
            "version",
        )
    }
    resp["theme_color"] = MANIFEST_JSON["theme_color"]

    if CONF_CLOUDHOOK_URL in config_entry.data:
        resp[CONF_CLOUDHOOK_URL] = config_entry.data[CONF_CLOUDHOOK_URL]

    # Remote UI URL is best-effort: absent unless the cloud is connected.
    try:
        resp[CONF_REMOTE_UI_URL] = hass.components.cloud.async_remote_ui_url()
    except hass.components.cloud.CloudNotAvailable:
        pass

    return webhook_response(resp, registration=config_entry.data)
@WEBHOOK_COMMANDS.register("scan_tag")
@validate_schema({vol.Required("tag_id"): cv.string})
async def webhook_scan_tag(hass, config_entry, data):
    """Handle a fire event webhook."""
    device_id = config_entry.data[ATTR_DEVICE_ID]
    context = registration_context(config_entry.data)
    await tag.async_scan_tag(hass, data["tag_id"], device_id, context)
    return empty_okay_response()
| {
"content_hash": "b10cacfea8f9c196daeca804e23005f7",
"timestamp": "",
"source": "github",
"line_count": 582,
"max_line_length": 88,
"avg_line_length": 32.08247422680412,
"alnum_prop": 0.6410132819194516,
"repo_name": "tboyce1/home-assistant",
"id": "043a555b6b757cdf3938a5f4e43cff09e39cb49b",
"size": "18672",
"binary": false,
"copies": "6",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mobile_app/webhook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1510140"
},
{
"name": "Python",
"bytes": "5144365"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
} |
from os import path
from gluon import *
from gluon.storage import Storage
from s3 import *
# =============================================================================
class index():
    """ Custom Home Page (EUROSHA template, web2py/Sahana Eden controller) """

    def __call__(self):
        """
        Assemble the home page: menu boxes, facility map link, optional
        organisation table, and login/registration forms.  Returns the
        template context dict rendered by the custom index.html view.
        """
        request = current.request
        response = current.response
        response.title = current.deployment_settings.get_system_name()
        T = current.T
        db = current.db
        auth = current.auth
        s3db = current.s3db
        s3 = response.s3
        appname = request.application
        settings = current.deployment_settings

        # Use the deployment's CRUD title for shelters so the menu label
        # follows any renaming; blank when the cr module is disabled.
        has_module = settings.has_module
        if has_module("cr"):
            table = s3db.cr_shelter
            SHELTERS = s3.crud_strings["cr_shelter"].title_list
        else:
            SHELTERS = ""

        # Menu Boxes
        menu_btns = [#div, label, app, function
                     #["col1", T("Staff"), "hrm", "staff"],
                     #["col1", T("Volunteers"), "vol", "volunteer"],
                     ["col1", T("Projects"), "project", "project"],
                     ["col1", T("Vehicles"), "vehicle", "vehicle"],
                     ["col2", T("Assets"), "asset", "asset"],
                     ["col2", T("Inventory Items"), "inv", "inv_item"],
                     #["facility", T("Facilities"), "org", "facility"],
                     ["facility", T("Hospitals"), "hms", "hospital"],
                     ["facility", T("Offices"), "org", "office"],
                     ["facility", SHELTERS, "cr", "shelter"],
                     ["facility", T("Transport"), "transport", "index"],
                     ["facility", T("Warehouses"), "inv", "warehouse"],
                     ]

        # Target DIVs the buttons are appended into, keyed by the first
        # column of menu_btns.
        menu_divs = {"col1": DIV(_id="menu_div_col1", _class="menu_div"),
                     "col2": DIV(_id="menu_div_col2", _class="menu_div"),
                     "facility": DIV(H3(T("Facilities")),
                                     _id = "facility_box",
                                     _class = "menu_box"),
                     }

        # Only show buttons for enabled modules.
        for div, label, app, function in menu_btns:
            if has_module(app):
                # @ToDo: Also check permissions (e.g. for anonymous users)
                menu_divs[div].append(A(DIV(label,
                                            _class="menu-btn-r"),
                                        _class="menu-btn-l",
                                        _href = URL(app, function)
                                        )
                                      )

        cols_box = DIV(H3(T("Humanitarian Projects")),
                       DIV(_id="menu_div_col0"),
                       menu_divs["col1"],
                       menu_divs["col2"],
                       _id="cols_box",
                       #_class="menu_box fleft swidth"
                       _class="menu_box"
                       )

        # Facilities box gets a map link appended after the buttons.
        facility_box = menu_divs["facility"]
        facility_box.append(A(IMG(_src="/%s/static/img/map_icon_128.png" % \
                                  appname),
                              _href = URL(c="gis", f="index"),
                              _title = T("Map")
                              )
                            )

        datatable_ajax_source = ""

        # Check logged in AND permissions
        roles = current.session.s3.roles
        system_roles = auth.get_system_roles()
        AUTHENTICATED = system_roles.AUTHENTICATED
        table = s3db.org_organisation
        has_permission = auth.s3_has_permission
        if AUTHENTICATED in roles and \
           has_permission("read", table):
            # Authenticated users get the organisation datatable...
            org_items = self.organisation()
            datatable_ajax_source = "/%s/default/organisation.aadata" % \
                                    appname
            s3.actions = None

            # ...and, if they administer any sites, a facility selector.
            permit = auth.permission
            permit.controller = "org"
            permit.function = "site"
            permitted_facilities = auth.permitted_facilities(redirect_on_error=False)
            if permitted_facilities:
                facilities = s3db.org_SiteRepresent().bulk(permitted_facilities)
                facility_list = [(fac, facilities[fac]) for fac in facilities]
                # Sort by represented (human-readable) name.
                facility_list = sorted(facility_list, key=lambda fac: fac[1])
                facility_opts = [OPTION(fac[1], _value=fac[0])
                                 for fac in facility_list]
                manage_facility_box = DIV(H3(T("Manage Your Facilities")),
                                          SELECT(_id = "manage_facility_select",
                                                 _style = "max-width:400px;",
                                                 *facility_opts
                                                 ),
                                          A(T("Go"),
                                            _href = URL(c="default", f="site",
                                                        args=[facility_list[0][0]]),
                                            #_disabled = "disabled",
                                            _id = "manage_facility_btn",
                                            _class = "action-btn"
                                            ),
                                          _id = "manage_facility_box",
                                          _class = "menu_box fleft"
                                          )
                # Keep the Go button's href in sync with the selector.
                s3.jquery_ready.append(
'''$('#manage_facility_select').change(function(){
$('#manage_facility_btn').attr('href',S3.Ap.concat('/default/site/',$('#manage_facility_select').val()))
})''')
            else:
                manage_facility_box = ""

            if has_permission("create", table):
                create = A(T("Add Organization"),
                           _href = URL(c="org", f="organisation",
                                       args=["create"]),
                           _id = "add-btn",
                           _class = "action-btn",
                           _style = "margin-right: 10px;")
            else:
                create = ""
            org_box = DIV(H3(T("Organizations")),
                          create,
                          org_items,
                          _id = "org_box",
                          _class = "menu_box fleft"
                          )
        else:
            manage_facility_box = ""
            org_box = ""

        # Login/Registration forms
        self_registration = settings.get_security_self_registration()
        registered = False
        login_form = None
        login_div = None
        register_form = None
        register_div = None
        if AUTHENTICATED not in roles:
            # This user isn't yet logged-in
            # NOTE(review): dict.has_key() is Python-2-only; fine for
            # web2py on Py2, but would break on Python 3.
            if request.cookies.has_key("registered"):
                # This browser has logged-in before
                registered = True

            if self_registration:
                # Provide a Registration box on front page
                register_form = auth.s3_registration_form()
                register_div = DIV(H3(T("Register")),
                                   P(XML(T("If you would like to help, then please %(sign_up_now)s") % \
                                         dict(sign_up_now=B(T("sign-up now"))))))
                # A POST here is a (failed) registration submit: re-show
                # the registration form instead of the login form.
                if request.env.request_method == "POST":
                    post_script = \
'''$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')'''
                else:
                    post_script = ""
                register_script = \
'''$('#register-btn').attr('href','#register')
$('#login-btn').attr('href','#login')
%s
$('#register-btn').click(function(){
$('#register_form').removeClass('hide')
$('#login_form').addClass('hide')
})
$('#login-btn').click(function(){
$('#register_form').addClass('hide')
$('#login_form').removeClass('hide')
})''' % post_script
                s3.jquery_ready.append(register_script)

            # Provide a login box on front page
            request.args = ["login"]
            auth.messages.submit_button = T("Login")
            login_form = auth()
            login_div = DIV(H3(T("Login")),
                            P(XML(T("Registered users can %(login)s to access the system") % \
                                  dict(login=B(T("login"))))))

        # Optional Google dynamic-feed widget for front-page RSS feeds.
        if settings.frontpage.rss:
            s3.external_stylesheets.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.css")
            s3.scripts.append("http://www.google.com/jsapi?key=notsupplied-wizard")
            s3.scripts.append("http://www.google.com/uds/solutions/dynamicfeed/gfdynamicfeedcontrol.js")
            counter = 0
            feeds = ""
            for feed in settings.frontpage.rss:
                counter += 1
                feeds = "".join((feeds,
                                 "{title:'%s',\n" % feed["title"],
                                 "url:'%s'}" % feed["url"]))
                # Don't add a trailing comma for old IEs
                if counter != len(settings.frontpage.rss):
                    feeds += ",\n"
            # feedCycleTime: milliseconds before feed is reloaded (5 minutes)
            feed_control = "".join(('''
function LoadDynamicFeedControl(){
var feeds=[
''', feeds, '''
]
var options={
feedCycleTime:300000,
numResults:5,
stacked:true,
horizontal:false,
title:"''', str(T("News")), '''"
}
new GFdynamicFeedControl(feeds,'feed-control',options)
}
google.load('feeds','1')
google.setOnLoadCallback(LoadDynamicFeedControl)'''))
            s3.js_global.append(feed_control)

        view = path.join(request.folder, "private", "templates",
                         "EUROSHA", "views", "index.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP(404, "Unable to open Custom View: %s" % view)

        return dict(title = response.title,
                    cols_box = cols_box,
                    facility_box = facility_box,
                    manage_facility_box = manage_facility_box,
                    org_box = org_box,
                    r = None, # Required for dataTable to work
                    datatable_ajax_source = datatable_ajax_source,
                    self_registration=self_registration,
                    registered=registered,
                    login_form=login_form,
                    login_div=login_div,
                    register_form=register_form,
                    register_div=register_div
                    )

    # -------------------------------------------------------------------------
    @staticmethod
    def organisation():
        """
            Function to handle pagination for the org list on the homepage

            Serves both the initial HTML render and subsequent .aadata
            (server-side pagination) AJAX requests from dataTables.
        """
        request = current.request
        get_vars = request.get_vars

        resource = current.s3db.resource("org_organisation")
        totalrows = resource.count()
        if "iDisplayLength" in get_vars:
            display_length = int(get_vars["iDisplayLength"])
        else:
            display_length = 10
        # Fetch a few pages ahead so client-side paging stays responsive.
        limit = 4 * display_length

        list_fields = ["id", "name"]
        # NOTE(review): "filter" shadows the builtin; kept as-is here.
        filter, orderby, left = resource.datatable_filter(list_fields,
                                                          get_vars)
        resource.add_filter(filter)
        data = resource.select(list_fields,
                               start=0,
                               limit=limit,
                               orderby=orderby,
                               left=left,
                               count=True,
                               represent=True)
        filteredrows = data["numrows"]
        rfields = data["rfields"]
        rows = data["rows"]

        dt = S3DataTable(rfields, rows)
        dt_id = "org_dt"
        if request.extension == "html":
            # First page load: render the full dataTable HTML.
            dt.defaultActionButtons(resource)
            current.response.s3.no_formats = True
            items = dt.html(totalrows,
                            filteredrows,
                            dt_id,
                            dt_displayLength=display_length,
                            dt_ajax_url=URL(c="default",
                                            f="organisation",
                                            extension="aadata",
                                            vars={"id": dt_id},
                                            ),
                            dt_pagination="true",
                            )
        elif request.extension == "aadata":
            # AJAX page request: return JSON, echoing sEcho back for
            # dataTables' request/response matching.
            if "sEcho" in get_vars:
                echo = int(get_vars.sEcho)
            else:
                echo = None
            items = dt.json(totalrows,
                            filteredrows,
                            dt_id,
                            echo)
            current.response.headers["Content-Type"] = "application/json"
        else:
            from gluon.http import HTTP
            raise HTTP(501, resource.ERROR.BAD_FORMAT)
        return items
# END =========================================================================
| {
"content_hash": "d1645a3a042549b1c95f553b0f69da10",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 118,
"avg_line_length": 41.5062893081761,
"alnum_prop": 0.43677551329646186,
"repo_name": "sammyshj/gci",
"id": "2134fd947c6daf18e9bdf4f2253e41fe5e207158",
"size": "13224",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "private/templates/EUROSHA/controllers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1375094"
},
{
"name": "JavaScript",
"bytes": "16625771"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "25684403"
},
{
"name": "Racket",
"bytes": "166"
},
{
"name": "Shell",
"bytes": "727"
},
{
"name": "XSLT",
"bytes": "2003150"
}
],
"symlink_target": ""
} |
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,re,sys,shutil
from waflib import Utils,Errors
# Ant-style glob patterns pruned by default from ant_glob() searches:
# editor backup/lock files and the metadata folders of common VCS tools.
exclude_regs='''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.DS_Store'''
def split_path(path):
    """Split a POSIX-style path into its components."""
    return path.split('/')

def split_path_cygwin(path):
    """Split a cygwin path; '//server/share' keeps '/server' as component 0."""
    if path.startswith('//'):
        parts = path.split('/')[2:]
        parts[0] = '/' + parts[0]
        return parts
    return path.split('/')

# Matches either path separator on win32.
re_sp = re.compile('[/\\\\]')

def split_path_win32(path):
    """Split a win32 path on / or \\; UNC paths keep '\\server' as component 0."""
    if path.startswith('\\\\'):
        parts = re_sp.split(path)[2:]
        parts[0] = '\\' + parts[0]
        return parts
    return re_sp.split(path)
# Bind split_path to the platform-appropriate splitter: cygwin and native
# win32 need special handling for UNC ('//server' / '\\server') prefixes.
if sys.platform=='cygwin':
    split_path=split_path_cygwin
elif Utils.is_win32:
    split_path=split_path_win32
class Node(object):
    """A file or folder in waf's virtual filesystem tree.

    Nodes form a tree via ``parent``/``children``; full paths are not
    stored, they are recomputed (and cached) from the chain of names.
    Equality and hashing are by identity: the tree guarantees at most
    one node per path.

    NOTE(review): this is generated waf code ("Do not edit" header); the
    only behavior-affecting changes made here are two Python 3
    compatibility fixes that are equally valid on Python 2.6+:
    ``except Exception as e`` syntax, and ``return`` instead of
    ``raise StopIteration`` inside the ``ant_iter`` generator (PEP 479).
    """
    # __slots__ keeps per-instance memory low; builds create many nodes.
    __slots__=('name','sig','children','parent','cache_abspath','cache_isdir','cache_sig')
    def __init__(self,name,parent):
        self.name=name
        self.parent=parent
        if parent:
            if name in parent.children:
                raise Errors.WafError('node %s exists in the parent files %r already'%(name,parent))
            parent.children[name]=self
    def __setstate__(self,data):
        # Counterpart of __getstate__: (name, parent, children, sig).
        self.name=data[0]
        self.parent=data[1]
        if data[2]is not None:
            self.children=data[2]
        if data[3]is not None:
            self.sig=data[3]
    def __getstate__(self):
        return(self.name,self.parent,getattr(self,'children',None),getattr(self,'sig',None))
    def __str__(self):
        return self.name
    def __repr__(self):
        return self.abspath()
    def __hash__(self):
        # Identity semantics: see class docstring.
        return id(self)
    def __eq__(self,node):
        return id(self)==id(node)
    def __copy__(self):
        raise Errors.WafError('nodes are not supposed to be copied')
    def read(self,flags='r',encoding='ISO8859-1'):
        """Read and return the file contents."""
        return Utils.readf(self.abspath(),flags,encoding)
    def write(self,data,flags='w',encoding='ISO8859-1'):
        """Write ``data`` to the file."""
        Utils.writef(self.abspath(),data,flags,encoding)
    def chmod(self,val):
        os.chmod(self.abspath(),val)
    def delete(self):
        """Remove the file/folder from disk (best effort) and from the tree."""
        try:
            if getattr(self,'children',None):
                shutil.rmtree(self.abspath())
            else:
                os.unlink(self.abspath())
        except OSError:
            pass
        self.evict()
    def evict(self):
        # Detach this node from its parent (does not touch the disk).
        del self.parent.children[self.name]
    def suffix(self):
        # NB: for names without a dot this returns the whole name.
        k=max(0,self.name.rfind('.'))
        return self.name[k:]
    def height(self):
        """Return the depth of this node (root has height 0)."""
        d=self
        val=-1
        while d:
            d=d.parent
            val+=1
        return val
    def listdir(self):
        """List the folder contents on disk, sorted."""
        lst=Utils.listdir(self.abspath())
        lst.sort()
        return lst
    def mkdir(self):
        """Create the folder (and its parents) on disk if needed."""
        if getattr(self,'cache_isdir',None):
            return
        try:
            self.parent.mkdir()
        except OSError:
            pass
        if self.name:
            try:
                os.makedirs(self.abspath())
            except OSError:
                pass
            if not os.path.isdir(self.abspath()):
                raise Errors.WafError('Could not create the directory %s'%self.abspath())
            try:
                self.children
            except AttributeError:
                self.children={}
        self.cache_isdir=True
    def find_node(self,lst):
        """Find (or create) a node for a path that must exist on disk.

        Returns None when the path does not exist; intermediate nodes
        created speculatively are evicted again in that case.
        """
        if isinstance(lst,str):
            lst=[x for x in split_path(lst)if x and x!='.']
        cur=self
        for x in lst:
            if x=='..':
                cur=cur.parent or cur
                continue
            try:
                ch=cur.children
            except AttributeError:
                cur.children={}
            else:
                try:
                    cur=cur.children[x]
                    continue
                except KeyError:
                    pass
            cur=self.__class__(x,cur)
            try:
                os.stat(cur.abspath())
            except OSError:
                cur.evict()
                return None
        ret=cur
        try:
            os.stat(ret.abspath())
        except OSError:
            ret.evict()
            return None
        # Mark the whole chain of parents as known directories.
        try:
            while not getattr(cur.parent,'cache_isdir',None):
                cur=cur.parent
                cur.cache_isdir=True
        except AttributeError:
            pass
        return ret
    def make_node(self,lst):
        """Find or create a node for the given path, without touching the disk."""
        if isinstance(lst,str):
            lst=[x for x in split_path(lst)if x and x!='.']
        cur=self
        for x in lst:
            if x=='..':
                cur=cur.parent or cur
                continue
            if getattr(cur,'children',{}):
                if x in cur.children:
                    cur=cur.children[x]
                    continue
            else:
                cur.children={}
            cur=self.__class__(x,cur)
        return cur
    def search_node(self,lst):
        """Return the existing node for the path, or None; never creates nodes."""
        if isinstance(lst,str):
            lst=[x for x in split_path(lst)if x and x!='.']
        cur=self
        for x in lst:
            if x=='..':
                cur=cur.parent or cur
            else:
                try:
                    cur=cur.children[x]
                except(AttributeError,KeyError):
                    return None
        return cur
    def path_from(self,node):
        """Return the relative path from ``node`` to this node ('.' if equal)."""
        c1=self
        c2=node
        c1h=c1.height()
        c2h=c2.height()
        lst=[]
        up=0
        # Walk both nodes up to a common ancestor, recording the segments.
        while c1h>c2h:
            lst.append(c1.name)
            c1=c1.parent
            c1h-=1
        while c2h>c1h:
            up+=1
            c2=c2.parent
            c2h-=1
        while id(c1)!=id(c2):
            lst.append(c1.name)
            up+=1
            c1=c1.parent
            c2=c2.parent
        for i in range(up):
            lst.append('..')
        lst.reverse()
        return os.sep.join(lst)or'.'
    def abspath(self):
        """Return the absolute path (cached after the first call)."""
        try:
            return self.cache_abspath
        except AttributeError:
            pass
        if os.sep=='/':
            if not self.parent:
                val=os.sep
            elif not self.parent.name:
                val=os.sep+self.name
            else:
                val=self.parent.abspath()+os.sep+self.name
        else:
            # win32: drive letters ('C:') must keep a trailing separator.
            if not self.parent:
                val=''
            elif not self.parent.name:
                val=self.name+os.sep
            else:
                val=self.parent.abspath().rstrip(os.sep)+os.sep+self.name
        self.cache_abspath=val
        return val
    def is_child_of(self,node):
        """Return True if ``node`` is an ancestor of (or equals) this node."""
        p=self
        diff=self.height()-node.height()
        while diff>0:
            diff-=1
            p=p.parent
        return id(p)==id(node)
    def ant_iter(self,accept=None,maxdepth=25,pats=[],dir=False,src=True,remove=True):
        """Recursive generator behind ant_glob; yields matching nodes."""
        dircont=self.listdir()
        dircont.sort()
        try:
            lst=set(self.children.keys())
        except AttributeError:
            self.children={}
        else:
            if remove:
                # Drop nodes whose files have disappeared from the disk.
                for x in lst-set(dircont):
                    self.children[x].evict()
        for name in dircont:
            npats=accept(name,pats)
            if npats and npats[0]:
                accepted=[]in npats[0]
                node=self.make_node([name])
                isdir=os.path.isdir(node.abspath())
                if accepted:
                    if isdir:
                        if dir:
                            yield node
                    else:
                        if src:
                            yield node
                if getattr(node,'cache_isdir',None)or isdir:
                    node.cache_isdir=True
                    if maxdepth:
                        for k in node.ant_iter(accept=accept,maxdepth=maxdepth-1,pats=npats,dir=dir,src=src,remove=remove):
                            yield k
        # FIX: was 'raise StopIteration' — a RuntimeError inside a
        # generator on Python 3.7+ (PEP 479). 'return' is equivalent.
        return
    def ant_glob(self,*k,**kw):
        """Match files/folders below this node with ant-style patterns.

        Keywords: incl/excl (patterns), dir/src (what to yield),
        ignorecase, remove, flat (return a joined string).
        """
        src=kw.get('src',True)
        dir=kw.get('dir',False)
        excl=kw.get('excl',exclude_regs)
        incl=k and k[0]or kw.get('incl','**')
        reflags=kw.get('ignorecase',0)and re.I
        def to_pat(s):
            # Compile 'a/b/*.c'-style patterns into lists of regexes,
            # with '**' kept as a literal marker for filtre() below.
            lst=Utils.to_list(s)
            ret=[]
            for x in lst:
                x=x.replace('\\','/').replace('//','/')
                if x.endswith('/'):
                    x+='**'
                lst2=x.split('/')
                accu=[]
                for k in lst2:
                    if k=='**':
                        accu.append(k)
                    else:
                        k=k.replace('.','[.]').replace('*','.*').replace('?','.').replace('+','\\+')
                        k='^%s$'%k
                        try:
                            accu.append(re.compile(k,flags=reflags))
                        except Exception as e:
                            # FIX: was 'except Exception ,e:' — Python-2-only
                            # syntax (SyntaxError on Python 3).
                            raise Errors.WafError("Invalid pattern: %s"%k,e)
                ret.append(accu)
            return ret
        def filtre(name,nn):
            # Advance each pattern by one path segment for 'name'.
            ret=[]
            for lst in nn:
                if not lst:
                    pass
                elif lst[0]=='**':
                    ret.append(lst)
                    if len(lst)>1:
                        if lst[1].match(name):
                            ret.append(lst[2:])
                    else:
                        ret.append([])
                elif lst[0].match(name):
                    ret.append(lst[1:])
            return ret
        def accept(name,pats):
            nacc=filtre(name,pats[0])
            nrej=filtre(name,pats[1])
            if[]in nrej:
                nacc=[]
            return[nacc,nrej]
        ret=[x for x in self.ant_iter(accept=accept,pats=[to_pat(incl),to_pat(excl)],maxdepth=25,dir=dir,src=src,remove=kw.get('remove',True))]
        if kw.get('flat',False):
            return' '.join([x.path_from(self)for x in ret])
        return ret
    def is_src(self):
        """Return True if this node lies below the source directory."""
        cur=self
        x=id(self.ctx.srcnode)
        y=id(self.ctx.bldnode)
        while cur.parent:
            if id(cur)==y:
                return False
            if id(cur)==x:
                return True
            cur=cur.parent
        return False
    def is_bld(self):
        """Return True if this node lies below the build directory."""
        cur=self
        y=id(self.ctx.bldnode)
        while cur.parent:
            if id(cur)==y:
                return True
            cur=cur.parent
        return False
    def get_src(self):
        """Return the equivalent node under the source directory."""
        cur=self
        x=id(self.ctx.srcnode)
        y=id(self.ctx.bldnode)
        lst=[]
        while cur.parent:
            if id(cur)==y:
                lst.reverse()
                return self.ctx.srcnode.make_node(lst)
            if id(cur)==x:
                return self
            lst.append(cur.name)
            cur=cur.parent
        return self
    def get_bld(self):
        """Return the equivalent node under the build directory."""
        cur=self
        x=id(self.ctx.srcnode)
        y=id(self.ctx.bldnode)
        lst=[]
        while cur.parent:
            if id(cur)==y:
                return self
            if id(cur)==x:
                lst.reverse()
                return self.ctx.bldnode.make_node(lst)
            lst.append(cur.name)
            cur=cur.parent
        # Node outside both trees: map it under bld/__root__ (strip the
        # win32 drive colon so 'C:' becomes a plain folder name).
        lst.reverse()
        if lst and Utils.is_win32 and len(lst[0])==2 and lst[0].endswith(':'):
            lst[0]=lst[0][0]
        return self.ctx.bldnode.make_node(['__root__']+lst)
    def find_resource(self,lst):
        """Find an existing *file*: build output first, then source."""
        if isinstance(lst,str):
            lst=[x for x in split_path(lst)if x and x!='.']
        node=self.get_bld().search_node(lst)
        if not node:
            self=self.get_src()
            node=self.find_node(lst)
        if node:
            if os.path.isdir(node.abspath()):
                return None
        return node
    def find_or_declare(self,lst):
        """Return a build output node, creating its folder if necessary."""
        if isinstance(lst,str):
            lst=[x for x in split_path(lst)if x and x!='.']
        node=self.get_bld().search_node(lst)
        if node:
            if not os.path.isfile(node.abspath()):
                node.sig=None
                node.parent.mkdir()
            return node
        self=self.get_src()
        node=self.find_node(lst)
        if node:
            if not os.path.isfile(node.abspath()):
                node.sig=None
                node.parent.mkdir()
            return node
        node=self.get_bld().make_node(lst)
        node.parent.mkdir()
        return node
    def find_dir(self,lst):
        """Find an existing directory node, or None."""
        if isinstance(lst,str):
            lst=[x for x in split_path(lst)if x and x!='.']
        node=self.find_node(lst)
        try:
            if not os.path.isdir(node.abspath()):
                return None
        except(OSError,AttributeError):
            return None
        return node
    def change_ext(self,ext,ext_in=None):
        """Declare a sibling output node with the extension replaced."""
        name=self.name
        if ext_in is None:
            k=name.rfind('.')
            if k>=0:
                name=name[:k]+ext
            else:
                name=name+ext
        else:
            name=name[:-len(ext_in)]+ext
        return self.parent.find_or_declare([name])
    def nice_path(self,env=None):
        return self.path_from(self.ctx.launch_node())
    def bldpath(self):
        return self.path_from(self.ctx.bldnode)
    def srcpath(self):
        return self.path_from(self.ctx.srcnode)
    def relpath(self):
        # bldpath for build nodes, srcpath otherwise.
        cur=self
        x=id(self.ctx.bldnode)
        while cur.parent:
            if id(cur)==x:
                return self.bldpath()
            cur=cur.parent
        return self.srcpath()
    def bld_dir(self):
        return self.parent.bldpath()
    def bld_base(self):
        s=os.path.splitext(self.name)[0]
        return self.bld_dir()+os.sep+s
    def get_bld_sig(self):
        """Return (and cache) the signature used for dependency tracking."""
        try:
            return self.cache_sig
        except AttributeError:
            pass
        if not self.is_bld()or self.ctx.bldnode is self.ctx.srcnode:
            # Source files are hashed; build outputs rely on the sig set
            # by the task that produced them.
            self.sig=Utils.h_file(self.abspath())
        self.cache_sig=ret=self.sig
        return ret
    search=search_node
# Lock taken around (un)pickling of the node tree — presumably to guard
# __getstate__/__setstate__ while the build cache is saved; confirm
# against waflib.Context/Build usage.
pickle_lock=Utils.threading.Lock()

class Nod3(Node):
    # NOTE(review): appears to be the concrete node class that build
    # contexts instantiate (the 'ctx' attribute the methods above rely on
    # is bound on this subclass elsewhere) — confirm in waflib.Context.
    pass
| {
"content_hash": "6df57913b5c2db97e9e39e66feb2b0e5",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 137,
"avg_line_length": 22.957081545064376,
"alnum_prop": 0.6277808936249766,
"repo_name": "pipsiscool/audacity",
"id": "79d6bbfcb6585eb9de4e4429c59063bd7c1027fd",
"size": "10698",
"binary": false,
"copies": "62",
"ref": "refs/heads/master",
"path": "lib-src/lv2/sratom/waflib/Node.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "141298"
},
{
"name": "Awk",
"bytes": "2350"
},
{
"name": "C",
"bytes": "16931951"
},
{
"name": "C++",
"bytes": "21277015"
},
{
"name": "CMake",
"bytes": "102838"
},
{
"name": "CSS",
"bytes": "87696"
},
{
"name": "Common Lisp",
"bytes": "533537"
},
{
"name": "Groff",
"bytes": "65243"
},
{
"name": "HTML",
"bytes": "2177363"
},
{
"name": "Inno Setup",
"bytes": "19531"
},
{
"name": "Java",
"bytes": "84589"
},
{
"name": "M",
"bytes": "6242"
},
{
"name": "Makefile",
"bytes": "141297"
},
{
"name": "Matlab",
"bytes": "2467"
},
{
"name": "NewLisp",
"bytes": "2831"
},
{
"name": "Objective-C",
"bytes": "17554"
},
{
"name": "Pascal",
"bytes": "17208"
},
{
"name": "Perl",
"bytes": "129212"
},
{
"name": "Prolog",
"bytes": "939"
},
{
"name": "Python",
"bytes": "3636067"
},
{
"name": "QMake",
"bytes": "971"
},
{
"name": "R",
"bytes": "305850"
},
{
"name": "Shell",
"bytes": "6354469"
},
{
"name": "Smarty",
"bytes": "172490"
},
{
"name": "TeX",
"bytes": "146115"
}
],
"symlink_target": ""
} |
from flask import Flask
from flask_mongoengine import MongoEngine

# Single module-level Flask application for the package.
app = Flask(__name__)
# Settings come from the top-level config module (config.py on sys.path).
app.config.from_object("config")
# MongoEngine ODM bound to the app — connection parameters are read from
# app.config (presumably MONGODB_SETTINGS; confirm in config.py).
db = MongoEngine(app)

# Imported after 'app'/'db' exist so that views/models can themselves
# import them from this module without a circular-import failure.
from app import views,models
| {
"content_hash": "e17536eeb24ec55a361b2491d18a921f",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 42,
"avg_line_length": 19.555555555555557,
"alnum_prop": 0.7613636363636364,
"repo_name": "dangger/awesome-flask-todo",
"id": "d32a838943adb33cc758e3cf68c04187e2825104",
"size": "176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2314"
},
{
"name": "Python",
"bytes": "4060"
}
],
"symlink_target": ""
} |
# Sphinx configuration for the gulpy documentation build.
import os
import sys
from datetime import datetime

# Make the project package importable so autodoc can find it.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
              'sphinx.ext.todo',
              'sphinx.ext.autosummary',
              'sphinx.ext.intersphinx',
              'sphinx.ext.viewcode']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'gulpy'
author = 'Gilmar Quinelato'
# Copyright year tracks the build date automatically.
copyright = '%d, %s' % (datetime.now().year, author)

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    'description': 'A python task execution library',
    'github_user': 'gilmarsquinelato',
    'github_repo': 'gulpy',
    'github_button': True,
    'github_banner': True
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_show_sourcelink = False
display_toc = False

html_sidebars = {
    '**': [
        'globaltoc.html',
        'links.html'
    ]
}

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'gulpydoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'gulpy.tex', 'gulpy Documentation',
     'Author', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'gulpy', 'gulpy Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'gulpy', 'gulpy Documentation',
     author, 'gulpy', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| {
"content_hash": "805d2439c16cee1560943eda0dff823d",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 78,
"avg_line_length": 28.033519553072626,
"alnum_prop": 0.6476683937823834,
"repo_name": "gilmarsquinelato/gulpy",
"id": "03c5e613d721df61766d2c601c7e4ef33cb80219",
"size": "5699",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "16746"
}
],
"symlink_target": ""
} |
"""OSF mailing utilities.
Email templates go in website/templates/emails
Templates must end in ``.txt.mako`` for plaintext emails or``.html.mako`` for html emails.
You can then create a `Mail` object given the basename of the template and
the email subject. ::
CONFIRM_EMAIL = Mail(tpl_prefix='confirm', subject="Confirm your email address")
You can then use ``send_mail`` to send the email.
Usage: ::
from website import mails
...
mails.send_mail('foo@bar.com', mails.CONFIRM_EMAIL, user=user)
"""
import os
import logging
from mako.lookup import TemplateLookup, Template
from framework.email import tasks
from website import settings
logger = logging.getLogger(__name__)
# Location of the Mako email templates and a shared lookup used to render them.
EMAIL_TEMPLATES_DIR = os.path.join(settings.TEMPLATES_PATH, 'emails')
_tpl_lookup = TemplateLookup(
    directories=[EMAIL_TEMPLATES_DIR],
)
# Template filename suffixes for the plaintext and HTML variants of an email.
TXT_EXT = '.txt.mako'
HTML_EXT = '.html.mako'
class Mail(object):
    """A renderable email definition.

    :param str tpl_prefix: The template name prefix.
    :param str subject: The subject of the email (itself a Mako template
        string, so it may contain ``${...}`` placeholders).
    :param iterable categories: Categories to add to the email using SendGrid's
        SMTPAPI.  Used for email analytics.
        See https://sendgrid.com/docs/User_Guide/Statistics/categories.html
    """

    def __init__(self, tpl_prefix, subject, categories=None):
        self.tpl_prefix = tpl_prefix
        self._subject = subject
        self.categories = categories

    def _render(self, extension, context):
        # Both variants share the "<prefix><extension>" template naming scheme.
        return render_message(self.tpl_prefix + extension, **context)

    def html(self, **context):
        """Render the HTML email message."""
        return self._render(HTML_EXT, context)

    def text(self, **context):
        """Render the plaintext email message"""
        return self._render(TXT_EXT, context)

    def subject(self, **context):
        """Render the subject line with the given template context."""
        return Template(self._subject).render(**context)
def render_message(tpl_name, **context):
    """Look up the named Mako email template and render it with ``context``."""
    return _tpl_lookup.get_template(tpl_name).render(**context)
def send_mail(to_addr, mail, mimetype='plain', from_addr=None, mailer=None,
              username=None, password=None, callback=None, **context):
    """Send an email from the OSF.
    Example: ::
        from website import mails
        mails.send_mail('foo@bar.com', mails.TEST, name="Foo")
    :param str to_addr: The recipient's email address
    :param Mail mail: The mail object
    :param str mimetype: Either 'plain' or 'html'
    :param function callback: celery task to execute after send_mail completes
    :param **context: Context vars for the message template
    .. note:
         Uses celery if available
    """
    from_addr = from_addr or settings.FROM_EMAIL
    mailer = mailer or tasks.send_email
    subject = mail.subject(**context)
    # 'plain'/'txt' selects the plaintext template; anything else gets HTML.
    message = mail.text(**context) if mimetype in ('plain', 'txt') else mail.html(**context)
    # Don't use ttls and login in DEBUG_MODE
    ttls = login = not settings.DEBUG_MODE
    logger.debug('Sending email...')
    # Lazy %-style args: the message is only formatted when DEBUG logging is on.
    logger.debug(u'To: %s\nFrom: %s\nSubject: %s\nMessage: %s',
                 to_addr, from_addr, subject, message)
    kwargs = dict(
        from_addr=from_addr,
        to_addr=to_addr,
        subject=subject,
        message=message,
        mimetype=mimetype,
        ttls=ttls,
        login=login,
        username=username,
        password=password,
        categories=mail.categories,
    )
    if settings.USE_EMAIL:
        if settings.USE_CELERY:
            # Chain the optional callback task after the async send.
            return mailer.apply_async(kwargs=kwargs, link=callback)
        else:
            ret = mailer(**kwargs)
            if callback:
                callback()
            return ret
def get_english_article(word):
    """
    Decide whether to use 'a' or 'an' for a given English word.
    :param word: the word immediately after the article
    :return: 'a' or 'an'
    """
    if not word:
        # Guard: the original indexed word[0] and raised IndexError on ''.
        return 'a'
    return 'a' + ('n' if word[0].lower() in 'aeiou' else '')
# Predefined Emails
TEST = Mail('test', subject='A test email to ${name}', categories=['test'])
# Emails for first-time login through external identity providers.
EXTERNAL_LOGIN_CONFIRM_EMAIL_CREATE = Mail(
    'external_confirm_create',
    subject='Open Science Framework Account Verification'
)
EXTERNAL_LOGIN_CONFIRM_EMAIL_LINK = Mail(
    'external_confirm_link',
    subject='Open Science Framework Account Verification'
)
EXTERNAL_LOGIN_LINK_SUCCESS = Mail(
    'external_confirm_success',
    subject='Open Science Framework Account Verification Success'
)
# Sign up confirmation emails for OSF, native campaigns and branded campaigns
INITIAL_CONFIRM_EMAIL = Mail(
    'initial_confirm',
    subject='Open Science Framework Account Verification'
)
CONFIRM_EMAIL = Mail(
    'confirm',
    subject='Add a new email to your OSF account'
)
CONFIRM_EMAIL_PREREG = Mail(
    'confirm_prereg',
    subject='Open Science Framework Account Verification, Preregistration Challenge'
)
CONFIRM_EMAIL_ERPC = Mail(
    'confirm_erpc',
    subject='Open Science Framework Account Verification, Election Research Preacceptance Competition'
)
# Factory: one Mail per preprint provider, keyed by template name suffix.
CONFIRM_EMAIL_PREPRINTS = lambda name, provider: Mail(
    'confirm_preprints_{}'.format(name),
    subject='Open Science Framework Account Verification, {} Preprints Service'.format(provider)
)
# Merge account, add or remove email confirmation emails.
CONFIRM_MERGE = Mail('confirm_merge', subject='Confirm account merge')
REMOVED_EMAIL = Mail('email_removed', subject='Email address removed from your OSF account')
PRIMARY_EMAIL_CHANGED = Mail('primary_email_changed', subject='Primary email changed')
# Contributor added confirmation emails
INVITE_DEFAULT = Mail(
    'invite_default',
    subject='You have been added as a contributor to an OSF project.'
)
INVITE_PREPRINT = lambda template, provider: Mail(
    'invite_preprints_{}'.format(template),
    subject='You have been added as a contributor to {} {} preprint.'.format(get_english_article(provider), provider)
)
CONTRIBUTOR_ADDED_DEFAULT = Mail(
    'contributor_added_default',
    subject='You have been added as a contributor to an OSF project.'
)
CONTRIBUTOR_ADDED_PREPRINT = lambda template, provider: Mail(
    'contributor_added_preprints_{}'.format(template),
    subject='You have been added as a contributor to {} {} preprint.'.format(get_english_article(provider), provider)
)
FORWARD_INVITE = Mail('forward_invite', subject='Please forward to ${fullname}')
FORWARD_INVITE_REGISTERED = Mail('forward_invite_registered', subject='Please forward to ${fullname}')
FORGOT_PASSWORD = Mail('forgot_password', subject='Reset Password')
PASSWORD_RESET = Mail('password_reset', subject='Your OSF password has been reset')
PENDING_VERIFICATION = Mail('pending_invite', subject='Your account is almost ready!')
PENDING_VERIFICATION_REGISTERED = Mail('pending_registered', subject='Received request to be a contributor')
# Support-desk requests intentionally share the 'support_request' template.
REQUEST_EXPORT = Mail('support_request', subject='[via OSF] Export Request')
REQUEST_DEACTIVATION = Mail('support_request', subject='[via OSF] Deactivation Request')
SPAM_USER_BANNED = Mail('spam_user_banned', subject='[OSF] Account flagged as spam')
CONFERENCE_SUBMITTED = Mail(
    'conference_submitted',
    subject='Project created on Open Science Framework',
)
CONFERENCE_INACTIVE = Mail(
    'conference_inactive',
    subject='Open Science Framework Error: Conference inactive',
)
CONFERENCE_FAILED = Mail(
    'conference_failed',
    subject='Open Science Framework Error: No files attached',
)
DIGEST = Mail(
    'digest', subject='OSF Notifications',
    categories=['notifications', 'notifications-digest']
)
TRANSACTIONAL = Mail(
    'transactional', subject='OSF: ${subject}',
    categories=['notifications', 'notifications-transactional']
)
# Retraction related Mail objects
PENDING_RETRACTION_ADMIN = Mail(
    'pending_retraction_admin',
    subject='Withdrawal pending for one of your projects.'
)
PENDING_RETRACTION_NON_ADMIN = Mail(
    'pending_retraction_non_admin',
    subject='Withdrawal pending for one of your projects.'
)
# Embargo related Mail objects
PENDING_EMBARGO_ADMIN = Mail(
    'pending_embargo_admin',
    subject='Registration pending for one of your projects.'
)
PENDING_EMBARGO_NON_ADMIN = Mail(
    'pending_embargo_non_admin',
    subject='Registration pending for one of your projects.'
)
# Registration related Mail Objects
PENDING_REGISTRATION_ADMIN = Mail(
    'pending_registration_admin',
    subject='Registration pending for one of your projects.'
)
PENDING_REGISTRATION_NON_ADMIN = Mail(
    'pending_registration_non_admin',
    subject='Registration pending for one of your projects.'
)
PENDING_EMBARGO_TERMINATION_ADMIN = Mail(
    'pending_embargo_termination_admin',
    subject='Request to end an embargo early for one of your projects.'
)
PENDING_EMBARGO_TERMINATION_NON_ADMIN = Mail(
    'pending_embargo_termination_non_admin',
    subject='Request to end an embargo early for one of your projects.'
)
FILE_OPERATION_SUCCESS = Mail(
    'file_operation_success',
    subject='Your ${action} has finished',
)
FILE_OPERATION_FAILED = Mail(
    'file_operation_failed',
    subject='Your ${action} has failed',
)
# Mako snippet embedded in subjects below: unescapes HTML entities in the
# registration title (``src`` is supplied in the template context).
UNESCAPE = '<% from website.util.sanitize import unescape_entities %> ${unescape_entities(src.title)}'
PROBLEM_REGISTERING = 'Problem registering ' + UNESCAPE
PROBLEM_REGISTERING_STUCK = PROBLEM_REGISTERING + '- Stuck Registration'
ARCHIVE_SIZE_EXCEEDED_DESK = Mail(
    'archive_size_exceeded_desk',
    subject=PROBLEM_REGISTERING
)
ARCHIVE_SIZE_EXCEEDED_USER = Mail(
    'archive_size_exceeded_user',
    subject=PROBLEM_REGISTERING
)
ARCHIVE_COPY_ERROR_DESK = Mail(
    'archive_copy_error_desk',
    subject=PROBLEM_REGISTERING
)
ARCHIVE_COPY_ERROR_USER = Mail(
    'archive_copy_error_user',
    subject=PROBLEM_REGISTERING
)
ARCHIVE_FILE_NOT_FOUND_DESK = Mail(
    'archive_file_not_found_desk',
    subject=PROBLEM_REGISTERING
)
ARCHIVE_FILE_NOT_FOUND_USER = Mail(
    'archive_file_not_found_user',
    subject='Registration failed because of altered files'
)
ARCHIVE_UNCAUGHT_ERROR_DESK = Mail(
    'archive_uncaught_error_desk',
    subject=PROBLEM_REGISTERING
)
ARCHIVE_REGISTRATION_STUCK_DESK = Mail(
    'archive_registration_stuck_desk',
    subject=PROBLEM_REGISTERING_STUCK
)
ARCHIVE_UNCAUGHT_ERROR_USER = Mail(
    'archive_uncaught_error_user',
    subject=PROBLEM_REGISTERING
)
ARCHIVE_SUCCESS = Mail(
    'archive_success',
    subject='Registration of ' + UNESCAPE + ' complete'
)
WELCOME = Mail(
    'welcome',
    subject='Welcome to the Open Science Framework'
)
WELCOME_OSF4I = Mail(
    'welcome_osf4i',
    subject='Welcome to the Open Science Framework'
)
PREREG_CHALLENGE_REJECTED = Mail(
    'prereg_challenge_rejected',
    subject='Revisions required, your submission for the Preregistration Challenge is not yet registered'
)
PREREG_CHALLENGE_ACCEPTED = Mail(
    'prereg_challenge_accepted',
    subject='Your research plan has been registered and accepted for the Preregistration Challenge'
)
# Fully generic email: both template and subject come from the call site.
EMPTY = Mail('empty', subject='${subject}')
| {
"content_hash": "ed4bd0d0c5ac3c74acd50d50ca7a0464",
"timestamp": "",
"source": "github",
"line_count": 351,
"max_line_length": 117,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.7046735770140026,
"repo_name": "rdhyee/osf.io",
"id": "2afb0e160195038d90972287581f1e5b1aaa6ace",
"size": "11022",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "website/mails/mails.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "174764"
},
{
"name": "HTML",
"bytes": "131860"
},
{
"name": "JavaScript",
"bytes": "1663707"
},
{
"name": "Mako",
"bytes": "679787"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "6720626"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import os
from collections import defaultdict
from datetime import datetime, timedelta
from boto3 import Session
from moto.core import ACCOUNT_ID, BaseBackend, CloudFormationModel
from moto.core.utils import unix_time
from moto.utilities.tagging_service import TaggingService
from moto.core.exceptions import JsonRESTError
from .utils import decrypt, encrypt, generate_key_id, generate_master_key
class Key(CloudFormationModel):
    """In-memory model of a KMS customer master key (CMK)."""
    def __init__(
        self, policy, key_usage, customer_master_key_spec, description, region
    ):
        self.id = generate_key_id()
        self.creation_date = unix_time()
        self.policy = policy
        self.key_usage = key_usage
        self.key_state = "Enabled"
        self.description = description
        self.enabled = True
        self.region = region
        self.account_id = ACCOUNT_ID
        self.key_rotation_status = False
        self.deletion_date = None
        self.key_material = generate_master_key()
        self.origin = "AWS_KMS"
        self.key_manager = "CUSTOMER"
        # Default spec mirrors AWS: symmetric key when none is specified.
        self.customer_master_key_spec = customer_master_key_spec or "SYMMETRIC_DEFAULT"
    @property
    def physical_resource_id(self):
        """CloudFormation physical id: the raw key id."""
        return self.id
    @property
    def arn(self):
        """Full key ARN built from region, account and key id."""
        return "arn:aws:kms:{0}:{1}:key/{2}".format(
            self.region, self.account_id, self.id
        )
    @property
    def encryption_algorithms(self):
        """Encryption algorithms valid for this key, or None for signing keys."""
        if self.key_usage == "SIGN_VERIFY":
            return None
        elif self.customer_master_key_spec == "SYMMETRIC_DEFAULT":
            return ["SYMMETRIC_DEFAULT"]
        else:
            return ["RSAES_OAEP_SHA_1", "RSAES_OAEP_SHA_256"]
    @property
    def signing_algorithms(self):
        """Signing algorithms valid for this key spec, or None for
        encrypt/decrypt keys; non-ECC specs fall through to the RSA set."""
        if self.key_usage == "ENCRYPT_DECRYPT":
            return None
        elif self.customer_master_key_spec in ["ECC_NIST_P256", "ECC_SECG_P256K1"]:
            return ["ECDSA_SHA_256"]
        elif self.customer_master_key_spec == "ECC_NIST_P384":
            return ["ECDSA_SHA_384"]
        elif self.customer_master_key_spec == "ECC_NIST_P521":
            return ["ECDSA_SHA_512"]
        else:
            return [
                "RSASSA_PKCS1_V1_5_SHA_256",
                "RSASSA_PKCS1_V1_5_SHA_384",
                "RSASSA_PKCS1_V1_5_SHA_512",
                "RSASSA_PSS_SHA_256",
                "RSASSA_PSS_SHA_384",
                "RSASSA_PSS_SHA_512",
            ]
    def to_dict(self):
        """Serialize to the KeyMetadata shape returned by DescribeKey."""
        key_dict = {
            "KeyMetadata": {
                "AWSAccountId": self.account_id,
                "Arn": self.arn,
                "CreationDate": self.creation_date,
                "CustomerMasterKeySpec": self.customer_master_key_spec,
                "Description": self.description,
                "Enabled": self.enabled,
                "EncryptionAlgorithms": self.encryption_algorithms,
                "KeyId": self.id,
                "KeyManager": self.key_manager,
                "KeyUsage": self.key_usage,
                "KeyState": self.key_state,
                "Origin": self.origin,
                "SigningAlgorithms": self.signing_algorithms,
            }
        }
        # DeletionDate only appears once deletion has been scheduled.
        if self.key_state == "PendingDeletion":
            key_dict["KeyMetadata"]["DeletionDate"] = unix_time(self.deletion_date)
        return key_dict
    def delete(self, region_name):
        # Delegates to the region's backend (kms_backends defined below).
        kms_backends[region_name].delete_key(self.id)
    @staticmethod
    def cloudformation_name_type():
        return None
    @staticmethod
    def cloudformation_type():
        # https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kms-key.html
        return "AWS::KMS::Key"
    @classmethod
    def create_from_cloudformation_json(
        self, resource_name, cloudformation_json, region_name
    ):
        # NOTE(review): first parameter of a @classmethod is conventionally
        # named ``cls``; here it is bound to the class, not an instance.
        kms_backend = kms_backends[region_name]
        properties = cloudformation_json["Properties"]
        key = kms_backend.create_key(
            policy=properties["KeyPolicy"],
            key_usage="ENCRYPT_DECRYPT",
            customer_master_key_spec="SYMMETRIC_DEFAULT",
            description=properties["Description"],
            tags=properties.get("Tags", []),
            region=region_name,
        )
        key.key_rotation_status = properties["EnableKeyRotation"]
        key.enabled = properties["Enabled"]
        return key
    def get_cfn_attribute(self, attribute_name):
        """Resolve a CloudFormation Fn::GetAtt; only 'Arn' is supported."""
        from moto.cloudformation.exceptions import UnformattedGetAttTemplateException
        if attribute_name == "Arn":
            return self.arn
        raise UnformattedGetAttTemplateException()
class KmsBackend(BaseBackend):
    """In-memory, per-region mock of the AWS KMS service."""
    def __init__(self):
        # key id -> Key, key id -> set of alias names, plus resource tags.
        self.keys = {}
        self.key_to_aliases = defaultdict(set)
        self.tagger = TaggingService(keyName="TagKey", valueName="TagValue")
    def create_key(
        self, policy, key_usage, customer_master_key_spec, description, tags, region
    ):
        """Create and register a new Key, applying any initial tags."""
        key = Key(policy, key_usage, customer_master_key_spec, description, region)
        self.keys[key.id] = key
        if tags is not None and len(tags) > 0:
            self.tag_resource(key.id, tags)
        return key
    def update_key_description(self, key_id, description):
        key = self.keys[self.get_key_id(key_id)]
        key.description = description
    def delete_key(self, key_id):
        """Remove the key with its aliases and tags.

        Returns the removed Key, or None when the id is unknown.
        """
        if key_id in self.keys:
            if key_id in self.key_to_aliases:
                self.key_to_aliases.pop(key_id)
            self.tagger.delete_all_tags_for_resource(key_id)
            return self.keys.pop(key_id)
    def describe_key(self, key_id):
        # allow the different methods (alias, ARN :key/, keyId, ARN alias) to
        # describe key not just KeyId
        key_id = self.get_key_id(key_id)
        if r"alias/" in str(key_id).lower():
            key_id = self.get_key_id_from_alias(key_id.split("alias/")[1])
        return self.keys[self.get_key_id(key_id)]
    def list_keys(self):
        return self.keys.values()
    @staticmethod
    def get_key_id(key_id):
        # Allow use of ARN as well as pure KeyId
        if key_id.startswith("arn:") and ":key/" in key_id:
            return key_id.split(":key/")[1]
        return key_id
    @staticmethod
    def get_alias_name(alias_name):
        # Allow use of ARN as well as alias name
        if alias_name.startswith("arn:") and ":alias/" in alias_name:
            return alias_name.split(":alias/")[1]
        return alias_name
    def any_id_to_key_id(self, key_id):
        """Go from any valid key ID to the raw key ID.
        Acceptable inputs:
        - raw key ID
        - key ARN
        - alias name
        - alias ARN
        """
        key_id = self.get_alias_name(key_id)
        key_id = self.get_key_id(key_id)
        if key_id.startswith("alias/"):
            key_id = self.get_key_id_from_alias(key_id)
        return key_id
    def alias_exists(self, alias_name):
        """True if any key currently owns the given alias name."""
        for aliases in self.key_to_aliases.values():
            if alias_name in aliases:
                return True
        return False
    def add_alias(self, target_key_id, alias_name):
        self.key_to_aliases[target_key_id].add(alias_name)
    def delete_alias(self, alias_name):
        """Delete the alias."""
        for aliases in self.key_to_aliases.values():
            if alias_name in aliases:
                aliases.remove(alias_name)
    def get_all_aliases(self):
        return self.key_to_aliases
    def get_key_id_from_alias(self, alias_name):
        # NOTE(review): the membership test runs against the comma-joined
        # alias string, so a partial alias (e.g. "foo" vs an existing
        # "foobar") can match the wrong key; ``alias_name in aliases``
        # would be an exact match — confirm intended behavior.
        for key_id, aliases in dict(self.key_to_aliases).items():
            if alias_name in ",".join(aliases):
                return key_id
        return None
    def enable_key_rotation(self, key_id):
        self.keys[self.get_key_id(key_id)].key_rotation_status = True
    def disable_key_rotation(self, key_id):
        self.keys[self.get_key_id(key_id)].key_rotation_status = False
    def get_key_rotation_status(self, key_id):
        return self.keys[self.get_key_id(key_id)].key_rotation_status
    def put_key_policy(self, key_id, policy):
        self.keys[self.get_key_id(key_id)].policy = policy
    def get_key_policy(self, key_id):
        return self.keys[self.get_key_id(key_id)].policy
    def disable_key(self, key_id):
        self.keys[key_id].enabled = False
        self.keys[key_id].key_state = "Disabled"
    def enable_key(self, key_id):
        self.keys[key_id].enabled = True
        self.keys[key_id].key_state = "Enabled"
    def cancel_key_deletion(self, key_id):
        # Per AWS semantics, a key comes back Disabled after cancellation.
        self.keys[key_id].key_state = "Disabled"
        self.keys[key_id].deletion_date = None
    def schedule_key_deletion(self, key_id, pending_window_in_days):
        # Returns the deletion timestamp, or None when the window is outside
        # the 7-30 day range AWS allows.
        if 7 <= pending_window_in_days <= 30:
            self.keys[key_id].enabled = False
            self.keys[key_id].key_state = "PendingDeletion"
            self.keys[key_id].deletion_date = datetime.now() + timedelta(
                days=pending_window_in_days
            )
            return unix_time(self.keys[key_id].deletion_date)
    def encrypt(self, key_id, plaintext, encryption_context):
        """Encrypt plaintext under the key; returns (ciphertext_blob, arn)."""
        key_id = self.any_id_to_key_id(key_id)
        ciphertext_blob = encrypt(
            master_keys=self.keys,
            key_id=key_id,
            plaintext=plaintext,
            encryption_context=encryption_context,
        )
        arn = self.keys[key_id].arn
        return ciphertext_blob, arn
    def decrypt(self, ciphertext_blob, encryption_context):
        """Decrypt a blob; the key id is recovered from the blob itself."""
        plaintext, key_id = decrypt(
            master_keys=self.keys,
            ciphertext_blob=ciphertext_blob,
            encryption_context=encryption_context,
        )
        arn = self.keys[key_id].arn
        return plaintext, arn
    def re_encrypt(
        self,
        ciphertext_blob,
        source_encryption_context,
        destination_key_id,
        destination_encryption_context,
    ):
        """Decrypt then re-encrypt under a new key, as AWS ReEncrypt does."""
        destination_key_id = self.any_id_to_key_id(destination_key_id)
        plaintext, decrypting_arn = self.decrypt(
            ciphertext_blob=ciphertext_blob,
            encryption_context=source_encryption_context,
        )
        new_ciphertext_blob, encrypting_arn = self.encrypt(
            key_id=destination_key_id,
            plaintext=plaintext,
            encryption_context=destination_encryption_context,
        )
        return new_ciphertext_blob, decrypting_arn, encrypting_arn
    def generate_data_key(
        self, key_id, encryption_context, number_of_bytes, key_spec, grant_tokens
    ):
        """Generate a random data key and return it in plain and encrypted form."""
        key_id = self.any_id_to_key_id(key_id)
        if key_spec:
            # Note: Actual validation of key_spec is done in kms.responses
            if key_spec == "AES_128":
                plaintext_len = 16
            else:
                plaintext_len = 32
        else:
            plaintext_len = number_of_bytes
        plaintext = os.urandom(plaintext_len)
        ciphertext_blob, arn = self.encrypt(
            key_id=key_id, plaintext=plaintext, encryption_context=encryption_context
        )
        return plaintext, ciphertext_blob, arn
    def list_resource_tags(self, key_id):
        if key_id in self.keys:
            return self.tagger.list_tags_for_resource(key_id)
        raise JsonRESTError(
            "NotFoundException",
            "The request was rejected because the specified entity or resource could not be found.",
        )
    def tag_resource(self, key_id, tags):
        if key_id in self.keys:
            self.tagger.tag_resource(key_id, tags)
            return {}
        raise JsonRESTError(
            "NotFoundException",
            "The request was rejected because the specified entity or resource could not be found.",
        )
    def untag_resource(self, key_id, tag_names):
        if key_id in self.keys:
            self.tagger.untag_resource_using_names(key_id, tag_names)
            return {}
        raise JsonRESTError(
            "NotFoundException",
            "The request was rejected because the specified entity or resource could not be found.",
        )
# One independent KmsBackend per region, covering the standard, GovCloud
# and China partitions (None selects boto3's default "aws" partition).
kms_backends = {}
for _partition in (None, "aws-us-gov", "aws-cn"):
    _kwargs = {} if _partition is None else {"partition_name": _partition}
    for region in Session().get_available_regions("kms", **_kwargs):
        kms_backends[region] = KmsBackend()
| {
"content_hash": "8ed64681958ca708d26173e67acd44a0",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 100,
"avg_line_length": 34.027472527472526,
"alnum_prop": 0.5971257871790732,
"repo_name": "william-richard/moto",
"id": "7a9918f2bbb5c4917beef89994fee2bb7d347250",
"size": "12386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moto/kms/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "443"
},
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "6637538"
},
{
"name": "Ruby",
"bytes": "188"
},
{
"name": "Scala",
"bytes": "782"
},
{
"name": "Shell",
"bytes": "797"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future.builtins import str
from future import standard_library
from future.utils import text_to_native_str
with standard_library.hooks():
from http import cookies
from future.standard_library.test.support import run_unittest, run_doctest # , check_warnings
from future.tests.base import unittest
import warnings
class CookieTests(unittest.TestCase):
    """Behavioural tests for ``cookies.SimpleCookie``."""
    def setUp(self):
        # Silence the DeprecationWarning raised for insecure cookie classes.
        # self._warnings_manager = check_warnings()
        # self._warnings_manager.__enter__()
        warnings.filterwarnings("ignore", ".* class is insecure.*",
                                DeprecationWarning)
    def tearDown(self):
        # self._warnings_manager.__exit__(None, None, None)
        pass
    def test_basic(self):
        # Each case checks parse (load), repr, serialized output and values.
        cases = [
            {'data': 'chips=ahoy; vienna=finger',
             'dict': {'chips':'ahoy', 'vienna':'finger'},
             'repr': "<SimpleCookie: chips='ahoy' vienna='finger'>",
             'output': 'Set-Cookie: chips=ahoy\nSet-Cookie: vienna=finger'},
            {'data': 'keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"',
             'dict': {'keebler' : 'E=mc2; L="Loves"; fudge=\012;'},
             'repr': '''<SimpleCookie: keebler='E=mc2; L="Loves"; fudge=\\n;'>''',
             'output': 'Set-Cookie: keebler="E=mc2; L=\\"Loves\\"; fudge=\\012;"'},
            # Check illegal cookies that have an '=' char in an unquoted value
            {'data': 'keebler=E=mc2',
             'dict': {'keebler' : 'E=mc2'},
             'repr': "<SimpleCookie: keebler='E=mc2'>",
             'output': 'Set-Cookie: keebler=E=mc2'},
            # Cookies with ':' character in their name. Though not mentioned in
            # RFC, servers / browsers allow it.
            {'data': 'key:term=value:term',
             'dict': {'key:term' : 'value:term'},
             'repr': "<SimpleCookie: key:term='value:term'>",
             'output': 'Set-Cookie: key:term=value:term'},
        ]
        for case in cases:
            C = cookies.SimpleCookie()
            C.load(case['data'])
            self.assertEqual(repr(C), text_to_native_str(case['repr']))
            self.assertEqual(C.output(sep='\n'), case['output'])
            for k, v in sorted(case['dict'].items()):
                self.assertEqual(C[k].value, v)
    def test_load(self):
        C = cookies.SimpleCookie()
        C.load('Customer="WILE_E_COYOTE"; Version=1; Path=/acme')
        self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
        self.assertEqual(C['Customer']['version'], '1')
        self.assertEqual(C['Customer']['path'], '/acme')
        self.assertEqual(C.output(['path']),
                         'Set-Cookie: Customer="WILE_E_COYOTE"; Path=/acme')
        self.assertEqual(C.js_output(), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme; Version=1";
        // end hiding -->
        </script>
        """)
        self.assertEqual(C.js_output(['path']), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme";
        // end hiding -->
        </script>
        """)
    def test_extended_encode(self):
        # Issue 9824: some browsers don't follow the standard; we now
        # encode , and ; to keep them from tripping up.
        C = cookies.SimpleCookie()
        C['val'] = "some,funky;stuff"
        self.assertEqual(C.output(['val']),
            'Set-Cookie: val="some\\054funky\\073stuff"')
    def test_special_attrs(self):
        # 'expires'
        C = cookies.SimpleCookie('Customer="WILE_E_COYOTE"')
        C['Customer']['expires'] = 0
        # can't test exact output, it always depends on current date/time
        self.assertTrue(C.output().endswith('GMT'))
        # loading 'expires'
        C = cookies.SimpleCookie()
        C.load('Customer="W"; expires=Wed, 01 Jan 2010 00:00:00 GMT')
        self.assertEqual(C['Customer']['expires'],
                         'Wed, 01 Jan 2010 00:00:00 GMT')
        C = cookies.SimpleCookie()
        C.load('Customer="W"; expires=Wed, 01 Jan 98 00:00:00 GMT')
        self.assertEqual(C['Customer']['expires'],
                         'Wed, 01 Jan 98 00:00:00 GMT')
        # 'max-age'
        C = cookies.SimpleCookie('Customer="WILE_E_COYOTE"')
        C['Customer']['max-age'] = 10
        self.assertEqual(C.output(),
                         'Set-Cookie: Customer="WILE_E_COYOTE"; Max-Age=10')
    def test_set_secure_httponly_attrs(self):
        C = cookies.SimpleCookie('Customer="WILE_E_COYOTE"')
        C['Customer']['secure'] = True
        C['Customer']['httponly'] = True
        self.assertEqual(C.output(),
            'Set-Cookie: Customer="WILE_E_COYOTE"; httponly; secure')
    def test_secure_httponly_false_if_not_present(self):
        C = cookies.SimpleCookie()
        C.load('eggs=scrambled; Path=/bacon')
        self.assertFalse(C['eggs']['httponly'])
        self.assertFalse(C['eggs']['secure'])
    def test_secure_httponly_true_if_present(self):
        # Issue 16611
        C = cookies.SimpleCookie()
        C.load('eggs=scrambled; httponly; secure; Path=/bacon')
        self.assertTrue(C['eggs']['httponly'])
        self.assertTrue(C['eggs']['secure'])
    def test_secure_httponly_true_if_have_value(self):
        # This isn't really valid, but demonstrates what the current code
        # is expected to do in this case.
        C = cookies.SimpleCookie()
        C.load('eggs=scrambled; httponly=foo; secure=bar; Path=/bacon')
        self.assertTrue(C['eggs']['httponly'])
        self.assertTrue(C['eggs']['secure'])
        # Here is what it actually does; don't depend on this behavior. These
        # checks are testing backward compatibility for issue 16611.
        self.assertEqual(C['eggs']['httponly'], 'foo')
        self.assertEqual(C['eggs']['secure'], 'bar')
    def test_bad_attrs(self):
        # issue 16611: make sure we don't break backward compatibility.
        C = cookies.SimpleCookie()
        C.load('cookie=with; invalid; version; second=cookie;')
        self.assertEqual(C.output(),
            'Set-Cookie: cookie=with\r\nSet-Cookie: second=cookie')
    def test_extra_spaces(self):
        C = cookies.SimpleCookie()
        C.load('eggs = scrambled ; secure ; path = bar ; foo=foo ')
        self.assertEqual(C.output(),
            'Set-Cookie: eggs=scrambled; Path=bar; secure\r\nSet-Cookie: foo=foo')
    def test_quoted_meta(self):
        # Try cookie with quoted meta-data
        C = cookies.SimpleCookie()
        C.load('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"')
        self.assertEqual(C['Customer'].value, 'WILE_E_COYOTE')
        self.assertEqual(C['Customer']['version'], '1')
        self.assertEqual(C['Customer']['path'], '/acme')
        self.assertEqual(C.output(['path']),
                         'Set-Cookie: Customer="WILE_E_COYOTE"; Path=/acme')
        self.assertEqual(C.js_output(), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme; Version=1";
        // end hiding -->
        </script>
        """)
        self.assertEqual(C.js_output(['path']), r"""
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "Customer=\"WILE_E_COYOTE\"; Path=/acme";
        // end hiding -->
        </script>
        """)
class MorselTests(unittest.TestCase):
    """Tests for the Morsel object."""
    def test_reserved_keys(self):
        # NOTE: the contents of M._reserved depend on the cookies
        # implementation/version in use.
        M = cookies.Morsel()
        # tests valid and invalid reserved keys for Morsels
        for i in M._reserved:
            # Test that all valid keys are reported as reserved and set them
            self.assertTrue(M.isReservedKey(i))
            M[i] = '%s_value' % i
        for i in M._reserved:
            # Test that valid key values come out fine
            self.assertEqual(M[i], '%s_value' % i)
        for i in "the holy hand grenade".split():
            # Test that invalid keys raise CookieError
            self.assertRaises(cookies.CookieError,
                              M.__setitem__, i, '%s_value' % i)
    def test_setter(self):
        M = cookies.Morsel()
        # tests the .set method to set keys and their values
        for i in M._reserved:
            # Makes sure that all reserved keys can't be set this way
            self.assertRaises(cookies.CookieError,
                              M.set, i, '%s_value' % i, '%s_value' % i)
        for i in "thou cast _the- !holy! ^hand| +*grenade~".split():
            # Try typical use case. Setting decent values.
            # Check output and js_output.
            M['path'] = '/foo' # Try a reserved key as well
            M.set(i, "%s_val" % i, "%s_coded_val" % i)
            self.assertEqual(
                M.output(),
                "Set-Cookie: %s=%s; Path=/foo" % (i, "%s_coded_val" % i))
            expected_js_output = """
        <script type="text/javascript">
        <!-- begin hiding
        document.cookie = "%s=%s; Path=/foo";
        // end hiding -->
        </script>
        """ % (i, "%s_coded_val" % i)
            self.assertEqual(M.js_output(), expected_js_output)
        for i in ["foo bar", "foo@bar"]:
            # Try some illegal characters
            self.assertRaises(cookies.CookieError,
                              M.set, i, '%s_value' % i, '%s_value' % i)
# Legacy runner kept commented out; plain unittest discovery is used instead.
# def test_main():
#     run_unittest(CookieTests, MorselTests)
#     run_doctest(cookies)
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "07be063da2c4be5579d47426696d04c2",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 95,
"avg_line_length": 39.94672131147541,
"alnum_prop": 0.5585308299989741,
"repo_name": "krischer/python-future",
"id": "ef1a11266bb7b245d05896185ae5a8cbb8de482a",
"size": "9975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_future/disabled/disabled_test_http_cookies.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2917437"
},
{
"name": "Shell",
"bytes": "539"
}
],
"symlink_target": ""
} |
from __future__ import division
import re
import os
import json
import io
import sys
import pprint
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
import requests
from requests_oauthlib import OAuth2Session
from apiclient import errors
from gdcmdtools.auth import GDAuth
from gdcmdtools.auth import SCOPE
from base import GDBase
# Export formats the Drive API can convert each Google-native MIME type to.
export_format = {
    "application/vnd.google-apps.spreadsheet": ["pdf", "ods", "xlsx"],
    "application/vnd.google-apps.document": ["pdf", "docx", "rtf", "odt", "html", "txt"],
    "application/vnd.google-apps.presentation": ["pdf", "pptx", "txt"],
    "application/vnd.google-apps.drawing": ["png", "pdf", "jpeg", "svg"],
    "application/vnd.google-apps.script+json": ["json"],
}
class GDGet:
def __init__(self, file_id, format, save_as):
# base
auth = GDAuth()
self.credentials = auth.get_credentials()
if self.credentials is None:
raise Exception("Failed to retrieve credentials")
self.http = auth.get_authorized_http()
base = GDBase()
self.service = base.get_drive_service(self.http)
self.file_id = base.get_id_from_url(file_id)
self.format = format
if save_as is None:
self.save_as = None
else:
self.save_as = os.path.abspath(save_as)
    def parse_gas_json(self, file_content, save_as):
        """Split a downloaded Apps Script JSON export into individual files.

        Writes each source entry to "<name>.<ext>" in the current directory,
        then writes a stripped manifest (sources removed, project id added)
        to ``save_as``.
        """
        # Map Apps Script file types to on-disk extensions.
        map_type_ext = {"server_js": "js", "html": "html"}
        try:
            jsons = json.loads(file_content)
            new_json = {"files": []}
            for j in jsons["files"]:
                file_id = j["id"]
                file_name = j["name"]
                file_source = j["source"]
                file_type = j["type"]
                if file_type in map_type_ext.keys():
                    file_ext = map_type_ext[file_type]
                else:
                    # Unknown types keep their type string as the extension.
                    file_ext = file_type
                file_fullname = "%s.%s" % (file_name, file_ext)
                with open(file_fullname, 'wb+') as f:
                    f.write(file_source.encode('utf8'))  # We need unicode!
                j.pop("source")
                new_json["files"].append(j)
            # save the project id, we need the id to upload project
            new_json["id"] = self.file_id
            with open(save_as, 'wb+') as f:
                # NOTE(review): json.dumps returns str; writing it to a file
                # opened in binary mode breaks on Python 3 — confirm this
                # tool is Python-2-only, or switch to text mode here.
                f.write(json.dumps(new_json, indent=4))
        except Exception as e:
            logger.error(e)
            raise
def run(self):
try:
service_response = self.get()
# Content-Length from http header is None
self.file_size = service_response.get('fileSize', None)
result_title_format = self.get_title_format(service_response)
logger.debug(result_title_format)
title, return_format = result_title_format
if self.format != "raw":
_, ext = os.path.splitext(title)
if(self.format != ext[1:]):
title = title + "." + self.format
if self.format not in return_format.keys():
raise Exception(
"The specified format \'%s\' is not allowed, available format are \"%s\", please correct option: --export_format" %
(self.format, ', '.join(
return_format.keys())))
if self.save_as is None:
self.save_as = title
if self.format == "json":
result, file_content, local_size = self.get_by_format(
self.save_as, return_format[self.format])
self.parse_gas_json(file_content, self.save_as)
else:
# FIXME: handle return value
result, file_content, local_size = self.get_by_format(
self.save_as, return_format[self.format])
if(result == False):
raise Exception(
"File size check failed, download may be incompleted. local size is %d" %
local_size)
except Exception as e:
logger.error(e)
raise
return return_format
def get(self):
try:
response = self.service.files().get(fileId=self.file_id).execute()
logger.debug(pprint.pformat(response))
return response
except errors.HttpError as error:
logger.error('An error occurred: %s' % error)
return None
def get_title_format(self, service_response):
export_links = service_response.get('exportLinks', None)
return_format = {}
title = service_response.get('title', None)
logger.debug(title)
logger.debug(export_links)
if export_links is None:
download_link = service_response.get(u'downloadUrl', None)
return_format["raw"] = download_link
else:
export_link_values = export_links.values()
if len(export_link_values) > 0:
for link in export_link_values:
m = re.match(r'^.*[Ff]ormat=(.*)$', link)
return_format[m.group(1)] = link
return title, return_format
def get_by_format(self, save_as, url):
'''
Get file from URL and save to save_as.
Return result,content,filesize
'''
fd = io.FileIO(save_as, mode='wb')
creds = self.credentials
# move to auth.py?
token = {"access_token": creds.access_token, "token_type": "Bearer"}
session = OAuth2Session(creds.client_id, scope=SCOPE, token=token)
with open(save_as, 'wb') as f:
response = session.get(url, stream=True)
return_content = response.content
if self.file_size:
total_length = int(self.file_size)
print "total size = %d Bytes" % total_length
mega = 1048576 # 1024*1024
downloaded = 0
total_in_mega = int(total_length / mega)
for data in response.iter_content(chunk_size=mega):
f.write(data)
downloaded += len(data)
done = int(50 * downloaded / total_length)
done_percent = int(downloaded / total_length * 100)
done_in_mega = int(downloaded / mega)
sys.stdout.write("\r[%s%s] %3d%%, %d of %d MB" % (
'=' * done, ' ' * (50 - done), done_percent, done_in_mega, total_in_mega))
sys.stdout.flush()
else:
f.write(return_content)
# for sys.stdout.flush()
print ""
# local size check
local_size = int(os.path.getsize(save_as))
print "File location: %s" % save_as
if self.file_size:
if(int(self.file_size) == local_size):
return True, return_content, local_size
else:
return False, return_content, local_size
else:
print "File size in bytes: %d" % local_size
return True, return_content, local_size
| {
"content_hash": "67e0caf9d6d305ddeece05cf3e74252b",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 135,
"avg_line_length": 34.196172248803826,
"alnum_prop": 0.5308521057786484,
"repo_name": "timchen86/gdcmdtools",
"id": "3abd3495445c678dee228141f831c0adbf6a288d",
"size": "7193",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gdcmdtools/get.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "95513"
}
],
"symlink_target": ""
} |
import hashlib
import logging
import threading
import traceback
from datetime import datetime, timedelta, timezone
from logging import Logger
from typing import Optional, Tuple, Union
import orjson
from django.conf import settings
from django.core.cache import cache
from django.utils.timezone import now as timezone_now
class _RateLimitFilter:
    """This class is designed to rate-limit Django error reporting
    notifications so that it won't send thousands of emails if the
    database or cache is completely down. It uses a remote shared
    cache (shared by all Django processes) for its default behavior
    (so that the deduplication is global, not per-process), and a
    local in-process cache for when it can't access the remote cache.
    This is critical code because it is called every time
    `logging.error` or `logging.exception` (or an exception) happens
    in the codebase.
    Adapted from https://djangosnippets.org/snippets/2242/.
    """

    # Timestamp of the last reported error, used by the in-memory
    # fallback path; class-level, so it is shared per-process.
    last_error = datetime.min.replace(tzinfo=timezone.utc)
    # This thread-local variable is used to detect recursive
    # exceptions during exception handling (primarily intended for
    # when accessing the shared cache throws an exception).
    handling_exception = threading.local()
    # NOTE(review): this class-level default appears unused -- filter()
    # shadows it with a local variable of the same name; confirm before
    # removing.
    should_reset_handling_exception = False

    def can_use_remote_cache(self) -> Tuple[bool, bool]:
        """Return (remote cache is usable, caller must reset handling_exception)."""
        if getattr(self.handling_exception, "value", False):
            # If we're processing an exception that occurred
            # while handling an exception, this almost
            # certainly was because interacting with the
            # remote cache is failing (e.g. because the cache
            # is down). Fall back to tracking duplicate
            # exceptions in memory without the remote shared cache.
            return False, False
        # Now we test if the remote cache is accessible.
        #
        # This code path can only be reached if we are not potentially
        # handling a recursive exception, so here we set
        # self.handling_exception (in case the cache access we're
        # about to do triggers a `logging.error` or exception that
        # might recurse into this filter class), and actually record
        # that this is the main exception handler thread.
        try:
            self.handling_exception.value = True
            cache.set("RLF_TEST_KEY", 1, 1)
            return cache.get("RLF_TEST_KEY") == 1, True
        except Exception:
            return False, True

    def filter(self, record: logging.LogRecord) -> bool:
        """Return True when the record is not a rate-limited duplicate."""
        # When the original filter() call finishes executing, it's
        # going to change handling_exception.value to False. The
        # local variable below tracks whether the *current*,
        # potentially recursive, filter() call is allowed to touch
        # that value (only the original will find this to be True
        # at the end of its execution)
        should_reset_handling_exception = False
        try:
            # Track duplicate errors
            duplicate = False
            # Per-subclass setting, e.g. ZULIPLIMITER_LIMIT; 600s default.
            rate = getattr(settings, f"{self.__class__.__name__.upper()}_LIMIT", 600)  # seconds
            if rate > 0:
                (use_cache, should_reset_handling_exception) = self.can_use_remote_cache()
                if use_cache:
                    # Deduplicate on a hash of the traceback (or of the
                    # record itself when there is no exc_info).
                    if record.exc_info is not None:
                        tb = "\n".join(traceback.format_exception(*record.exc_info))
                    else:
                        tb = str(record)
                    key = self.__class__.__name__.upper() + hashlib.sha1(tb.encode()).hexdigest()
                    duplicate = cache.get(key) == 1
                    if not duplicate:
                        cache.set(key, 1, rate)
                else:
                    # In-memory fallback: at most one report per `rate`
                    # seconds, regardless of content.
                    min_date = timezone_now() - timedelta(seconds=rate)
                    duplicate = self.last_error >= min_date
                    if not duplicate:
                        self.last_error = timezone_now()
            return not duplicate
        finally:
            if should_reset_handling_exception:
                self.handling_exception.value = False
class ZulipLimiter(_RateLimitFilter):
    """Rate-limit filter whose limit is configured via ZULIPLIMITER_LIMIT
    (per the class-name-based settings lookup in _RateLimitFilter)."""

    pass
class EmailLimiter(_RateLimitFilter):
    """Rate-limit filter whose limit is configured via EMAILLIMITER_LIMIT
    (per the class-name-based settings lookup in _RateLimitFilter)."""

    pass
class ReturnTrue(logging.Filter):
    """Logging filter that accepts every record."""

    def filter(self, record: logging.LogRecord) -> bool:
        return True
class ReturnEnabled(logging.Filter):
    """Logging filter that accepts records only while
    settings.LOGGING_ENABLED is true."""

    def filter(self, record: logging.LogRecord) -> bool:
        return settings.LOGGING_ENABLED
class RequireReallyDeployed(logging.Filter):
    """Logging filter that accepts records only in production
    (settings.PRODUCTION)."""

    def filter(self, record: logging.LogRecord) -> bool:
        return settings.PRODUCTION
def skip_200_and_304(record: logging.LogRecord) -> bool:
    """Filter predicate: drop access-log records whose HTTP status is 200 or 304."""
    # `status_code` is added by Django and is not an actual LogRecord
    # attribute; the getattr indirection also keeps mypy happy.
    return getattr(record, "status_code", None) not in (200, 304)
def skip_site_packages_logs(record: logging.LogRecord) -> bool:
    """Filter predicate: drop records emitted from code installed in
    site-packages.

    Workaround for https://code.djangoproject.com/ticket/26886
    """
    return "site-packages" not in record.pathname
def find_log_caller_module(record: logging.LogRecord) -> Optional[str]:
    """Find the module name corresponding to where this record was logged.

    Sadly `record.module` is just the innermost component of the full
    module name, so we have to go reconstruct this ourselves.
    """
    # Walk outward from the logging call (a search similar to
    # logging.Logger.findCaller): the first frame whose source file
    # matches the record's pathname carries the full dotted module name
    # in its globals.
    frame = logging.currentframe()
    while frame is not None:
        if frame.f_code.co_filename == record.pathname:
            return frame.f_globals.get("__name__")
        frame = frame.f_back
    return None
# Shorter display names for very common loggers, substituted into the
# log-line origin tag.
logger_nicknames = {
    "root": "",  # This one is more like undoing a nickname.
    "zulip.requests": "zr",  # Super common.
}
def find_log_origin(record: logging.LogRecord) -> str:
    """Compute the origin tag ("logger[/module][:shard]") for a record."""
    origin = logger_nicknames.get(record.name, record.name)
    if settings.LOGGING_SHOW_MODULE:
        module_name = find_log_caller_module(record)
        if module_name not in (origin, record.name):
            # Only append the module when it adds information beyond
            # the logger name itself; otherwise abbreviate a bit.
            origin = "{}/{}".format(origin, module_name or "?")
    if settings.RUNNING_INSIDE_TORNADO:
        # In multi-sharded Tornado, it's often valuable to have which shard is
        # responsible for the request in the logs.
        from zerver.tornado.ioloop_logging import logging_data

        origin = "{}:{}".format(origin, logging_data.get("port", "unknown"))
    return origin
# Four-character abbreviations of the standard log levels, keeping the
# level column of formatted log lines aligned.
log_level_abbrevs = {
    "DEBUG": "DEBG",
    "INFO": "INFO",
    "WARNING": "WARN",
    "ERROR": "ERR",
    "CRITICAL": "CRIT",
}
def abbrev_log_levelname(levelname: str) -> str:
    """Return the (at most four character) abbreviation for a level name."""
    try:
        return log_level_abbrevs[levelname]
    except KeyError:
        # It's unlikely someone will set a custom log level with a custom
        # name, but it's an option, so we shouldn't crash if someone does.
        return levelname[:4]
class ZulipFormatter(logging.Formatter):
    """Formatter producing Zulip's standard
    "<timestamp> <LEVL> [pid:<pid>] [<origin>] <message>" log lines."""

    # Used in the base implementation. Default uses `,`.
    default_msec_format = "%s.%03d"

    def __init__(self) -> None:
        super().__init__(fmt=self._compute_fmt())

    def _compute_fmt(self) -> str:
        """Assemble the format string; the pid segment is configuration-dependent."""
        pid_segment = ["pid:%(process)d"] if settings.LOGGING_SHOW_PID else []
        segments = [
            "%(asctime)s",
            "%(zulip_level_abbrev)-4s",
            *pid_segment,
            "[%(zulip_origin)s]",
            "%(message)s",
        ]
        return " ".join(segments)

    def format(self, record: logging.LogRecord) -> str:
        if not getattr(record, "zulip_decorated", False):
            # The `setattr` calls put this logic explicitly outside the bounds of the
            # type system; otherwise mypy would complain LogRecord lacks these attributes.
            setattr(record, "zulip_level_abbrev", abbrev_log_levelname(record.levelname))
            setattr(record, "zulip_origin", find_log_origin(record))
            setattr(record, "zulip_decorated", True)
        return super().format(record)
class ZulipWebhookFormatter(ZulipFormatter):
    """Formatter for webhook-related log lines: extends the standard
    format with request details (user, client, URL, headers, payload)."""

    def _compute_fmt(self) -> str:
        basic = super()._compute_fmt()
        multiline = [
            basic,
            "user: %(user)s",
            "client: %(client)s",
            "url: %(url)s",
            "content_type: %(content_type)s",
            "custom_headers:",
            "%(custom_headers)s",
            "payload:",
            "%(payload)s",
        ]
        return "\n".join(multiline)

    def format(self, record: logging.LogRecord) -> str:
        from zerver.lib.request import get_current_request

        request = get_current_request()
        if not request:
            # No request in scope: fill the extra fields with None so
            # the format string still renders.
            setattr(record, "user", None)
            setattr(record, "client", None)
            setattr(record, "url", None)
            setattr(record, "content_type", None)
            setattr(record, "custom_headers", None)
            setattr(record, "payload", None)
            return super().format(record)

        if request.content_type == "application/json":
            payload: Union[str, bytes] = request.body
        else:
            payload = request.POST["payload"]

        # Pretty-print the payload when it is valid JSON; otherwise log
        # it as-is.
        try:
            payload = orjson.dumps(orjson.loads(payload), option=orjson.OPT_INDENT_2).decode()
        except orjson.JSONDecodeError:
            pass

        # Only the custom (x-*) headers are interesting for debugging.
        header_text = "".join(
            f"{header}: {value}\n"
            for header, value in request.headers.items()
            if header.lower().startswith("x-")
        )

        from zerver.lib.request import RequestNotes

        client = RequestNotes.get_notes(request).client
        assert client is not None
        assert request.user.is_authenticated
        # setattr keeps these dynamic LogRecord attributes outside the
        # bounds of the type system (see ZulipFormatter.format).
        setattr(record, "user", f"{request.user.delivery_email} ({request.user.realm.string_id})")
        setattr(record, "client", client.name)
        setattr(record, "url", request.META.get("PATH_INFO", None))
        setattr(record, "content_type", request.content_type)
        setattr(record, "custom_headers", header_text or None)
        setattr(record, "payload", payload)
        return super().format(record)
def log_to_file(
    logger: Logger,
    filename: str,
    log_format: str = "%(asctime)s %(levelname)-8s %(message)s",
) -> None:
    """Attach a FileHandler writing `filename` (formatted with
    `log_format`) to `logger`.

    Note: `filename` should be declared in zproject/computed_settings.py with zulip_path."""
    file_handler = logging.FileHandler(filename)
    file_handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(file_handler)
| {
"content_hash": "ce76616a9f8221fa1d6c5b365550f8cb",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 98,
"avg_line_length": 36.59121621621622,
"alnum_prop": 0.6262579632536238,
"repo_name": "andersk/zulip",
"id": "dc902ea8408a2c4cfb31a81a5a5a23cded02b1b2",
"size": "10917",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "zerver/lib/logging_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "490256"
},
{
"name": "Dockerfile",
"bytes": "4025"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "749848"
},
{
"name": "Handlebars",
"bytes": "377098"
},
{
"name": "JavaScript",
"bytes": "4006373"
},
{
"name": "Perl",
"bytes": "10163"
},
{
"name": "Puppet",
"bytes": "112128"
},
{
"name": "Python",
"bytes": "10168530"
},
{
"name": "Ruby",
"bytes": "3459"
},
{
"name": "Shell",
"bytes": "146797"
},
{
"name": "TypeScript",
"bytes": "284837"
}
],
"symlink_target": ""
} |
from fine_mapping_pipeline.finemap.paintor import run_paintor, _select_annotations, _do_lrt, _get_likelihood
import logging
logging.basicConfig(level=logging.INFO)
def test_run_paintor(tmpdir):
    """Smoke test: run_paintor completes on the bundled fixture data."""
    out_dir = tmpdir.mkdir('output')
    run_paintor('tests/paintor_data/',
                annotation_header='tests/paintor_data/annotation.header',
                output_directory=out_dir)
    # TODO Check and read the data from the directory.
def test_select_annotations(tmpdir):
    """_select_annotations picks annotation index 1 for the fixture data."""
    chosen = _select_annotations('tests/paintor_data/', causal_snp_number=3,
                                 annotation_header='tests/paintor_data/annotation.header')
    assert [1] == chosen
def test_do_lrf():
    """Likelihood-ratio test p-value for a known pair of log-likelihoods."""
    assert _do_lrt(-10039, -10036) == 0.014305878435429631
def test_get_likelihood(tmpdir):
    """_get_likelihood returns the recorded log-likelihood per annotation."""
    base = 'tests/paintor_data/'
    assert _get_likelihood(base, 1, 'TESTANNOTATION1', 3, 0) == -279.196775
    assert _get_likelihood(base, 0, 'TESTANNOTATION2', 3, 0) == -284.941121
| {
"content_hash": "53b3f115faf2bd46e2f7a58be63915d5",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 109,
"avg_line_length": 43.111111111111114,
"alnum_prop": 0.6950171821305842,
"repo_name": "theboocock/fine_mapping_pipeline",
"id": "649d815cf1896f02fe270f5b2c8966a71d1f7ae1",
"size": "1165",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_run_paintor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4843"
},
{
"name": "C++",
"bytes": "588622"
},
{
"name": "Jupyter Notebook",
"bytes": "2651"
},
{
"name": "Makefile",
"bytes": "1490"
},
{
"name": "Objective-C",
"bytes": "9465"
},
{
"name": "Python",
"bytes": "95656"
},
{
"name": "Shell",
"bytes": "384"
}
],
"symlink_target": ""
} |
"""Sphinx build configuration for the jps documentation.

Generated by sphinx-quickstart; most options are left at their
commented-out defaults.
"""
import sys
import os
import shlex

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.todo',
    'sphinx.ext.viewcode',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'jps'
copyright = u'2016, Author'
author = u'Author'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'jpsdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',

# Latex figure (float) alignment
#'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'jps.tex', u'jps Documentation',
     u'Author', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'jps', u'jps Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'jps', u'jps Documentation',
     author, 'jps', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

# -- Options for Epub output ----------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright

# The basename for the epub file. It defaults to the project name.
#epub_basename = project

# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'

# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''

# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''

# A unique identification for the text.
#epub_uid = ''

# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()

# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()

# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []

# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3

# Allow duplicate toc entries.
#epub_tocdup = True

# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'

# Fix unsupported image types using the Pillow.
#epub_fix_images = False

# Scale large images.
#epub_max_image_width = 0

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'

# If false, no index is generated.
#epub_use_index = True
| {
"content_hash": "73b204ce41d64be4278919051e4ccf9a",
"timestamp": "",
"source": "github",
"line_count": 344,
"max_line_length": 80,
"avg_line_length": 31.325581395348838,
"alnum_prop": 0.7042501855976243,
"repo_name": "OTL/jps",
"id": "9fc4cdba2615df135227207c6519015e5cebfb9b",
"size": "11192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64712"
}
],
"symlink_target": ""
} |
"""Task sub-commands"""
import importlib
import json
import logging
import os
import textwrap
from contextlib import contextmanager, redirect_stderr, redirect_stdout, suppress
from typing import List, Optional
from pendulum.parsing.exceptions import ParserError
from sqlalchemy.orm.exc import NoResultFound
from airflow import settings
from airflow.cli.simple_table import AirflowConsole
from airflow.configuration import conf
from airflow.exceptions import AirflowException, DagRunNotFound
from airflow.executors.executor_loader import ExecutorLoader
from airflow.jobs.local_task_job import LocalTaskJob
from airflow.models import DagPickle, TaskInstance
from airflow.models.dag import DAG
from airflow.models.dagrun import DagRun
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.dependencies_deps import SCHEDULER_QUEUED_DEPS
from airflow.utils import cli as cli_utils
from airflow.utils.cli import (
get_dag,
get_dag_by_file_location,
get_dag_by_pickle,
get_dags,
suppress_logs_and_warning,
)
from airflow.utils.dates import timezone
from airflow.utils.log.logging_mixin import StreamLogWriter
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session, provide_session
def _get_dag_run(dag, exec_date_or_run_id, create_if_necssary, session):
    """Resolve ``exec_date_or_run_id`` to a DagRun for ``dag``.

    Tries, in order: an existing run with that run_id; an existing run
    whose execution_date parses from the string; and, when
    ``create_if_necssary`` is set, a new (unsaved) DagRun.  Raises
    DagRunNotFound otherwise.

    NOTE(review): "necssary" is a typo, but it is part of this helper's
    keyword interface; renaming would need a coordinated change.
    """
    dag_run = dag.get_dagrun(run_id=exec_date_or_run_id, session=session)
    if dag_run:
        return dag_run

    # The argument may be an execution date instead of a run_id; a parse
    # failure just leaves execution_date as None.
    execution_date = None
    with suppress(ParserError, TypeError):
        execution_date = timezone.parse(exec_date_or_run_id)

    if create_if_necssary and not execution_date:
        return DagRun(dag_id=dag.dag_id, run_id=exec_date_or_run_id)
    try:
        return (
            session.query(DagRun)
            .filter(
                DagRun.dag_id == dag.dag_id,
                DagRun.execution_date == execution_date,
            )
            .one()
        )
    except NoResultFound:
        if create_if_necssary:
            return DagRun(dag.dag_id, execution_date=execution_date)
        raise DagRunNotFound(
            f"DagRun for {dag.dag_id} with run_id or execution_date of {exec_date_or_run_id!r} not found"
        ) from None
@provide_session
def _get_ti(task, exec_date_or_run_id, create_if_necssary=False, session=None):
    """Get the task instance through DagRun.run_id, if that fails, get the TI the old way"""
    dag_run = _get_dag_run(task.dag, exec_date_or_run_id, create_if_necssary, session)

    ti = dag_run.get_task_instance(task.task_id)
    if not ti and create_if_necssary:
        # Build a transient TaskInstance attached to the (possibly also
        # transient) dag_run rather than persisting anything.
        ti = TaskInstance(task, run_id=None)
        ti.dag_run = dag_run
    ti.refresh_from_task(task)
    return ti
def _run_task_by_selected_method(args, dag: DAG, ti: TaskInstance) -> None:
    """Dispatch execution of ``ti`` based on the CLI flags:
    a supervising LocalTaskJob (--local), a raw in-process run (--raw),
    or submission to the configured executor (default)."""
    if args.local:
        _run_task_by_local_task_job(args, ti)
        return
    if args.raw:
        _run_raw_task(args, ti)
        return
    _run_task_by_executor(args, dag, ti)
def _run_task_by_executor(args, dag, ti):
    """
    Sends the task to the executor for execution. This can result in the task being
    started by another host if the executor implementation supports it.
    """
    pickle_id = None
    if args.ship_dag:
        try:
            # Running remotely, so pickling the DAG so the remote worker
            # can load it by id.
            with create_session() as session:
                pickle = DagPickle(dag)
                session.add(pickle)
                pickle_id = pickle.id
                # TODO: This should be written to a log
                print(f'Pickled dag {dag} as pickle_id: {pickle_id}')
        except Exception as e:
            print('Could not pickle the DAG')
            print(e)
            raise e
    executor = ExecutorLoader.get_default_executor()
    executor.job_id = "manual"
    executor.start()
    print("Sending to executor.")
    executor.queue_task_instance(
        ti,
        mark_success=args.mark_success,
        pickle_id=pickle_id,
        ignore_all_deps=args.ignore_all_dependencies,
        ignore_depends_on_past=args.ignore_depends_on_past,
        ignore_task_deps=args.ignore_dependencies,
        ignore_ti_state=args.force,
        pool=args.pool,
    )
    # Single heartbeat to actually launch the queued task, then shut the
    # ad-hoc executor down.
    executor.heartbeat()
    executor.end()
def _run_task_by_local_task_job(args, ti):
    """Run LocalTaskJob, which monitors the raw task execution process"""
    run_job = LocalTaskJob(
        task_instance=ti,
        mark_success=args.mark_success,
        pickle_id=args.pickle,
        ignore_all_deps=args.ignore_all_dependencies,
        ignore_depends_on_past=args.ignore_depends_on_past,
        ignore_task_deps=args.ignore_dependencies,
        ignore_ti_state=args.force,
        pool=args.pool,
        external_executor_id=_extract_external_executor_id(args),
    )
    try:
        run_job.run()
    finally:
        # Flush/close log handlers unless the caller asked to keep
        # logging alive (--shut-down-logging false).
        if args.shut_down_logging:
            logging.shutdown()
# CLI flags that are rejected in --raw mode (task_run() errors out if
# any of them is set together with --raw).
RAW_TASK_UNSUPPORTED_OPTION = [
    "ignore_all_dependencies",
    "ignore_depends_on_past",
    "ignore_dependencies",
    "force",
]
def _run_raw_task(args, ti: TaskInstance) -> None:
    """Runs the main task handling code"""
    # Delegates directly to the TaskInstance's internal run method, with
    # no supervising job.
    ti._run_raw_task(
        mark_success=args.mark_success,
        job_id=args.job_id,
        pool=args.pool,
        error_file=args.error_file,
    )
def _extract_external_executor_id(args) -> Optional[str]:
if hasattr(args, "external_executor_id"):
return getattr(args, "external_executor_id")
return os.environ.get("external_executor_id", None)
@contextmanager
def _capture_task_logs(ti):
    """Manage logging context for a task run
    - Replace the root logger configuration with the airflow.task configuration
      so we can capture logs from any custom loggers used in the task.
    - Redirect stdout and stderr to the task instance log, as INFO and WARNING
      level messages, respectively.
    """
    modify = not settings.DONOT_MODIFY_HANDLERS
    if modify:
        root_logger, task_logger = logging.getLogger(), logging.getLogger('airflow.task')

        # Remember the root logger's level/handlers so they can be
        # restored after the task finishes.
        orig_level = root_logger.level
        root_logger.setLevel(task_logger.level)
        orig_handlers = root_logger.handlers.copy()
        root_logger.handlers[:] = task_logger.handlers

    try:
        info_writer = StreamLogWriter(ti.log, logging.INFO)
        warning_writer = StreamLogWriter(ti.log, logging.WARNING)

        with redirect_stdout(info_writer), redirect_stderr(warning_writer):
            yield
    finally:
        if modify:
            # Restore the root logger to its original state.
            root_logger.setLevel(orig_level)
            root_logger.handlers[:] = orig_handlers
@cli_utils.action_logging
def task_run(args, dag=None):
    """Runs a single task instance.

    Validates mutually-exclusive CLI flags, loads an optional JSON config
    override, resolves the DAG (from pickle id, parameter, or DAG folder),
    then dispatches the task instance via the selected run method.
    """
    # Load custom airflow config
    if args.local and args.raw:
        raise AirflowException(
            "Option --raw and --local are mutually exclusive. "
            "Please remove one option to execute the command."
        )
    if args.raw:
        unsupported_options = [o for o in RAW_TASK_UNSUPPORTED_OPTION if getattr(args, o)]
        if unsupported_options:
            unsupported_raw_task_flags = ', '.join(f'--{o}' for o in RAW_TASK_UNSUPPORTED_OPTION)
            unsupported_flags = ', '.join(f'--{o}' for o in unsupported_options)
            raise AirflowException(
                "Option --raw does not work with some of the other options on this command. "
                "You can't use --raw option and the following options: "
                f"{unsupported_raw_task_flags}. "
                f"You provided the option {unsupported_flags}. "
                "Delete it to execute the command."
            )
    if dag and args.pickle:
        raise AirflowException("You cannot use the --pickle option when using DAG.cli() method.")
    if args.cfg_path:
        # The temp config file is consumed once and then deleted.
        with open(args.cfg_path) as conf_file:
            conf_dict = json.load(conf_file)
        if os.path.exists(args.cfg_path):
            os.remove(args.cfg_path)
        conf.read_dict(conf_dict, source=args.cfg_path)
        settings.configure_vars()
    settings.MASK_SECRETS_IN_LOGS = True
    # IMPORTANT, have to use the NullPool, otherwise, each "run" command may leave
    # behind multiple open sleeping connections while heartbeating, which could
    # easily exceed the database connection limit when
    # processing hundreds of simultaneous tasks.
    settings.configure_orm(disable_connection_pool=True)
    if args.pickle:
        print(f'Loading pickle id: {args.pickle}')
        dag = get_dag_by_pickle(args.pickle)
    elif not dag:
        dag = get_dag(args.subdir, args.dag_id)
    else:
        # Use DAG from parameter
        pass
    task = dag.get_task(task_id=args.task_id)
    ti = _get_ti(task, args.execution_date_or_run_id)
    ti.init_run_context(raw=args.raw)
    hostname = get_hostname()
    print(f"Running {ti} on host {hostname}")
    if args.interactive:
        # Interactive runs keep stdout/stderr attached to the terminal.
        _run_task_by_selected_method(args, dag, ti)
    else:
        with _capture_task_logs(ti):
            _run_task_by_selected_method(args, dag, ti)
@cli_utils.action_logging
def task_failed_deps(args):
    """
    Returns the unmet dependencies for a task instance from the perspective of the
    scheduler (i.e. why a task instance doesn't get scheduled and then queued by the
    scheduler, and then run by an executor).
    >>> airflow tasks failed-deps tutorial sleep 2015-01-01
    Task instance dependencies not met:
    Dagrun Running: Task instance's dagrun did not exist: Unknown reason
    Trigger Rule: Task's trigger rule 'all_success' requires all upstream tasks
    to have succeeded, but found 1 non-success(es).
    """
    dag = get_dag(args.subdir, args.dag_id)
    task = dag.get_task(task_id=args.task_id)
    ti = _get_ti(task, args.execution_date_or_run_id)
    # Evaluate the same dependency set the scheduler uses for queueing.
    dep_context = DepContext(deps=SCHEDULER_QUEUED_DEPS)
    failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
    # TODO, Do we want to print or log this
    if failed_deps:
        print("Task instance dependencies not met:")
        for dep in failed_deps:
            print(f"{dep.dep_name}: {dep.reason}")
    else:
        print("Task instance dependencies are all met.")
@cli_utils.action_logging
@suppress_logs_and_warning
def task_state(args):
    """
    Returns the state of a TaskInstance at the command line.
    >>> airflow tasks state tutorial sleep 2015-01-01
    success
    """
    dag = get_dag(args.subdir, args.dag_id)
    task = dag.get_task(task_id=args.task_id)
    ti = _get_ti(task, args.execution_date_or_run_id)
    # current_state() reads the state from the metadata database.
    print(ti.current_state())
@cli_utils.action_logging
@suppress_logs_and_warning
def task_list(args, dag=None):
    """Lists the tasks within a DAG at the command line"""
    dag = dag or get_dag(args.subdir, args.dag_id)
    if args.tree:
        # --tree renders the dependency tree instead of a flat listing.
        dag.tree_view()
        return
    print("\n".join(sorted(task.task_id for task in dag.tasks)))
SUPPORTED_DEBUGGER_MODULES: List[str] = [
"pudb",
"web_pdb",
"ipdb",
"pdb",
]
def _guess_debugger():
"""
Trying to guess the debugger used by the user. When it doesn't find any user-installed debugger,
returns ``pdb``.
List of supported debuggers:
* `pudb <https://github.com/inducer/pudb>`__
* `web_pdb <https://github.com/romanvm/python-web-pdb>`__
* `ipdb <https://github.com/gotcha/ipdb>`__
* `pdb <https://docs.python.org/3/library/pdb.html>`__
"""
for mod in SUPPORTED_DEBUGGER_MODULES:
try:
return importlib.import_module(mod)
except ImportError:
continue
return importlib.import_module("pdb")
@cli_utils.action_logging
@suppress_logs_and_warning
@provide_session
def task_states_for_dag_run(args, session=None):
    """Get the status of all task instances in a DagRun.

    The positional argument is first treated as a run_id; if no DagRun
    matches, it is re-parsed as an execution date.
    """
    dag_run = (
        session.query(DagRun)
        .filter(DagRun.run_id == args.execution_date_or_run_id, DagRun.dag_id == args.dag_id)
        .one_or_none()
    )
    if not dag_run:
        # Fall back: interpret the argument as an execution date.
        try:
            execution_date = timezone.parse(args.execution_date_or_run_id)
            dag_run = (
                session.query(DagRun)
                .filter(DagRun.execution_date == execution_date, DagRun.dag_id == args.dag_id)
                .one_or_none()
            )
        except (ParserError, TypeError) as err:
            raise AirflowException(f"Error parsing the supplied execution_date. Error: {str(err)}")
    if dag_run is None:
        raise DagRunNotFound(
            f"DagRun for {args.dag_id} with run_id or execution_date of {args.execution_date_or_run_id!r} "
            "not found"
        )
    AirflowConsole().print_as(
        data=dag_run.task_instances,
        output=args.output,
        mapper=lambda ti: {
            "dag_id": ti.dag_id,
            "execution_date": dag_run.execution_date.isoformat(),
            "task_id": ti.task_id,
            "state": ti.state,
            "start_date": ti.start_date.isoformat() if ti.start_date else "",
            "end_date": ti.end_date.isoformat() if ti.end_date else "",
        },
    )
@cli_utils.action_logging
def task_test(args, dag=None):
    """Tests task for a given dag_id"""
    # We want to log output from operators etc to show up here. Normally
    # airflow.task would redirect to a file, but here we want it to propagate
    # up to the normal airflow handler.
    settings.MASK_SECRETS_IN_LOGS = True
    handlers = logging.getLogger('airflow.task').handlers
    already_has_stream_handler = False
    for handler in handlers:
        already_has_stream_handler = isinstance(handler, logging.StreamHandler)
        if already_has_stream_handler:
            break
    if not already_has_stream_handler:
        logging.getLogger('airflow.task').propagate = True
    env_vars = {'AIRFLOW_TEST_MODE': 'True'}
    if args.env_vars:
        env_vars.update(args.env_vars)
        os.environ.update(env_vars)
    dag = dag or get_dag(args.subdir, args.dag_id)
    task = dag.get_task(task_id=args.task_id)
    # Add CLI provided task_params to task.params
    if args.task_params:
        passed_in_params = json.loads(args.task_params)
        task.params.update(passed_in_params)
    if task.params:
        task.params.validate()
    # NOTE(review): 'create_if_necssary' looks misspelled but presumably
    # matches _get_ti's keyword spelling -- confirm against _get_ti.
    ti = _get_ti(task, args.execution_date_or_run_id, create_if_necssary=True)
    try:
        if args.dry_run:
            ti.dry_run()
        else:
            ti.run(ignore_task_deps=True, ignore_ti_state=True, test_mode=True)
    except Exception:
        # Optionally drop into a post-mortem debugger on failure.
        if args.post_mortem:
            debugger = _guess_debugger()
            debugger.post_mortem()
        else:
            raise
    finally:
        if not already_has_stream_handler:
            # Make sure to reset back to normal. When run for CLI this doesn't
            # matter, but it does for test suite
            logging.getLogger('airflow.task').propagate = False
@cli_utils.action_logging
@suppress_logs_and_warning
def task_render(args):
    """Renders and displays templated fields for a given task"""
    dag = get_dag(args.subdir, args.dag_id)
    task = dag.get_task(task_id=args.task_id)
    # NOTE(review): 'create_if_necssary' looks misspelled but presumably
    # matches _get_ti's keyword spelling -- confirm against _get_ti.
    ti = _get_ti(task, args.execution_date_or_run_id, create_if_necssary=True)
    ti.render_templates()
    # Print each rendered template field with a banner separator.
    for attr in task.__class__.template_fields:
        print(
            textwrap.dedent(
                f"""        # ----------------------------------------------------------
        # property: {attr}
        # ----------------------------------------------------------
        {getattr(task, attr)}
        """
            )
        )
@cli_utils.action_logging
def task_clear(args):
    """Clears all task instances or only those matched by regex for a DAG(s)"""
    logging.basicConfig(level=settings.LOGGING_LEVEL, format=settings.SIMPLE_LOG_FORMAT)
    # Fast path: a plain dag_id with no regex flags loads a single DAG by file.
    if args.dag_id and not args.subdir and not args.dag_regex and not args.task_regex:
        dags = [get_dag_by_file_location(args.dag_id)]
    else:
        # todo clear command only accepts a single dag_id. no reason for get_dags with 's' except regex?
        dags = get_dags(args.subdir, args.dag_id, use_regex=args.dag_regex)
        if args.task_regex:
            # Narrow each DAG to the matched tasks (plus up/downstream if asked).
            for idx, dag in enumerate(dags):
                dags[idx] = dag.partial_subset(
                    task_ids_or_regex=args.task_regex,
                    include_downstream=args.downstream,
                    include_upstream=args.upstream,
                )
    DAG.clear_dags(
        dags,
        start_date=args.start_date,
        end_date=args.end_date,
        only_failed=args.only_failed,
        only_running=args.only_running,
        confirm_prompt=not args.yes,
        include_subdags=not args.exclude_subdags,
        include_parentdag=not args.exclude_parentdag,
    )
| {
"content_hash": "f56184846d24257586856174daa5c114",
"timestamp": "",
"source": "github",
"line_count": 503,
"max_line_length": 107,
"avg_line_length": 33.4572564612326,
"alnum_prop": 0.6333709667835284,
"repo_name": "apache/incubator-airflow",
"id": "565a19e5094746067e3b2b91069d195ab9b6b935",
"size": "17616",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/cli/commands/task_command.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
from django.db.models import get_model
from oscar.test.factories import create_product
from oscar.test.testcases import WebTestCase
# Resolve the WishList model lazily via Django's app registry.
WishList = get_model('wishlists', 'WishList')
class TestProductDetailPage(WebTestCase):
    """Functional checks for the wishlist widget on the product detail page."""

    is_anonymous = False

    def setUp(self):
        super(TestProductDetailPage, self).setUp()
        self.product = create_product()

    def test_allows_a_product_to_be_added_to_wishlist(self):
        # Submit the "add to wishlist" form from the detail page.
        page = self.get(self.product.get_absolute_url())
        submission = page.forms['add_to_wishlist_form'].submit()
        self.assertIsRedirect(submission)

        # Exactly one wishlist with exactly one line should now exist.
        owned = self.user.wishlists.all()
        self.assertEqual(1, len(owned))
        self.assertEqual(1, len(owned[0].lines.all()))
| {
"content_hash": "bb4fc90f8e93ffce8c85062eec01f6c2",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 63,
"avg_line_length": 31.428571428571427,
"alnum_prop": 0.6806818181818182,
"repo_name": "elliotthill/django-oscar",
"id": "76b540aa192c135362fb59033a38814f53ffe31f",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/functional/customer/wishlists_tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1099824"
},
{
"name": "JavaScript",
"bytes": "818932"
},
{
"name": "Puppet",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "4153712"
},
{
"name": "Shell",
"bytes": "4738"
},
{
"name": "XSLT",
"bytes": "49764"
}
],
"symlink_target": ""
} |
# Production overrides: debugging off, django-compressor on with assets
# pre-built offline.
DEBUG = False
TEMPLATE_DEBUG = DEBUG  # template debug follows the global DEBUG flag
COMPRESS_ENABLED = not DEBUG
COMPRESS_OFFLINE = not DEBUG
| {
"content_hash": "ec9aa31bfb7d30c46274fd39a8fb064b",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 28,
"avg_line_length": 23.75,
"alnum_prop": 0.7789473684210526,
"repo_name": "wonderbeyond/ezlog",
"id": "61b2c6947063dbee430bb2f49b60287a73ce1be2",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ezlog/settings/production.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "91185"
},
{
"name": "HTML",
"bytes": "21021"
},
{
"name": "JavaScript",
"bytes": "144870"
},
{
"name": "Makefile",
"bytes": "674"
},
{
"name": "Python",
"bytes": "62061"
},
{
"name": "Shell",
"bytes": "509"
}
],
"symlink_target": ""
} |
import cherrypy
# import config
import sys
import json
import os

# Populated from the JSON config file passed as argv[1] (see __main__ below).
config = {}
class Alice(object):
    """Root CherryPy application that loads and dispatches pluggable modules.

    Each module named in ``config['module_list']`` is imported from
    ``modules.<name>.<name>`` and must expose a class with the same name;
    an instance of it is attached as an attribute for CherryPy dispatch.
    """

    exposed = True

    def __init__(self):
        self.modules = {}
        # load modules listed in the runtime config
        for mod_name in config['module_list']:
            mod = __import__('modules.' + mod_name + '.' + mod_name, fromlist=[mod_name])
            # create a module instance and expose it for URL dispatch
            try:
                mod_obj = getattr(mod, mod_name)()
                self.modules[mod_name] = mod_obj
                setattr(self, mod_name, mod_obj)
            except AttributeError as e:
                print('Error while loading module \'' + mod_name + '\': ' + str(e))

    def GET(self, module, **kwarg):
        """Return the configured module list as JSON, or a 404 for anything else."""
        if module == 'modules':
            return json.dumps(config['module_list']).encode('utf-8')
        else:
            cherrypy.response.status = 404
            return 'No module named \'' + module + '\''

    def POST(self, module, method, **kwarg):
        """Placeholder POST handler (module methods are dispatched elsewhere)."""
        return ''

    def OPTIONS(self):
        """Answer CORS preflight requests permissively."""
        cherrypy.response.headers['Access-Control-Allow-Credentials'] = True
        cherrypy.response.headers["Access-Control-Allow-Origin"] = "*"
        cherrypy.response.headers['Access-Control-Allow-Methods'] = 'GET, POST'
        cherrypy.response.headers['Access-Control-Allow-Headers'] = cherrypy.request.headers['ACCESS-CONTROL-REQUEST-HEADERS']

    def shutdown(self):
        """Call each loaded module's ``deinit`` (if any), then exit the process."""
        print('===== Exiting =====')
        # BUG FIX: iterate self.modules -- the previous code referenced a bare
        # name `modules`, which does not exist at module scope and raised a
        # NameError whenever shutdown ran.
        for mod_name in self.modules:
            try:
                deinit_func = getattr(self.modules[mod_name], 'deinit')
                deinit_func()
            except AttributeError as e:
                print('Error while deinitializing module \'' + mod_name + '\': ' + str(e))
        sys.exit()
if __name__ == '__main__':
    print( sys.argv )
    # NOTE(review): the config file handle is read but never closed.
    conf = open(sys.argv[1], 'r')
    config = json.loads(conf.read())
    print(config)
    alice = Alice()
    # Run module deinitializers on Ctrl-C before exiting.
    cherrypy.engine.signal_handler.handlers["SIGINT"] = alice.shutdown
    BASEDIR = os.path.dirname(os.path.realpath(__file__)) + '/../../static/core' # FIXME: probably this is ugly, change it later ## Maybe determine basedir in run.py?
    # CherryPy app config: method dispatch at '/', static assets elsewhere.
    conf = {
        'global' : {
            'server.socket_host': config['host'],
            'server.socket_port': config['port']
        },
        '/': {
            'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
            'tools.sessions.on': True,
            'tools.response_headers.on': True,
            'tools.response_headers.headers': [('Content-Type', 'text/plain')],
            'tools.staticdir.on' : True,
            'tools.staticdir.dir' : os.path.join(BASEDIR, 'html'),
            'tools.staticdir.index' : 'index.html'
        },
        '/css': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir' : os.path.join(BASEDIR, 'css')
        },
        '/js': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir' : os.path.join(BASEDIR, 'js')
        },
        '/modules': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir' : os.path.join(BASEDIR, '../modules')
        },
        '/tests': {
            'tools.staticdir.on': True,
            'tools.staticdir.dir' : os.path.join(BASEDIR, '../tests')
        }
    }
    print('===== Starting Alice at \'' + config['host'] + ':' + str(config['port']) + '\' =====')
    cherrypy.quickstart(alice, '/', conf)
| {
"content_hash": "ef67eee5a716835d502c5eefb7631c7c",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 166,
"avg_line_length": 34.673469387755105,
"alnum_prop": 0.5329605650382578,
"repo_name": "SelenaProject/selena",
"id": "358d8ed1b7dbd92e99eff0c7fbf139a81db77f47",
"size": "3422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/core/selena.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2750"
},
{
"name": "JavaScript",
"bytes": "143143"
},
{
"name": "Python",
"bytes": "12085"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import unittest
import os
from pymatgen import Structure
from pymatgen.io.feff.sets import MPXANESSet, MPELNESSet, FEFFDictSet, MPEXAFSSet
from pymatgen.io.feff.inputs import Potential, Tags, Atoms, Header
from pymatgen.io.cif import CifParser, CifFile
import shutil
import numpy as np
# Shared fixture directory (pymatgen's top-level test_files).
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
                        'test_files')
class FeffInputSetTest(unittest.TestCase):
    """Unit tests for the FEFF input-set classes (XANES/ELNES/EXAFS)."""

    @classmethod
    def setUpClass(cls):
        # Reference HEADER text for test_get_header; the last four site
        # lines are compared order-insensitively there.
        cls.header_string = """* This FEFF.inp file generated by pymatgen
TITLE comment: From cif file
TITLE Source: CoO19128.cif
TITLE Structure Summary: Co2 O2
TITLE Reduced formula: CoO
TITLE space group: (P6_3mc), space number: (186)
TITLE abc: 3.297078 3.297078 5.254213
TITLE angles: 90.000000 90.000000 120.000000
TITLE sites: 4
* 1 Co 0.666667 0.333333 0.003676
* 2 Co 0.333334 0.666666 0.503676
* 3 O 0.333334 0.666666 0.121324
* 4 O 0.666667 0.333333 0.621325"""
        cif_file = os.path.join(test_dir, 'CoO19128.cif')
        cls.structure = CifParser(cif_file).get_structures()[0]
        cls.absorbing_atom = 'O'
        cls.mp_xanes = MPXANESSet(cls.absorbing_atom, cls.structure)

    def test_get_header(self):
        comment = 'From cif file'
        header = str(self.mp_xanes.header(source='CoO19128.cif', comment=comment))
        ref = self.header_string.splitlines()
        # Site order may differ, so compare the last four lines by their
        # payload (everything after the "* N" prefix) only.
        last4 = [" ".join(l.split()[2:]) for l in ref[-4:]]
        for i, l in enumerate(header.splitlines()):
            if i < 9:
                self.assertEqual(l, ref[i])
            else:
                s = " ".join(l.split()[2:])
                self.assertIn(s, last4)

    def test_getfefftags(self):
        tags = self.mp_xanes.tags.as_dict()
        self.assertEqual(tags['COREHOLE'], "FSR",
                         "Failed to generate PARAMETERS string")

    def test_get_feffPot(self):
        POT = str(self.mp_xanes.potential)
        d, dr = Potential.pot_dict_from_string(POT)
        self.assertEqual(d['Co'], 1, "Wrong symbols read in for Potential")

    def test_get_feff_atoms(self):
        atoms = str(self.mp_xanes.atoms)
        self.assertEqual(atoms.splitlines()[3].split()[4], self.absorbing_atom,
                         "failed to create ATOMS string")

    def test_to_and_from_dict(self):
        # Round-trip the input set through its dict serialization.
        f1_dict = self.mp_xanes.as_dict()
        f2 = MPXANESSet.from_dict(f1_dict)
        self.assertEqual(f1_dict, f2.as_dict())

    def test_user_tag_settings(self):
        tags_dict_ans = self.mp_xanes.tags.as_dict()
        tags_dict_ans["COREHOLE"] = "RPA"
        tags_dict_ans["EDGE"] = "L1"
        user_tag_settings = {"COREHOLE": "RPA", "EDGE": "L1"}
        mp_xanes_2 = MPXANESSet(self.absorbing_atom, self.structure,
                                user_tag_settings=user_tag_settings)
        self.assertEqual(mp_xanes_2.tags.as_dict(), tags_dict_ans)

    def test_eels_to_from_dict(self):
        elnes = MPELNESSet(self.absorbing_atom, self.structure, radius=5.0,
                           beam_energy=100, beam_direction=[1, 0, 0],
                           collection_angle=7, convergence_angle=6)
        elnes_dict = elnes.as_dict()
        elnes_2 = MPELNESSet.from_dict(elnes_dict)
        self.assertEqual(elnes_dict, elnes_2.as_dict())

    def test_eels_tags_set(self):
        # User-provided ELNES settings must override the computed defaults.
        radius = 5.0
        user_eels_settings = {
            'ENERGY': '4 0.04 0.1',
            'BEAM_ENERGY': '200 1 0 1',
            'ANGLES': '2 3'}
        elnes = MPELNESSet(self.absorbing_atom, self.structure, radius=radius,
                           user_eels_settings=user_eels_settings)
        elnes_2 = MPELNESSet(self.absorbing_atom, self.structure, radius=radius,
                             beam_energy=100, beam_direction=[1, 0, 0],
                             collection_angle=7, convergence_angle=6)
        self.assertEqual(elnes.tags["ELNES"]["ENERGY"],
                         user_eels_settings["ENERGY"])
        self.assertEqual(elnes.tags["ELNES"]["BEAM_ENERGY"],
                         user_eels_settings["BEAM_ENERGY"])
        self.assertEqual(elnes.tags["ELNES"]["ANGLES"],
                         user_eels_settings["ANGLES"])
        self.assertEqual(elnes_2.tags["ELNES"]["BEAM_ENERGY"], [100, 0, 1, 1])
        self.assertEqual(elnes_2.tags["ELNES"]["BEAM_DIRECTION"], [1, 0, 0])
        self.assertEqual(elnes_2.tags["ELNES"]["ANGLES"], [7, 6])

    def test_reciprocal_tags_and_input(self):
        # RECIPROCAL mode writes a CIF instead of ATOMS/POTENTIALS sections;
        # cleans up the files it writes to the working directory.
        user_tag_settings = {"RECIPROCAL": "", "KMESH": "1000"}
        elnes = MPELNESSet(self.absorbing_atom, self.structure,
                           user_tag_settings=user_tag_settings)
        self.assertTrue("RECIPROCAL" in elnes.tags)
        self.assertEqual(elnes.tags["TARGET"], 3)
        self.assertEqual(elnes.tags["KMESH"], "1000")
        self.assertEqual(elnes.tags["CIF"], "Co2O2.cif")
        self.assertEqual(elnes.tags["COREHOLE"], "RPA")
        all_input = elnes.all_input()
        self.assertNotIn("ATOMS", all_input)
        self.assertNotIn("POTENTIALS", all_input)
        elnes.write_input()
        structure = Structure.from_file("Co2O2.cif")
        self.assertTrue(self.structure.matches(structure))
        os.remove("HEADER")
        os.remove("PARAMETERS")
        os.remove("feff.inp")
        os.remove("Co2O2.cif")

    def test_small_system_EXAFS(self):
        exafs_settings = MPEXAFSSet(self.absorbing_atom, self.structure)
        self.assertFalse(exafs_settings.small_system)
        self.assertTrue('RECIPROCAL' not in exafs_settings.tags)
        user_tag_settings = {"RECIPROCAL": ""}
        exafs_settings_2 = MPEXAFSSet(self.absorbing_atom, self.structure, nkpts=1000,
                                      user_tag_settings=user_tag_settings)
        self.assertFalse(exafs_settings_2.small_system)
        self.assertTrue('RECIPROCAL' not in exafs_settings_2.tags)

    def test_number_of_kpoints(self):
        user_tag_settings = {"RECIPROCAL": ""}
        elnes = MPELNESSet(self.absorbing_atom, self.structure, nkpts=1000,
                           user_tag_settings=user_tag_settings)
        self.assertEqual(elnes.tags["KMESH"], [12, 12, 7])

    def test_large_systems(self):
        # Large structures must drop the reciprocal-space tags entirely.
        struct = Structure.from_file(os.path.join(test_dir, "La4Fe4O12.cif"))
        user_tag_settings = {"RECIPROCAL": "", "KMESH": "1000"}
        elnes = MPELNESSet("Fe", struct, user_tag_settings=user_tag_settings)
        self.assertNotIn("RECIPROCAL", elnes.tags)
        self.assertNotIn("KMESH", elnes.tags)
        self.assertNotIn("CIF", elnes.tags)
        self.assertNotIn("TARGET", elnes.tags)

    def test_postfeffset(self):
        # Write an input set, re-read it via FEFFDictSet.from_directory, and
        # verify that a regenerated copy is equivalent; cleans up after itself.
        self.mp_xanes.write_input(os.path.join('.', 'xanes_3'))
        feff_dict_input = FEFFDictSet.from_directory(os.path.join('.', 'xanes_3'))
        self.assertTrue(feff_dict_input.tags == Tags.from_file(os.path.join('.', 'xanes_3/feff.inp')))
        self.assertTrue(str(feff_dict_input.header()) == str(Header.from_file(os.path.join('.', 'xanes_3/HEADER'))))
        feff_dict_input.write_input('xanes_3_regen')
        origin_tags = Tags.from_file(os.path.join('.', 'xanes_3/PARAMETERS'))
        output_tags = Tags.from_file(os.path.join('.', 'xanes_3_regen/PARAMETERS'))
        origin_mole = Atoms.cluster_from_file(os.path.join('.', 'xanes_3/feff.inp'))
        output_mole = Atoms.cluster_from_file(os.path.join('.', 'xanes_3_regen/feff.inp'))
        original_mole_dist = np.array(origin_mole.distance_matrix[0, :]).astype(np.float64)
        output_mole_dist = np.array(output_mole.distance_matrix[0, :]).astype(np.float64)
        original_mole_shell = [x.species_string for x in origin_mole]
        output_mole_shell = [x.species_string for x in output_mole]
        self.assertTrue(np.allclose(original_mole_dist, output_mole_dist))
        self.assertTrue(origin_tags == output_tags)
        self.assertTrue(original_mole_shell == output_mole_shell)
        shutil.rmtree(os.path.join('.', 'xanes_3'))
        shutil.rmtree(os.path.join('.', 'xanes_3_regen'))
        # Same round-trip check for a RECIPROCAL-mode input set.
        reci_mp_xanes = MPXANESSet(self.absorbing_atom, self.structure,
                                   user_tag_settings={"RECIPROCAL": ""})
        reci_mp_xanes.write_input('xanes_reci')
        feff_reci_input = FEFFDictSet.from_directory(os.path.join('.', 'xanes_reci'))
        self.assertTrue("RECIPROCAL" in feff_reci_input.tags)
        feff_reci_input.write_input('Dup_reci')
        self.assertTrue(os.path.exists(os.path.join('.', 'Dup_reci', 'HEADER')))
        self.assertTrue(os.path.exists(os.path.join('.', 'Dup_reci', 'feff.inp')))
        self.assertTrue(os.path.exists(os.path.join('.', 'Dup_reci', 'PARAMETERS')))
        self.assertFalse(os.path.exists(os.path.join('.', 'Dup_reci', 'ATOMS')))
        self.assertFalse(os.path.exists(os.path.join('.', 'Dup_reci', 'POTENTIALS')))
        tags_original = Tags.from_file(os.path.join('.', 'xanes_reci/feff.inp'))
        tags_output = Tags.from_file(os.path.join('.', 'Dup_reci/feff.inp'))
        self.assertTrue(tags_original == tags_output)
        stru_orig = Structure.from_file(os.path.join('.', 'xanes_reci/Co2O2.cif'))
        stru_reci = Structure.from_file(os.path.join('.', 'Dup_reci/Co2O2.cif'))
        self.assertTrue(stru_orig.__eq__(stru_reci))
        shutil.rmtree(os.path.join('.', 'Dup_reci'))
        shutil.rmtree(os.path.join('.', 'xanes_reci'))
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {
"content_hash": "9011e4b8f81c3e273081a6fe44897648",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 116,
"avg_line_length": 46.395121951219515,
"alnum_prop": 0.6062454000630848,
"repo_name": "johnson1228/pymatgen",
"id": "d191b5d66ccb306678dc6a2aeffcf295cd677f03",
"size": "9621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/feff/tests/test_sets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5938"
},
{
"name": "CSS",
"bytes": "7550"
},
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "HTML",
"bytes": "4886182"
},
{
"name": "Makefile",
"bytes": "5573"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "6064350"
},
{
"name": "Roff",
"bytes": "868"
}
],
"symlink_target": ""
} |
"""Define the base whispy lispy types
From now on, all the functions will operate on these
"""
from __future__ import unicode_literals, absolute_import
class Type(object):
    """Abstract base type for all whispy lispy values.

    Subclasses wrap their payload in the ``values`` tuple.
    """

    def __init__(self, values):
        """
        :param tuple values: a tuple of values
        """
        self.values = values

    def __eq__(self, other):
        # Equal only when both the concrete class and the payload match.
        return (
            self.__class__ == other.__class__
            and self.values == other.values
        )

    def __hash__(self):
        return hash(self.values)
class String(Type):
    """String value type.

    Note: the previous ``__eq__`` override here duplicated ``Type.__eq__``
    verbatim; besides being redundant, in Python 3 defining ``__eq__`` sets
    ``__hash__`` to ``None``, which silently made String unhashable even
    though the base class defines ``__hash__``. Removing the override keeps
    equality semantics identical and restores hashability.
    """

    @classmethod
    def from_quoted_values(cls, values):
        """The concrete syntax nodes didn't know much difference
        between strings and symbols. They determined the difference between
        these by letting the start and end quotes on the strings.
        This "madness" stops here"""
        return cls((values[0][1:-1],))

    def __repr__(self):
        return '$String {}'.format(self.values[0])
class Int(Type):
    """Integer value type."""

    def __repr__(self):
        return '$Int {0}'.format(self.values[0])
class Bool(Type):
    """Boolean value type."""

    def __repr__(self):
        return '$Bool {0}'.format(self.values[0])
class Float(Type):
    """Floating point value type."""

    def __repr__(self):
        return '$Float {0}'.format(self.values[0])
class List(Type):
    """List value type; repr shows the whole values tuple."""

    def __repr__(self):
        return '$List {0}'.format(self.values)
class Symbol(Type):
    """Symbol (identifier) value type."""

    def __repr__(self):
        return '$Symbol {0}'.format(self.values[0])
class Function(Type):
    """The Function object.

    Its values list must contain (on the given positions):
    0: the function name (a Symbol-like object; see :attr:`name`)
    1: the formal parameter names (a tuple)
    2: the AST that will get executed
    3: A Scope
    ...Stuff will get added here (like the closure scope)

    Note: the previous no-op ``__init__`` (which only forwarded to
    ``super().__init__`` with a typo'd ``kwrgs`` name) has been removed;
    the inherited ``Type.__init__`` is used directly.
    """

    def __repr__(self):
        params = '(' + ', '.join(str(val.values[0]) for val in self.values[1]) + ')'
        return '$[Func {name}{params} at {address}]'.format(
            name=self.name, address=id(self), params=params)

    @property
    def code(self):
        """AST interpreted when the function is called."""
        return self.values[2]

    @property
    def params(self):
        """Tuple of formal parameter symbols."""
        return self.values[1]

    @property
    def scope(self):
        """Scope captured for closure resolution."""
        return self.values[3]

    @property
    def name(self):
        # values[0] is itself a wrapped value; unwrap its payload.
        return self.values[0].values[0]

    def __call__(self, interpreter, scope, *args):
        """
        :param args: instances of the whispy_lispy.types classes
        """
        # Imported lazily to avoid a circular import with scopes2.
        from whispy_lispy import scopes2
        local_scope = scopes2.FunctionScope(
            parent=scope, param_names=self.params,
            arguments=args, closure_scope=self.scope)
        result = interpreter(self.code, local_scope)
        return result
| {
"content_hash": "1f061fdd6762f2fbba986c32194bf4c6",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 84,
"avg_line_length": 25.00862068965517,
"alnum_prop": 0.5853154084798345,
"repo_name": "vladiibine/whispy_lispy",
"id": "c964e69c274dc962d65e4f4901b14ee242a9baf4",
"size": "2924",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/whispy_lispy/types.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1489"
},
{
"name": "PowerShell",
"bytes": "2986"
},
{
"name": "Python",
"bytes": "99352"
}
],
"symlink_target": ""
} |
"""
Built-in, globally-available admin actions.
"""
from django.core.exceptions import PermissionDenied
from django.contrib import messages
from django.contrib.admin import helpers
from django.contrib.admin.util import get_deleted_objects, model_ngettext
from django.contrib.admin.admin_names import extended_verbose_name
from django.db import router
from django.template.response import TemplateResponse
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy, ugettext as _
def delete_selected(modeladmin, request, queryset):
    """
    Default action which deletes the selected objects.
    This action first displays a confirmation page which shows all the
    deletable objects, or, if the user has no permission on one of the related
    children (foreign keys), a "permission denied" message.
    Next, it deletes all selected objects and redirects back to the change list.
    """
    opts = modeladmin.model._meta
    app_label = opts.app_label
    # Check that the user has delete permission for the actual model
    if not modeladmin.has_delete_permission(request):
        raise PermissionDenied
    using = router.db_for_write(modeladmin.model)
    # Populate deletable_objects, a data structure of all related objects that
    # will also be deleted.
    deletable_objects, perms_needed, protected = get_deleted_objects(
        queryset, opts, request.user, modeladmin.admin_site, using)
    # The user has already confirmed the deletion.
    # Do the deletion and return a None to display the change list view again.
    if request.POST.get('post'):
        if perms_needed:
            raise PermissionDenied
        n = queryset.count()
        if n:
            # Log each deletion individually before the bulk delete.
            for obj in queryset:
                obj_display = force_text(obj)
                modeladmin.log_deletion(request, obj, obj_display)
            queryset.delete()
            modeladmin.message_user(request, _("Successfully deleted %(count)d %(items)s.") % {
                "count": n, "items": model_ngettext(modeladmin.opts, n)
            }, messages.SUCCESS)
        # Return None to display the change list page again.
        return None
    if len(queryset) == 1:
        # NOTE(review): extended_verbose_name comes from the non-stock
        # django.contrib.admin.admin_names module -- presumably a local
        # customization; confirm its contract before changing this.
        objects_name = force_text(
            extended_verbose_name(request, opts, 'delete')
        )
    else:
        objects_name = force_text(opts.verbose_name_plural)
    if perms_needed or protected:
        title = _("Cannot delete %(name)s") % {"name": objects_name}
    else:
        title = _("Are you sure?")
    context = {
        "title": title,
        "objects_name": objects_name,
        "deletable_objects": [deletable_objects],
        'queryset': queryset,
        "perms_lacking": perms_needed,
        "protected": protected,
        "opts": opts,
        "app_label": app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
    }
    # Display the confirmation page
    return TemplateResponse(request, modeladmin.delete_selected_confirmation_template or [
        "admin/%s/%s/delete_selected_confirmation.html" % (app_label, opts.model_name),
        "admin/%s/delete_selected_confirmation.html" % app_label,
        "admin/delete_selected_confirmation.html"
    ], context, current_app=modeladmin.admin_site.name)
delete_selected.short_description = ugettext_lazy("Delete selected %(verbose_name_plural)s")
| {
"content_hash": "4e1aa76b5a0bdadd40872f20f72c604d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 95,
"avg_line_length": 38.15909090909091,
"alnum_prop": 0.6759976176295414,
"repo_name": "yaroslavprogrammer/django",
"id": "7863c05a7425c54443b4c1f87ba844fcd17981f0",
"size": "3358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/admin/actions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "51281"
},
{
"name": "JavaScript",
"bytes": "101148"
},
{
"name": "Python",
"bytes": "8589498"
},
{
"name": "Shell",
"bytes": "12135"
}
],
"symlink_target": ""
} |
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('stein', '0029_auto_20171010_1149'),
]
operations = [
migrations.AlterField(
model_name='glossaryentry',
name='examples',
field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), help_text="When giving more than one example seperate them with a '|' ( Alt Gr + >-Button).", null=True, size=None, verbose_name='examples'),
),
]
| {
"content_hash": "f609768f260beaa4af55d34b969b1736",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 232,
"avg_line_length": 33.1764705882353,
"alnum_prop": 0.648936170212766,
"repo_name": "GeoMatDigital/django-geomat",
"id": "484050d7a5f5639a63630167db22808bdf477b4e",
"size": "639",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "geomat/stein/migrations/0030_auto_20171015_1154.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "16597"
},
{
"name": "Dockerfile",
"bytes": "1091"
},
{
"name": "HTML",
"bytes": "14474"
},
{
"name": "JavaScript",
"bytes": "31354"
},
{
"name": "Makefile",
"bytes": "371"
},
{
"name": "Python",
"bytes": "197468"
},
{
"name": "Shell",
"bytes": "674"
}
],
"symlink_target": ""
} |
"""Preprocessing code for MetaQA datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import app
from absl import flags
from tqdm import tqdm
FLAGS = flags.FLAGS
flags.DEFINE_string('metaqa_dir', None, 'Base directory for METAQA data.')
flags.DEFINE_string('output_dir', None,
'Base directory to store preprocessed data.')
def add_one_fact(kb, subj, rel, obj):
  """Insert one (subj, rel, obj) triple into the nested KB dictionary."""
  rel_map = kb.setdefault(subj, dict())
  rel_map.setdefault(rel, set()).add(obj)
def load_kb(kb_filename):
  """Load KB into dictionary.

  Args:
    kb_filename: path to a pipe-delimited triples file ("subj|rel|obj" lines).

  Returns:
    A tuple (kb, entities): kb is a dictionary {subject: {rel: set(objects)}}
    and entities is a list of all lowercased entity surface forms.
  """
  kb = dict()
  entities = set()
  # Fix: the original iterated over a bare open() and never closed the file;
  # use a context manager so the handle is released deterministically.
  with open(kb_filename) as kb_file:
    for line in kb_file:
      line = line.strip()
      subj, rel, obj = line.split('|')
      entities.add(subj.lower())
      entities.add(obj.lower())
      add_one_fact(kb, subj, rel, obj)
      # Attribute-like relations get no inverse edge; everything else does,
      # so paths can be traversed from object back to subject.
      if rel not in ['release_year', 'in_language', 'has_genre', 'has_tags']:
        add_one_fact(kb, obj, rel + '-inv', subj)
  return kb, list(entities)
def get_topic_ent(question):
  """Return the topic entity, which is delimited by square brackets."""
  begin = question.find('[') + 1
  end = question.find(']')
  return question[begin:end]
def get_shortest_path(kb, topic_ent, answers, num_hop):
  """Get all shortest path from topic entity to answers.

  Args:
    kb: dictionary to store the KB {subject: {relation: objects}}
    topic_ent: topic entity
    answers: a set of answers
    num_hop: max number of hops

  Returns:
    a list of shortest paths (each a list of relation names)
  """
  # Breadth-first expansion; each frontier item carries its relation path.
  frontier = set([(topic_ent, ())])
  found_chains = set()
  for _ in range(num_hop):
    expanded = set()
    for node, path_so_far in frontier:
      relations = kb.get(node)
      if relations is None:
        continue
      for rel, objs in relations.items():
        extended = tuple(path_so_far) + (rel,)
        if objs & answers:
          found_chains.add(extended)
        expanded.update((obj, extended) for obj in objs)
    frontier = expanded
  return [list(chain) for chain in found_chains]
def get_intermediate_entities(kb, topic_ent, chain):
  """Get all tail entities of a topic entity following a chain of relations.

  Args:
    kb: dictionary to store the KB {subject: {relation: objects}}
    topic_ent: topic entity to start with
    chain: a list of relations to follow

  Returns:
    a list of entity sets, one per hop (index 0 holds the topic entity)
  """
  current = set([topic_ent])
  layers = [current]
  for rel in chain:
    successors = set()
    for node in current:
      objs = kb.get(node, {}).get(rel)
      if objs:
        successors.update(objs)
    # Never loop back onto the topic entity itself.
    successors.discard(topic_ent)
    current = successors
    layers.append(current)
  return layers
def postprocess_candidate_chains(kb, topic_ent, answers, candidate_chains,
                                 num_hop):
  """Postprocess shortest paths and keep the one that leads to answers.

  Args:
    kb: dictionary to store the KB {subject: {relation: objects}}
    topic_ent: topic entity to start with
    answers: a set of answers
    candidate_chains: all possible shortest paths
    num_hop: max number of hops

  Returns:
    the best shortest path plus its intermediate entity sets
    ((None, None) if no candidate reproduces the answer set exactly)
  """
  for chain in candidate_chains:
    # 3-hop questions must bounce through an inverse relation pair first.
    if num_hop == 3 and (len(chain) < 3 or chain[1] != chain[0] + '-inv'):
      continue
    tail_sets = get_intermediate_entities(kb, topic_ent, chain)
    if tail_sets[-1] == answers:
      return chain, tail_sets
  return None, None
def _link_entity_list(entity_list, entity2id):
  """Attach lowercase-keyed KB ids to a list of entity surface forms."""
  return [{'text': surface, 'kb_id': entity2id[surface.lower()]}
          for surface in entity_list]
def _link_question(question, entity2id):
  """Add entity links for this question."""
  def link(items):
    return _link_entity_list(items, entity2id)

  question['answers'] = link(question['answers'])
  question['intermediate_entities'] = [
      link(layer) for layer in question['intermediate_entities']
  ]
  # 'question_entities' is renamed to 'entities' once linked.
  question['entities'] = link(question.pop('question_entities'))
  # Substitute the placeholder with the (single) linked topic entity text.
  question['question'] = question['question'].replace(
      '__ent__', question['entities'][0]['text'])
  return question
def preprocess_metaqa(kb, entity2id, data_in_filename, data_out_filename,
                      num_hop):
  """Runner of the preprocessing code.

  Args:
    kb: kb dict
    entity2id: entity to int id
    data_in_filename: input filename
    data_out_filename: output filename
    num_hop: num hop
  """
  num_found = 0
  num_data = 0
  with open(data_in_filename) as f_in, open(data_out_filename, 'w') as f_out:
    for line in tqdm(f_in):
      num_data += 1
      line = line.strip()
      question, answers_str = line.split('\t')
      topic_ent = get_topic_ent(question)
      answers = set(answers_str.split('|'))
      candidate_chains = get_shortest_path(kb, topic_ent, answers, num_hop)
      best_chain, intermediate_entities = postprocess_candidate_chains(
          kb, topic_ent, answers, candidate_chains, num_hop)
      num_found += int(best_chain is not None)
      # Fix: postprocess_candidate_chains returns (None, None) when no chain
      # exactly reproduces the answers; iterating None crashed the list
      # comprehension below. Treat that case as "no intermediate layers",
      # which the padding term then fills with num_hop empty lists.
      intermediates = intermediate_entities or []
      out_example = {
          'question': question.replace('[' + topic_ent + ']', '__ent__'),
          'answers': list(answers),
          'entities': _link_entity_list([topic_ent], entity2id),
          'intermediate_entities': ([list(es) for es in intermediates]
                                    + [list()] * (num_hop - len(intermediates))),
          'inference_chains': best_chain,
      }
      f_out.write('%s\n' % json.dumps(out_example))
  print('shortest path found: %d / %d' % (num_found, num_data))
def main(_):
  """Load the KB, dump the entity vocabulary, and preprocess every split."""
  kb, entities = load_kb(os.path.join(FLAGS.metaqa_dir, 'kb.txt'))
  entity2id = {name: idx for idx, name in enumerate(entities)}
  with open(os.path.join(FLAGS.output_dir, 'entities.txt'), 'w') as f:
    f.write('\n'.join(entities))
  # Every hop count gets its own train/dev/test trio.
  for num_hop in (1, 2, 3):
    for split in ('train', 'dev', 'test'):
      data_in_filename = os.path.join(
          FLAGS.metaqa_dir, '%d-hop/vanilla/qa_%s.txt' % (num_hop, split))
      data_out_filename = os.path.join(
          FLAGS.output_dir, '%d-hop/%s.json' % (num_hop, split))
      preprocess_metaqa(kb, entity2id, data_in_filename, data_out_filename,
                        num_hop)


if __name__ == '__main__':
  app.run(main)
| {
"content_hash": "404deed0614c66bf7376c8bc80724988",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 29.170403587443946,
"alnum_prop": 0.623674096848578,
"repo_name": "google-research/language",
"id": "7ca3c034bede703686a12c1dd1b2e249392a079c",
"size": "7120",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "language/labs/drkit/metaqa/preprocessing/metaqa_preprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9834"
},
{
"name": "CSS",
"bytes": "602"
},
{
"name": "HTML",
"bytes": "25162"
},
{
"name": "JavaScript",
"bytes": "8857"
},
{
"name": "Jupyter Notebook",
"bytes": "1505066"
},
{
"name": "Python",
"bytes": "7139472"
},
{
"name": "Shell",
"bytes": "183709"
}
],
"symlink_target": ""
} |
# Emit the two demo lines via a loop instead of back-to-back statements.
for message in ('This is another demo', "It's supposed to test git"):
    print(message)
"content_hash": "5230106b3615ffd680531d5d060c40be",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 34,
"avg_line_length": 32,
"alnum_prop": 0.734375,
"repo_name": "erc7as/cs3240-labdemo",
"id": "f833c78bed919a59a2af6d766bf9f325fd42757a",
"size": "64",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "print.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "338"
}
],
"symlink_target": ""
} |
"""
This module contains the Adaptive Dynamic Event Tree and
the Adaptive Hybrid Dynamic Event Tree sampling strategies
Created on May 21, 2016
@author: alfoa
supercedes Samplers.py from alfoa
"""
#for future compatibility with Python 3--------------------------------------------------------------
from __future__ import division, print_function, unicode_literals, absolute_import
#End compatibility block for Python 3----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
import sys
import copy
import numpy as np
from operator import mul
from functools import reduce
import xml.etree.ElementTree as ET
import itertools
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from .DynamicEventTree import DynamicEventTree
from .LimitSurfaceSearch import LimitSurfaceSearch
from ..utils import utils
from ..utils import TreeStructure as ETS
#Internal Modules End--------------------------------------------------------------------------------
class AdaptiveDynamicEventTree(DynamicEventTree, LimitSurfaceSearch):
"""
This class is aimed to perform a supervised Adaptive Dynamic Event Tree sampling strategy
"""
@classmethod
def getInputSpecification(cls):
  """
    Method to get a reference to a class that specifies the input data for
    class cls.
    @ In, cls, the class for which we are retrieving the specification
    @ Out, inputSpecification, InputData.ParameterInput, class to use for
      specifying input of cls.
  """
  # No extra input nodes beyond what the parent samplers already declare.
  return super(AdaptiveDynamicEventTree, cls).getInputSpecification()
def __init__(self):
  """
    Default Constructor that will initialize member variables with reasonable
    defaults or empty lists/dictionaries where applicable.
    @ In, None
    @ Out, None
  """
  # Both parents must be initialized, DET first, then the limit-surface search.
  DynamicEventTree.__init__(self)   # init DET
  LimitSurfaceSearch.__init__(self) # init Adaptive
  self.detAdaptMode = 1             # Adaptive Dynamic Event Tree method (=1 -> DynamicEventTree as hybridsampler and subsequent LimitSurfaceSearch, =2 -> DynamicEventTree online adaptive)
  self.noTransitionStrategy = 1     # Strategy in case no transitions have been found by DET (1 = 'Probability MC', 2 = Increase the grid exploration)
  self.insertAdaptBPb = True        # Add Probability THs requested by adaptive in the initial grid
                                    # NOTE(review): original comment claimed "default = False" but the value is True — confirm intent
  self.startAdaptive = False        # Flag to trigger the begin of the adaptive limit surface search
  self.adaptiveReady = False        # Flag to store the response of the LimitSurfaceSearch.localStillReady method
  self.investigatedPoints = []      # List containing the points that have been already investigated
  self.completedHistCnt = 1         # Counter of the completed histories
  self.hybridDETstrategy = None     # Integer flag to turn the hybrid strategy on:
                                    # None -> No hybrid approach,
                                    # 1    -> the epistemic variables are going to be part of the limit surface search
                                    # 2    -> the epistemic variables are going to be treated by a normal hybrid DET approach and the LimitSurface search
                                    #         will be performed on each epistemic tree (n LimitSurfaces)
  self.foundEpistemicTree = False   # flag that testifies if an epistemic tree has been found (Adaptive Hybrid DET)
  self.actualHybridTree = ''        # name of the root tree used in self.hybridDETstrategy=2 to check which Tree needs to be used for the current LS search
  self.sortedListOfHists = []       # sorted list of histories
@staticmethod
def _checkIfRunning(treeValues):
  """
    Static method (no self) that checks if a job is running
    @ In, treeValues, TreeStructure.Node, the node in which the running info are stored
    @ Out, _checkIfRunning, bool, is it running?
  """
  # A branch is still running exactly when its run has not ended yet.
  ended = treeValues['runEnded']
  return not ended
@staticmethod
def _checkEnded(treeValues):
  """
    Static method (no self) that checks if a job finished to run
    @ In, treeValues, TreeStructure.Node, the node in which the running info are stored
    @ Out, _checkEnded, bool, is it finished?
  """
  # Pass the node's own completion flag straight through.
  runEnded = treeValues['runEnded']
  return runEnded
@staticmethod
def _checkCompleteHistory(treeValues):
  """
    Static method (no self) that checks if a 'branch' represents a completed history
    @ In, treeValues, TreeStructure.Node, the node in which the running info are stored
    @ Out, _checkCompleteHistory, bool, is it a completed history (hit the last threshold?)
  """
  # A node marks itself complete via the 'completedHistory' entry.
  isComplete = treeValues['completedHistory']
  return isComplete
def _localWhatDoINeed(self):
  """
    This method is a local mirror of the general whatDoINeed method.
    It is implemented by the samplers that need to request special objects
    @ In, None
    @ Out, needDict, dict, dictionary listing needed objects
  """
  # Merge the needs of both parents; on key collision the DET entry wins,
  # matching the original chain(LimitSurfaceSearch, DynamicEventTree) order.
  needDict = dict(LimitSurfaceSearch._localWhatDoINeed(self))
  needDict.update(DynamicEventTree._localWhatDoINeed(self))
  return needDict
def _checkIfStartAdaptive(self):
  """
    Function that checks if the adaptive needs to be started (mode 1)
    @ In, None
    @ Out, None
  """
  # Once the adaptive search is on, it stays on.
  if self.startAdaptive:
    return
  if len(self.lastOutput) == 0:
    # No collected output yet: the adaptive search cannot start.
    self.startAdaptive = False
    return
  self.startAdaptive = True
  for tree in self.TreeInfo.values():
    # any() short-circuits on the first still-running branch, like the
    # original inner-loop break.
    if any(True for _ in tree.iterProvidedFunction(self._checkIfRunning)):
      self.startAdaptive = False
      break
def _checkClosestBranch(self):
  """
    Function that checks the closest branch already evaluated
    @ In, None
    @ Out, returnTuple, tuple, closest branch info:
      - if self.hybridDETstrategy and branch found         -> returnTuple = (valBranch,cdfValues,treer)
      - if self.hybridDETstrategy and branch not found     -> returnTuple = (None,cdfValues,treer)
      - if not self.hybridDETstrategy and branch found     -> returnTuple = (valBranch,cdfValues)
      - if not self.hybridDETstrategy and branch not found -> returnTuple = (None,cdfValues)
  """
  # sklearn is imported locally: it is only needed by this method
  from sklearn import neighbors
  # compute cdf of sampled vars
  lowerCdfValues = {}
  cdfValues = {}
  self.raiseADebug("Check for closest branch:")
  self.raiseADebug("_"*50)
  for key,value in self.values.items():
    self.raiseADebug("Variable name : "+str(key))
    self.raiseADebug("Distribution name: "+str(self.toBeSampled[key]))
    # epistemic variables are handled by the hybrid trees, not the CDF grid
    if key not in self.epistemicVariables.keys():
      cdfValues[key] = self.distDict[key].cdf(value)
      # largest already-gridded threshold that does not exceed this CDF value
      try:
        index = utils.first(np.atleast_1d(np.asarray(self.branchProbabilities[key]) <= cdfValues[key]).nonzero())[-1]
        val = self.branchProbabilities[key][index]
      except (ValueError, IndexError):
        # requested point lies below every existing threshold
        val = None
      lowerCdfValues[key] = val
      self.raiseADebug("CDF value : "+str(cdfValues[key]))
      self.raiseADebug("Lower CDF found : "+str(lowerCdfValues[key]))
  self.raiseADebug("_"*50)
  #if hybrid DET, we need to find the correct tree that matches the values of the epistemic
  if self.hybridDETstrategy is not None:
    self.foundEpistemicTree, treer, compareDict = False, None, dict.fromkeys(self.epistemicVariables.keys(),False)
    for tree in self.TreeInfo.values():
      epistemicVars = tree.getrootnode().get("hybridsamplerCoordinate")[0]['SampledVars']
      for key in self.epistemicVariables.keys():
        compareDict[key] = utils.compare(epistemicVars[key],self.values[key])
      if all(compareDict.values()):
        # we found the right epistemic tree
        self.foundEpistemicTree, treer = True, tree
        break
  else:
    treer = utils.first(self.TreeInfo.values())
  # check if in the adaptive points already explored (if not push into the grid)
  if not self.insertAdaptBPb:
    candidatesBranch = []
    # check if adaptive point is better choice -> TODO: improve efficiency
    for invPoint in self.investigatedPoints:
      pbth = [invPoint[self.toBeSampled[key]] for key in cdfValues.keys()]
      if all(i <= pbth[cnt] for cnt,i in enumerate(cdfValues.values())):
        candidatesBranch.append(invPoint)
    if len(candidatesBranch) > 0:
      if None in lowerCdfValues.values():
        lowerCdfValues = candidatesBranch[0]
      # keep the dominating investigated point (component-wise largest)
      for invPoint in candidatesBranch:
        pbth = [invPoint[self.toBeSampled[key]] for key in cdfValues.keys()]
        if all(i >= pbth[cnt] for cnt,i in enumerate(lowerCdfValues.values())):
          lowerCdfValues = invPoint
  # Check if The adaptive point requested is outside the so far run grid; in case return None
  # In addition, if Adaptive Hybrid DET, if treer is None, we did not find any tree
  # in the epistemic space => we need to create another one
  if None in lowerCdfValues.values() or treer is None:
    if self.hybridDETstrategy is not None:
      returnTuple = None, cdfValues, treer
    else:
      returnTuple = None, cdfValues
    return returnTuple
  nntrain, mapping = None, {}
  for ending in treer.iterProvidedFunction(self._checkEnded):
    #already ended branches, create training set for nearest algorithm (take coordinates <= of cdfValues) -> TODO: improve efficiency
    pbth = [ending.get('SampledVarsPb')[key] for key in lowerCdfValues.keys()]
    if all(pbth[cnt] <= i for cnt,i in enumerate(lowerCdfValues.values())):
      if nntrain is None:
        nntrain = np.zeros((1,len(cdfValues.keys())))
        nntrain[0,:] = np.array(copy.copy(pbth))
      else:
        nntrain = np.concatenate((nntrain,np.atleast_2d(np.array(copy.copy(pbth)))),axis=0)
      # mapping keys are 1-based row counts; _checkValidityOfBranch adds 1 back
      mapping[nntrain.shape[0]] = ending
  if nntrain is not None:
    # all candidate rows are requested as neighbors, ranked by distance
    neigh = neighbors.NearestNeighbors(n_neighbors=len(mapping.keys()))
    neigh.fit(nntrain)
    valBranch = self._checkValidityOfBranch(neigh.kneighbors([list(lowerCdfValues.values())]),mapping)
    if self.hybridDETstrategy is not None:
      returnTuple = valBranch,cdfValues,treer
    else:
      returnTuple = valBranch,cdfValues
    return returnTuple
  else:
    returnTuple = (None,cdfValues,treer) if self.hybridDETstrategy is not None else (None,cdfValues)
    return returnTuple
def _checkValidityOfBranch(self, branchSet, mapping):
  """
    Function that checks if the nearest branches found by method _checkClosestBranch are valid
    @ In, branchSet, tuple, tuple of branches
    @ In, mapping, dict, dictionary of candidate branches
    @ Out, validBranch, TreeStructure.Node, most valid branch (if not found, return None)
  """
  # branchSet[1][-1] holds the neighbor indices ranked by distance;
  # mapping keys are 1-based, hence the +1 offset.
  candidateIds = branchSet[1][-1]
  for branchId in candidateIds:
    node = mapping[branchId + 1]
    # a usable branch has neither finished its history nor fired an event
    if not node.get('completedHistory') and not node.get('happenedEvent'):
      return node
  return None
def _retrieveBranchInfo(self, branch):
  """
    Function that retrieves the key information from a branch to start a newer calculation
    @ In, branch, TreeStructure.Node, the branch to inquire
    @ Out, info, dict, the dictionary with information on the inputted branch
  """
  info = branch.getValues()
  # augment the node's own values with branching count and a back-reference
  info.update(actualBranchOnLevel=branch.numberBranches(), parentNode=branch)
  return info
def _constructEndInfoFromBranch(self, model, myInput, info, cdfValues):
  """
    Method to construct the end information from the 'info' inputted
    @ In, model, Models object, the model that is used to explore the input space (e.g. a code, like RELAP-7)
    @ In, myInput, list, list of inputs for the Models object (passed through the Steps XML block)
    @ In, info, dict, dictionary of information at the end of a branch (information collected by the method _retrieveBranchInfo)
    @ In, cdfValues, dict, dictionary of CDF thresholds reached by the branch that just ended.
    @ Out, None
  """
  endInfo = info['parentNode'].get('endInfo')
  #del self.inputInfo
  self.counter += 1
  self.branchCountOnLevel = info['actualBranchOnLevel']+1
  # Get Parent node name => the branch name is creating appending to this name a comma and self.branchCountOnLevel counter
  rname = info['parentNode'].get('name') + '-' + str(self.branchCountOnLevel)
  info['parentNode'].add('completedHistory', False)
  self.raiseADebug(str(rname))
  bcnt = self.branchCountOnLevel
  # bump the counter until the branch name is unique among existing siblings
  while info['parentNode'].isAnActualBranch(rname):
    bcnt += 1
    rname = info['parentNode'].get('name') + '-' + str(bcnt)
  # create a subgroup that will be appended to the parent element in the xml tree structure
  subGroup = ETS.HierarchicalNode(rname)
  subGroup.add('parent', info['parentNode'].get('name'))
  subGroup.add('name', rname)
  self.raiseADebug('cond pb = '+str(info['parentNode'].get('conditionalPb')))
  condPbC = float(info['parentNode'].get('conditionalPb'))
  # Loop over branchChangedParams (events) and start storing information,
  # such as conditional pb, variable values, into the xml tree object
  branchChangedParamValue = []
  branchChangedParamPb = []
  branchParams = []
  if endInfo:
    for key in endInfo['branchChangedParams'].keys():
      branchParams.append(key)
      branchChangedParamPb.append(endInfo['branchChangedParams'][key]['associatedProbability'][0])
      branchChangedParamValue.append(endInfo['branchChangedParams'][key]['oldValue'][0])
  subGroup.add('branchChangedParam',branchParams)
  subGroup.add('branchChangedParamValue',branchChangedParamValue)
  subGroup.add('branchChangedParamPb',branchChangedParamPb)
  # add conditional probability
  subGroup.add('conditionalPb',condPbC)
  # add initiator distribution info, start time, etc.
  subGroup.add('startTime', info['parentNode'].get('endTime'))
  # initialize the endTime to be equal to the start one... It will modified at the end of this branch
  subGroup.add('endTime', info['parentNode'].get('endTime'))
  # branch calculation info... running, queue, etc are set here
  subGroup.add('runEnded',False)
  subGroup.add('running',False)
  subGroup.add('queue',True)
  subGroup.add('completedHistory', False)
  subGroup.add('happenedEvent', True)
  subGroup.add('triggeredVariable',info['parentNode'].get('triggeredVariable'))
  # Append the new branch (subgroup) info to the parentNode in the tree object
  info['parentNode'].appendBranch(subGroup)
  # Fill the values dictionary that will be passed into the model in order to create an input
  # In this dictionary the info for changing the original input is stored
  self.inputInfo.update({'prefix':rname,'endTimeStep':info['parentNode'].get('actualEndTimeStep'),
                         'branchChangedParam':subGroup.get('branchChangedParam'),
                         'branchChangedParamValue':subGroup.get('branchChangedParamValue'),
                         'conditionalPb':subGroup.get('conditionalPb'),
                         'startTime':info['parentNode'].get('endTime'),
                         'happenedEvent':subGroup.get('happenedEvent'),
                         'triggeredVariable':subGroup.get('triggeredVariable'),
                         'RAVEN_parentID':subGroup.get('parent'),
                         'RAVEN_isEnding':True})
  # add the newer branch name to the map
  self.rootToJob[rname] = self.rootToJob[subGroup.get('parent')]
  # check if it is a preconditioned DET sampling, if so add the relative information
  # it exists only in case an hybridDET strategy is activated
  precSampled = info['parentNode'].get('hybridsamplerCoordinate')
  if precSampled:
    self.inputInfo['hybridsamplerCoordinate'] = copy.deepcopy(precSampled)
    subGroup.add('hybridsamplerCoordinate', copy.copy(precSampled))
  # The probability Thresholds are stored here in the cdfValues dictionary... We are sure that they are whitin the ones defined in the grid
  # check is not needed
  self.inputInfo['initiatorDistribution'] = [self.toBeSampled[key] for key in cdfValues.keys()]
  self.inputInfo['PbThreshold'] = list(cdfValues.values())
  self.inputInfo['ValueThreshold'] = [self.distDict[key].ppf(value) for key,value in cdfValues.items()]
  self.inputInfo['SampledVars'] = {}
  self.inputInfo['SampledVarsPb'] = {}
  for varname in self.standardDETvariables:
    # map each CDF threshold back to the variable's value space via ppf
    self.inputInfo['SampledVars'][varname] = self.distDict[varname].ppf(cdfValues[varname])
    self.inputInfo['SampledVarsPb'][varname] = cdfValues[varname]
  # constant variables
  self._constantVariables()
  if precSampled:
    for precSample in precSampled:
      self.inputInfo['SampledVars'].update(precSample['SampledVars'])
      self.inputInfo['SampledVarsPb'].update(precSample['SampledVarsPb'])
  # point probability = product of all hybrid-sampler pbs (1.0 if none) times the conditional pb
  pointPb = reduce(mul,[it for sub in [pre['SampledVarsPb'].values() for pre in precSampled ] for it in sub] if precSampled else [1.0])
  self.inputInfo['PointProbability'] = pointPb*subGroup.get('conditionalPb')
  self.inputInfo['ProbabilityWeight'] = self.inputInfo['PointProbability']
  self.inputInfo.update({'ProbabilityWeight-'+key.strip():value for key,value in self.inputInfo['SampledVarsPb'].items()})
  # add additional edits if needed
  model.getAdditionalInputEdits(self.inputInfo)
  # Add the new input path into the RunQueue system
  newInputs = {'args':[str(self.type)], 'kwargs': dict(self.inputInfo)}
  self.RunQueue['queue'].append(newInputs)
  self.RunQueue['identifiers'].append(self.inputInfo['prefix'])
  # mirror the whole inputInfo onto the tree node for later inspection
  for key,value in self.inputInfo.items():
    subGroup.add(key,copy.copy(value))
  if endInfo:
    subGroup.add('endInfo',copy.deepcopy(endInfo))
def localStillReady(self, ready):
  """
    first perform some check to understand what it needs to be done possibly perform an early return
    ready is returned
    @ In, ready, bool, a boolean representing whether the caller is prepared for another input.
    @ Out, ready, bool, a boolean representing whether the caller is prepared for another input.
  """
  # Before any sample has been taken everything is trivially ready.
  if self.counter == 0:
    return True
  detReady = len(self.RunQueue['queue']) != 0
  if not detReady:
    # since the RunQueue is empty, let's check if there are still branches running => if not => start the adaptive search
    self._checkIfStartAdaptive()
    if self.startAdaptive:
      data = self.lastOutput.asDataset()
      endingData = data.where(data['RAVEN_isEnding'] == True, drop=True)
      numCompletedHistories = len(endingData['RAVEN_isEnding'])
      # Fix: the original tested this same condition twice back to back;
      # one guarded dict build, one guarded the LS update. Merged into one.
      if numCompletedHistories > self.completedHistCnt:
        lastOutDict = {key: endingData[key].values for key in endingData.keys()}
        # temporarily swap lastOutput so the LS search sees only ended histories
        actualLastOutput = self.lastOutput
        self.lastOutput = copy.deepcopy(lastOutDict)
        ready = LimitSurfaceSearch.localStillReady(self, ready)
        self.lastOutput = actualLastOutput
        self.completedHistCnt = numCompletedHistories
        self.raiseAMessage("Completed full histories are " + str(self.completedHistCnt))
      else:
        ready = False
      self.adaptiveReady = ready
      return ready or detReady
  return detReady
def localGenerateInput(self, model, myInput):
  """
    Function to select the next most informative point for refining the limit
    surface search.
    After this method is called, the self.inputInfo should be ready to be sent
    to the model
    @ In, model, model instance, an instance of a model
    @ In, myInput, list, a list of the original needed inputs for the model (e.g. list of files, etc.)
    @ Out, None
  """
  if self.startAdaptive == True and self.adaptiveReady == True:
    # adaptive phase: let the limit-surface search pick the next candidate
    LimitSurfaceSearch.localGenerateInput(self,model,myInput)
    #the adaptive sampler created the next point sampled vars
    #find the closest branch
    if self.hybridDETstrategy is not None:
      closestBranch, cdfValues, treer = self._checkClosestBranch()
    else:
      closestBranch, cdfValues = self._checkClosestBranch()
    if closestBranch is None:
      self.raiseADebug('An usable branch for next candidate has not been found => create a parallel branch!')
    # add pbthresholds in the grid
    investigatedPoint = {}
    for key,value in cdfValues.items():
      # find the insertion slot among the sorted thresholds; 0 if below all
      try:
        ind = utils.first(np.atleast_1d(np.asarray(self.branchProbabilities[key]) <= value).nonzero())[-1]
      except (IndexError, ValueError):
        ind = 0
      if value not in self.branchProbabilities[key]:
        self.branchProbabilities[key].insert(ind,value)
        self.branchValues[key].insert(ind,self.distDict[key].ppf(value))
      investigatedPoint[key] = value
    # collect investigated point
    self.investigatedPoints.append(investigatedPoint)
    if closestBranch:
      # restart the calculation from the closest compatible branch
      info = self._retrieveBranchInfo(closestBranch)
      self._constructEndInfoFromBranch(model, myInput, info, cdfValues)
    else:
      # create a new tree, since there are no branches that are close enough to the adaptive request
      elm = ETS.HierarchicalNode(self.name + '_' + str(len(self.TreeInfo.keys())+1))
      elm.add('name', self.name + '_'+ str(len(self.TreeInfo.keys())+1))
      elm.add('startTime', 0.0)
      # Initialize the endTime to be equal to the start one...
      # It will modified at the end of each branch
      elm.add('endTime', 0.0)
      elm.add('runEnded',False)
      elm.add('running',True)
      elm.add('queue',False)
      elm.add('completedHistory', False)
      branchedLevel = {}
      for key,value in cdfValues.items():
        # value was just inserted into branchProbabilities above, so == matches
        branchedLevel[key] = utils.first(np.atleast_1d(np.asarray(self.branchProbabilities[key]) == value).nonzero())[-1]
      # The dictionary branchedLevel is stored in the xml tree too. That's because
      # the advancement of the thresholds must follow the tree structure
      elm.add('branchedLevel', branchedLevel)
      if self.hybridDETstrategy is not None and not self.foundEpistemicTree:
        # adaptive hybrid DET and not found a tree in the epistemic space
        # take the first tree and modify the hybridsamplerCoordinate
        hybridSampled = copy.deepcopy(utils.first(self.TreeInfo.values()).getrootnode().get('hybridsamplerCoordinate'))
        for hybridStrategy in hybridSampled:
          for key in self.epistemicVariables.keys():
            if key in hybridStrategy['SampledVars'].keys():
              self.raiseADebug("epistemic var " + str(key)+" value = "+str(self.values[key]))
              hybridStrategy['SampledVars'][key] = copy.copy(self.values[key])
              hybridStrategy['SampledVarsPb'][key] = self.distDict[key].pdf(self.values[key])
          hybridStrategy['prefix'] = len(self.TreeInfo.values())+1
          # TODO: find a strategy to recompute the probability weight here (for now == PointProbability)
          hybridStrategy['PointProbability'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())
          hybridStrategy['ProbabilityWeight'] = reduce(mul, self.inputInfo['SampledVarsPb'].values())
        elm.add('hybridsamplerCoordinate', hybridSampled)
      self.inputInfo.update({'ProbabilityWeight-'+key.strip():value for key,value in self.inputInfo['SampledVarsPb'].items()})
      # Here it is stored all the info regarding the DET => we create the info for all the branchings and we store them
      # NOTE: after the insertion below len(self.TreeInfo) has grown by one, so
      # the second lookup (without +1) resolves to the tree just created.
      self.TreeInfo[self.name + '_' + str(len(self.TreeInfo.keys())+1)] = ETS.HierarchicalTree(elm)
      self._createRunningQueueBeginOne(self.TreeInfo[self.name + '_' + str(len(self.TreeInfo.keys()))],branchedLevel, model,myInput)
  return DynamicEventTree.localGenerateInput(self,model,myInput)
def localInputAndChecks(self, xmlNode, paramInput):
  """
    Class specific xml inputs will be read here and checked for validity.
    @ In, xmlNode, xml.etree.ElementTree.Element, The xml element node that will be checked against the available options specific to this Sampler.
    @ In, paramInput, InputData.ParameterInput, the parsed parameters
    @ Out, None
  """
  #TODO remove using xmlNode
  #check if the hybrid DET has been activated, in case remove the nodes and treat them separately
  hybridNodes = xmlNode.findall("HybridSampler")
  if len(hybridNodes) != 0:
    # check the type of hybrid that needs to be performed
    limitSurfaceHybrid = False
    for elm in hybridNodes:
      samplType = elm.attrib['type'] if 'type' in elm.attrib.keys() else None
      if samplType == 'LimitSurface':
        if len(hybridNodes) != 1:
          self.raiseAnError(IOError,'if one of the HybridSampler is of type "LimitSurface", it can not be combined with other strategies. Only one HybridSampler node can be inputted!')
        limitSurfaceHybrid = True
    if limitSurfaceHybrid == True:
      #remove the elements from original xmlNode and check if the types are compatible
      for elm in hybridNodes:
        xmlNode.remove(elm)
      self.hybridDETstrategy = 1
    else:
      self.hybridDETstrategy = 2
    if self.hybridDETstrategy == 2:
      self.raiseAnError(IOError, 'The sheaf of LSs for the Adaptive Hybrid DET is not yet available. Use type "LimitSurface"!')
  # parse the DET part first (HybridSampler nodes are removed at this point)
  DynamicEventTree.localInputAndChecks(self,xmlNode, paramInput)
  # now we put back the nodes into the xmlNode to initialize the LimitSurfaceSearch with those variables as well
  for elm in hybridNodes:
    for child in elm:
      if limitSurfaceHybrid == True:
        xmlNode.append(child)
      if child.tag in ['variable','Distribution']:
        self.epistemicVariables[child.attrib['name']] = None
  LimitSurfaceSearch._readMoreXMLbase(self,xmlNode)
  LimitSurfaceSearch.localInputAndChecks(self,xmlNode, paramInput)
  # 'online' runs the LS search during the DET exploration, 'post' afterwards
  if 'mode' in xmlNode.attrib.keys():
    if xmlNode.attrib['mode'].lower() == 'online':
      self.detAdaptMode = 2
    elif xmlNode.attrib['mode'].lower() == 'post':
      self.detAdaptMode = 1
    else:
      self.raiseAnError(IOError,'unknown mode ' + xmlNode.attrib['mode'] + '. Available are "online" and "post"!')
  if 'noTransitionStrategy' in xmlNode.attrib.keys():
    if xmlNode.attrib['noTransitionStrategy'].lower() == 'mc':
      self.noTransitionStrategy = 1
    elif xmlNode.attrib['noTransitionStrategy'].lower() == 'grid':
      self.noTransitionStrategy = 2
    else:
      self.raiseAnError(IOError,'unknown noTransitionStrategy '+xmlNode.attrib['noTransitionStrategy']+'. Available are "mc" and "grid"!')
  if 'updateGrid' in xmlNode.attrib.keys():
    if utils.stringIsTrue(xmlNode.attrib['updateGrid']):
      self.insertAdaptBPb = True
  # we add an artificial threshold because I need to find a way to prepend a rootbranch into a Tree object
  for val in self.branchProbabilities.values():
    if min(val) != 1e-3:
      val.insert(0, 1e-3)
  def _generateDistributions(self,availableDist,availableFunc):
    """
      Generates the distributions and functions.
      @ In, availableDist, dict, dict of distributions
      @ In, availableFunc, dict, dict of functions
      @ Out, None
    """
    # Pure delegation: the adaptive sampler reuses the DET distribution setup unchanged.
    DynamicEventTree._generateDistributions(self,availableDist,availableFunc)
  def localInitialize(self,solutionExport = None):
    """
      Will perform all initialization specific to this Sampler. For instance,
      creating an empty container to hold the identified surface points, error
      checking the optionally provided solution export and other preset values,
      and initializing the limit surface Post-Processor used by this sampler.
      @ In, solutionExport, DataObjects, optional, a PointSet to hold the solution (a list of limit surface points)
      @ Out, None
    """
    # detAdaptMode == 2 means 'online' mode: the adaptive search starts right away
    if self.detAdaptMode == 2:
      self.startAdaptive = True
    # we first initialize the LimitSurfaceSearch sampler
    LimitSurfaceSearch.localInitialize(self,solutionExport=solutionExport)
    if self.hybridDETstrategy is not None:
      # we are running an adaptive hybrid DET and not only an adaptive DET
      if self.hybridDETstrategy == 1:
        gridVector = self.limitSurfacePP.gridEntity.returnParameter("gridVectors")
        # construct an hybrid DET through an XML node
        distDict, xmlNode = {}, ET.fromstring('<InitNode> <HybridSampler type="Grid" name="none"/> </InitNode>')
        for varName, dist in self.distDict.items():
          if varName.replace('<distribution>','') in self.epistemicVariables.keys():
            # found an epistemic variable: mirror it into the hybrid Grid sampler node
            varNode = ET.Element('Distribution' if varName.startswith('<distribution>') else 'variable',{'name':varName.replace('<distribution>','')})
            varNode.append(ET.fromstring("<distribution>"+dist.name.strip()+"</distribution>"))
            distDict[dist.name.strip()] = self.distDict[varName]
            # custom grid for this variable, built from the limit-surface grid vectors
            varNode.append(ET.fromstring('<grid construction="custom" type="value">'+' '.join([str(elm) for elm in utils.first(gridVector.values())[varName.replace('<distribution>','')]])+'</grid>'))
            xmlNode.find("HybridSampler").append(varNode)
        #TODO, need to pass real paramInput
        self._localInputAndChecksHybrid(xmlNode, paramInput=None)
        for hybridsampler in self.hybridStrategyToApply.values():
          hybridsampler._generateDistributions(distDict, {})
    DynamicEventTree.localInitialize(self)
    if self.hybridDETstrategy == 2:
      # keep track of the tree the adaptive hybrid search currently works on
      self.actualHybridTree = utils.first(self.TreeInfo.keys())
    # this sampler cannot be batched: mark the runnable job count as unbounded
    self._endJobRunnable = sys.maxsize
  def generateInput(self,model,oldInput):
    """
      This method has to be overwritten to provide the specialization for the specific sampler
      The model instance in might be needed since, especially for external codes,
      only the code interface possesses the dictionary for reading the variable definition syntax
      @ In, model, model instance, it is the instance of a RAVEN model
      @ In, oldInput, list, a list of the original needed inputs for the model (e.g. list of files, etc. etc)
      @ Out, generateInput, tuple(0,list), list containing the new inputs -in reality it is the model that returns this; the Sampler generates the value to be placed in the input of the model.
    """
    # Input generation is delegated entirely to the DET machinery.
    return DynamicEventTree.generateInput(self, model, oldInput)
def localFinalizeActualSampling(self,jobObject,model,myInput):
"""
General function (available to all samplers) that finalize the sampling
calculation just ended. In this case (DET), The function reads the
information from the ended calculation, updates the working variables, and
creates the new inputs for the next branches
@ In, jobObject, instance, an instance of a JobHandler
@ In, model, model instance, it is the instance of a RAVEN model
@ In, myInput, list, the generating input
@ Out, None
"""
returncode = DynamicEventTree.localFinalizeActualSampling(self,jobObject,model,myInput,genRunQueue=False)
forceEvent = True if self.startAdaptive else False
if returncode:
self._createRunningQueue(model,myInput, forceEvent)
| {
"content_hash": "ee17354a6ca864da9cbf35d380a561d9",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 199,
"avg_line_length": 53.15358931552588,
"alnum_prop": 0.6764973774301957,
"repo_name": "idaholab/raven",
"id": "e37ee36e5528c27508acea83e6fd4671e1fcb329",
"size": "32428",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "ravenframework/Samplers/AdaptiveDynamicEventTree.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1556316"
},
{
"name": "Batchfile",
"bytes": "1095"
},
{
"name": "C",
"bytes": "148504"
},
{
"name": "C++",
"bytes": "48279546"
},
{
"name": "CMake",
"bytes": "9998"
},
{
"name": "Jupyter Notebook",
"bytes": "84202"
},
{
"name": "MATLAB",
"bytes": "202335"
},
{
"name": "Makefile",
"bytes": "2399"
},
{
"name": "Perl",
"bytes": "1297"
},
{
"name": "Python",
"bytes": "7004752"
},
{
"name": "R",
"bytes": "67"
},
{
"name": "SWIG",
"bytes": "8622"
},
{
"name": "Shell",
"bytes": "124289"
},
{
"name": "TeX",
"bytes": "479725"
}
],
"symlink_target": ""
} |
import sys
from shutil import copy2
from copy import deepcopy
from collections import namedtuple
from macholib.MachO import MachO
from macholib.ptypes import sizeof
from macholib.mach_o import *
from .common.macho_helpers import modify_macho_file_headers
def insert_load_command(target_path, library_install_name):
    """ Inserts a new LC_LOAD_DYLIB load command into the target Mach-O header.

    Note: the target file will be overwritten. Consider backing it up first
    before calling this function.

    Returns True if everything is OK. Otherwise raises an exception.
    """
    def patch_one_header(header):
        # Build the new LC_LOAD_DYLIB command and splice it into this header.
        new_command = generate_dylib_load_command(header, library_install_name)
        return insert_load_command_into_header(header, new_command)

    return modify_macho_file_headers(target_path, patch_one_header)
def macho_dependencies_list(target_path, header_magic=None):
    """ Generates a list of libraries the given Mach-O file depends on.

    In that list a single library is represented by its "install path": for some
    libraries it would be a full file path, and for others it would be a relative
    path (sometimes with dyld templates like @executable_path or @rpath in it).

    Note: I don't know any reason why would some architectures of a fat Mach-O depend
    on certain libraries while others don't, but *it's technically possible*.
    So that's why you may want to specify the `header_magic` value for a particular header.

    Returns a namedtuple with two fields: `weak` and `strong` that hold lists of weak
    and strong dependencies respectively.
    """
    MachODependencies = namedtuple("MachODependencies", "weak strong")
    # Convert the magic value into macholib representation if needed.
    # NOTE(review): `basestring` keeps this function Python 2 only, matching the
    # rest of this module -- confirm before porting to Python 3.
    if isinstance(header_magic, basestring):
        header_magic = _MH_MAGIC_from_string(header_magic)
    macho = MachO(target_path)
    # Pick the first header matching the requested magic value
    # (or just the first header when no magic value was given).
    matching_headers = [h for h in macho.headers
                        if header_magic is None or h.header.magic == header_magic]
    if not matching_headers:
        raise Exception("Unable to find a header for the given MAGIC value in that Mach-O file")
    header = matching_headers[0]

    def decode_load_command_data(data):
        # The install path is NUL-padded: cut at the first NUL byte.
        return data[:data.find(b"\x00")].decode(sys.getfilesystemencoding())

    def references_from_header(h, load_command_constant):
        # Install paths of all load commands of the given kind.
        return [decode_load_command_data(data)
                for (lc, cmd, data) in h.commands
                if lc.cmd == load_command_constant]

    strong_refs = references_from_header(header, LC_LOAD_DYLIB)
    weak_refs = references_from_header(header, LC_LOAD_WEAK_DYLIB)
    return MachODependencies(weak=weak_refs, strong=strong_refs)
def insert_load_command_into_header(header, load_command):
    """ Appends the given (load_command, dylib_command, path) triple to the
    header's command list and adjusts the header bookkeeping (command count
    and total size). """
    new_lc = load_command[0]
    header.commands.append(load_command)
    header.header.ncmds += 1
    header.changedHeaderSizeBy(new_lc.cmdsize)
def generate_dylib_load_command(header, libary_install_name):
    """ Generates a LC_LOAD_DYLIB command for the given header and a library install path.

    Note: the header must already contain at least one LC_LOAD_DYLIB command (see code comments).
    (The parameter is spelled `libary_install_name` in the original API and is kept
    for backward compatibility.)

    Returns a ready-for-use load_command in terms of macholib: a
    (load_command, dylib_command, aligned path bytes) triple.
    """
    # One can not simply create instances of `dylib_command` and `load_command` classes,
    # because that's just not the way macholib works. If we try then all we'll get is a bunch
    # of endian (big/little) issues when these objects are serialized into a file.
    # So instead we iterate *existing* load commands, find a dylib_command, copy it
    # and modify this copy. Such an existing command is fully initialized already.
    lc = None
    cmd = None
    for (command, internal_cmd, data) in header.commands:
        if (command.cmd == LC_LOAD_DYLIB) and isinstance(internal_cmd, dylib_command):
            lc = deepcopy(command)
            cmd = deepcopy(internal_cmd)
            break
    # FIX: the original had an unreachable `return None` after this raise (dead code).
    if not lc or not cmd:
        raise Exception("Invalid Mach-O file. I mean, there must be at least one LC_LOAD_DYLIB load command.")
    # Well, now we just replace everything with our own stuff
    cmd.timestamp = 0
    cmd.current_version = cmd.compatibility_version = 0x1000
    # Since we store the library's path just after the load command itself,
    # the path's offset equals the combined size of the two command structs.
    base = sizeof(load_command) + sizeof(dylib_command)
    # `name` is rather a bad name for this property: it actually means the path string offset
    cmd.name = base
    # The whole thing must be aligned by 4 bytes on 32-bit arches and by 8 bytes on 64-bit arches
    align = 4 if header.header.magic == MH_MAGIC else 8
    aligned_name = libary_install_name + (b'\x00' * (align - (len(libary_install_name) % align)))
    # So now we finally can say what size this load_command is
    lc.cmdsize = base + len(aligned_name)
    return (lc, cmd, aligned_name)
def _MH_MAGIC_from_string(magic_name):
    """ Maps a magic constant name ('MH_MAGIC' or 'MH_MAGIC_64') to its macholib
    value; returns None for any other input.

    FIX: the parameter used to be named `str`, shadowing the builtin; this is a
    private helper only ever called positionally, so the rename is safe.
    """
    return {
        'MH_MAGIC': MH_MAGIC,
        'MH_MAGIC_64': MH_MAGIC_64,
    }.get(magic_name)
| {
"content_hash": "99d35d9293c90a1cb01ebb01baf3901c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 105,
"avg_line_length": 43.2,
"alnum_prop": 0.7375925925925926,
"repo_name": "wxdublin/machobot",
"id": "5af3e5679378420f0c6194e61cc3282830f308a4",
"size": "5623",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "machobot/dylib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1474"
},
{
"name": "Python",
"bytes": "12761"
}
],
"symlink_target": ""
} |
"""
WSGI config for mysite project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mysite.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "252e9cf5ffbe8b4c0bf6e645417879a2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 78,
"avg_line_length": 28,
"alnum_prop": 0.7647058823529411,
"repo_name": "gbriones1/django-skelleton",
"id": "64c7a1fb284aa56acdde351d23d3918341a4fcb4",
"size": "1073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mysite/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "12006"
},
{
"name": "HTML",
"bytes": "67882"
},
{
"name": "JavaScript",
"bytes": "129051"
},
{
"name": "Python",
"bytes": "244344"
},
{
"name": "Shell",
"bytes": "951"
}
],
"symlink_target": ""
} |
""" A lan connect class using udp
"""
__author__ = "Oliver Lindemann <oliver@expyriment.org>"
__version__ = "0.5"
import atexit
import os
import socket
from multiprocessing import Process, Event, Queue
import logging
from .types import UDPData
from .polling_time_profile import PollingTimeProfile
from .process_priority_manager import get_priority
from .timer import Timer, app_timer, get_time_ms
def get_lan_ip():
    """Return this host's LAN IP address (best effort, platform dependent)."""
    if os.name == "nt":
        # Windows: resolve the local hostname.
        # based on http://stackoverflow.com/questions/11735821/python-get-localhost-ip
        return socket.gethostbyname(socket.gethostname())
    # Linux: take the first address reported by `hostname -I`.
    from subprocess import check_output
    output = check_output("hostname -I".split(" "))
    addresses = output.decode().split(" ")
    return addresses[0].strip()
class UDPConnection(object):
    """Minimal peer-to-peer connection over UDP.

    Implements a tiny command protocol on top of a non-blocking UDP socket:
    a peer "connects" by sending CONNECT (we remember its IP and acknowledge),
    UNCONNECT drops it, PING measures round-trip time. All payloads are bytes;
    data from senders other than the connected peer is ignored.

    FIX: bare ``except:`` clauses (which also swallowed KeyboardInterrupt and
    SystemExit) were narrowed to ``OSError``, and ``== None`` comparisons were
    replaced by ``is None``.
    """
    # DOC document the usage "connecting" "unconnecting"
    COMMAND_CHAR = b"$"
    CONNECT = COMMAND_CHAR + b"connect"
    UNCONNECT = COMMAND_CHAR + b"unconnect"
    COMMAND_REPLY = COMMAND_CHAR + b"ok"
    PING = COMMAND_CHAR + b"ping"

    MY_IP = get_lan_ip()

    def __init__(self, udp_port=5005):
        """Bind a non-blocking UDP socket on this host's LAN IP and *udp_port*."""
        self.udp_port = udp_port
        self._socket = socket.socket(socket.AF_INET,   # Internet
                                     socket.SOCK_DGRAM)  # UDP
        self._socket.bind((UDPConnection.MY_IP, self.udp_port))
        self._socket.setblocking(False)
        self.peer_ip = None
        # own timer, because the connection is often used in its own process
        self.timer = Timer(sync_timer=app_timer)

    @property
    def my_ip(self):
        """The IP address this connection is bound to."""
        return UDPConnection.MY_IP

    def __str__(self):
        return "ip: {0} (port: {1}); peer: {2}".format(UDPConnection.MY_IP,
                                                       self.udp_port, self.peer_ip)

    def receive(self, timeout):
        """checks for received data and returns it

        In contrast to poll the function keeps polling until timeout if no new
        data are available.

        timeout in seconds
        """
        start = get_time_ms()
        timeout_ms = int(timeout * 1000)
        while True:
            rtn = self.poll()
            if rtn is not None:
                return rtn
            if (get_time_ms() - start) > timeout_ms:
                return None

    def poll(self):
        """returns data (bytes) or None if no data found

        Protocol commands are processed here as well; payloads from unknown
        senders are ignored.
        """
        try:
            data, sender = self._socket.recvfrom(1024)
        except OSError:
            # non-blocking socket: nothing pending (or a transient socket error)
            return None

        # process data
        if data == UDPConnection.CONNECT:
            # connection request: remember the peer and acknowledge
            self.peer_ip = sender[0]
            if not self.send(UDPConnection.COMMAND_REPLY):
                self.peer_ip = None
        elif sender[0] != self.peer_ip:
            return None  # ignore data from unknown senders
        elif data == UDPConnection.PING:
            self.send(UDPConnection.COMMAND_REPLY)
        elif data == self.UNCONNECT:
            self.unconnect_peer()
        return data

    def send(self, data, timeout=1.0):
        """returns if problems or not

        timeout in seconds (default = 1.0)
        return False if failed to send
        """
        if self.peer_ip is None:
            return False
        timeout_ms = int(timeout * 1000)
        start = get_time_ms()
        if isinstance(data, str):
            data = data.encode()  # force to byte
        while get_time_ms() - start < timeout_ms:
            try:
                self._socket.sendto(data, (self.peer_ip, self.udp_port))
                return True
            except OSError:
                # send buffer full or transient error: retry until timeout
                pass
        return False

    def connect_peer(self, peer_ip, timeout=1.0):
        """Try to connect to *peer_ip*; returns True when the peer acknowledged."""
        self.unconnect_peer()
        self.peer_ip = peer_ip
        if self.send(UDPConnection.CONNECT, timeout=timeout) and \
                self.wait_input(UDPConnection.COMMAND_REPLY, duration=timeout):
            return True
        self.peer_ip = None
        return False

    def wait_input(self, input_string, duration=1.0):
        """poll the connection and waits for a specific input"""
        start = get_time_ms()
        duration_ms = int(duration * 1000)
        while get_time_ms() - start < duration_ms:
            if self.poll() == input_string:
                return True
        return False

    def unconnect_peer(self, timeout=1.0):
        """Tell the peer we are leaving (best effort) and forget it."""
        self.send(UDPConnection.UNCONNECT, timeout=timeout)
        self.peer_ip = None

    @property
    def is_connected(self):
        return self.peer_ip is not None

    def ping(self, timeout=0.5):
        """returns boolean if succeeded and ping time in ms"""
        if self.peer_ip is None:
            return False, None
        start = get_time_ms()
        if self.send(UDPConnection.PING, timeout=timeout) and \
                self.wait_input(UDPConnection.COMMAND_REPLY, duration=timeout):
            return True, get_time_ms() - start
        return False, None

    def clear_receive_buffer(self):
        """Discard all data currently queued on the socket."""
        data = ""
        while data is not None:
            data = self.poll()

    def poll_last_data(self):
        """polls all data and returns only the last one

        return None if no data found"""
        rtn = None
        tmp = self.poll()
        while tmp is not None:
            rtn = tmp
            tmp = self.poll()
        return rtn
class UDPConnectionProcess(Process):
    """UDPConnectionProcess polls a UDPConnection and writes received data to
    its `receive_queue`; anything put into `send_queue` is sent to the peer.

    Example::

        # Server that prints each input and echos it to the client
        # that is currently connected
        from udp_connection import UDPConnectionProcess

        udp_p = UDPConnectionProcess()
        udp_p.start()
        udp_p.start_polling()
        while True:
            data = udp_p.receive_queue.get()
            print(data)
            if data is not None:
                udp_p.send_queue.put(data.string)
    """

    def __init__(self, event_trigger=(), event_ignore_tag=None):
        """Initialize UDPConnectionProcess

        Parameters
        ----------
        event_trigger: multiprocessing.Event() (or tuple/list of them)
            event trigger(s) to be set whenever UDP data is received
            (typical for sensor recording processes)
        event_ignore_tag:
            udp data that start with this tag will be ignored for event triggering
        """
        super(UDPConnectionProcess, self).__init__()
        self.receive_queue = Queue()
        self.send_queue = Queue()
        self.event_is_connected = Event()
        self._event_quit_request = Event()
        self._event_is_polling = Event()
        self._event_ignore_tag = event_ignore_tag
        # Accept a single Event as well as an iterable of Events.
        # FIX: the original tested `isinstance(event_trigger, type(Event))`,
        # which compares against the type of the Event *factory function* and
        # never matched an Event instance; and `(event_trigger)` (no comma) is
        # not a tuple -- so a single Event was silently dropped via the
        # TypeError fallback below.
        if not isinstance(event_trigger, (tuple, list)):
            event_trigger = (event_trigger,)
        try:
            self._event_trigger = tuple(event_trigger)
        except TypeError:
            self._event_trigger = ()
        atexit.register(self.quit)

    @property
    def my_ip(self):
        """IP address the underlying UDP connection binds to."""
        return UDPConnection.MY_IP

    def quit(self):
        """Request the polling loop to stop and wait for the process to end."""
        self._event_quit_request.set()
        if self.is_alive():
            self.join()

    def pause(self):
        """Suspend polling (the process keeps running)."""
        self._event_is_polling.clear()

    def start_polling(self):
        """(Re)start polling the UDP socket."""
        self._event_is_polling.set()

    def run(self):
        # local import: run() executes in the child process only
        from queue import Empty
        udp_connection = UDPConnection(udp_port=5005)
        self.start_polling()
        ptp = PollingTimeProfile()
        prev_event_polling = None
        while not self._event_quit_request.is_set():
            if prev_event_polling != self._event_is_polling.is_set():
                # polling state changed: log the transition
                prev_event_polling = self._event_is_polling.is_set()
                if prev_event_polling:
                    logging.warning("UDP start, pid {}, priority {}".format(
                        self.pid, get_priority(self.pid)))
                else:
                    logging.warning("UDP stop")
                    ptp.stop()

            if not self._event_is_polling.is_set():
                self._event_is_polling.wait(timeout=0.1)
            else:
                data = udp_connection.poll()
                t = udp_connection.timer.time
                ptp.update(t)
                if data is not None:
                    d = UDPData(string=data, time=t)
                    self.receive_queue.put(d)
                    # NOTE(review): the original called d.startswith(...), but d
                    # is a UDPData record whose payload is d.string -- confirm
                    # against the UDPData type definition.
                    if self._event_ignore_tag is not None and \
                            not d.string.startswith(self._event_ignore_tag):
                        for ev in self._event_trigger:
                            # set all connected triggers
                            ev.set()
                try:
                    udp_connection.send(self.send_queue.get_nowait())
                except Empty:
                    pass  # nothing queued for sending

                # has the connection state changed?
                if self.event_is_connected.is_set() != udp_connection.is_connected:
                    if udp_connection.is_connected:
                        self.event_is_connected.set()
                    else:
                        self.event_is_connected.clear()
                if not udp_connection.is_connected:
                    # avoid a busy loop while nobody is connected
                    udp_connection.timer.wait(200)

        udp_connection.unconnect_peer()
        logging.warning("UDP quit, {}".format(ptp.get_profile_str()))
| {
"content_hash": "4d1c30206eb60d7b85393560b1756f94",
"timestamp": "",
"source": "github",
"line_count": 318,
"max_line_length": 89,
"avg_line_length": 31.132075471698112,
"alnum_prop": 0.5522222222222222,
"repo_name": "lindemann09/pyForceDAQ",
"id": "6380a3c9d1131dd50cf5eb46ca464f8b3478bb6d",
"size": "9900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "forceDAQ/_lib/udp_connection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "393808"
},
{
"name": "Makefile",
"bytes": "1069"
},
{
"name": "Python",
"bytes": "149956"
},
{
"name": "R",
"bytes": "898"
}
],
"symlink_target": ""
} |
import unittest
from stix.test import EntityTestCase
from stix.common import CampaignRef
class CampaignRefTests(EntityTestCase, unittest.TestCase):
    """Round-trip (de)serialization tests for CampaignRef, driven by EntityTestCase."""
    # Entity class under test; EntityTestCase reads this attribute.
    klass = CampaignRef
    # Representative dictionary form that EntityTestCase round-trips
    # through the entity API and compares field by field.
    _full_dict = {
        'idref': "example:foo-1",
        'timestamp': "2014-01-31T06:14:46",
        'names': ["foo", "bar"]
    }
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "b92dec6e06caf1ad37ef54a21f769fd2",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 58,
"avg_line_length": 19,
"alnum_prop": 0.6232686980609419,
"repo_name": "STIXProject/python-stix",
"id": "eb407308f8ad5d37e5ed463d4a68e2f91a02f82d",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stix/test/common/campaign_reference_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "1422974"
}
],
"symlink_target": ""
} |
__version_info__ = {
'major': 2,
'minor': 4,
'micro': 2,
'releaselevel': 'beta',
'serial': 1
}
def get_version(short=False):
assert __version_info__['releaselevel'] in ('alpha', 'beta', 'final')
vers = ["%(major)i.%(minor)i.%(micro)i" % __version_info__, ]
if __version_info__['releaselevel'] != 'final' and not short:
vers.append('%s%i' % (__version_info__['releaselevel'][0],
__version_info__['serial']))
return ''.join(vers)
__version__ = get_version()
| {
"content_hash": "73825865713a7119e9e69bd7b501ff9e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 29.055555555555557,
"alnum_prop": 0.5315487571701721,
"repo_name": "murphyke/avocado",
"id": "0a84b8010baf3c8775a27a950dd1813ce885d1f3",
"size": "523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "avocado/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "18009"
},
{
"name": "Makefile",
"bytes": "84"
},
{
"name": "Python",
"bytes": "1035156"
},
{
"name": "R",
"bytes": "273"
},
{
"name": "SAS",
"bytes": "689"
},
{
"name": "Shell",
"bytes": "2369"
}
],
"symlink_target": ""
} |
# Game/server configuration constants. Semantics below are inferred from the
# names -- TODO confirm against the engine code that reads this module.
init_fun = 'main.init'            # entry function, 'module.attribute' path
entity_path = 'entities'          # directory holding server-side entity code
connect_entity = 'Account'        # entity created per client connection (presumably)
global_entity = 'Global'          # server-wide singleton entity (presumably)
max_aoi = 80 # max aoi in meter

# client options for debug
client_entity_path = 'client/entities'
entity_def_path = 'defs'
"content_hash": "e9e69cbf3f15c997aeb4706af72e5080",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 38,
"avg_line_length": 17.46153846153846,
"alnum_prop": 0.7048458149779736,
"repo_name": "dennisding/ether",
"id": "ff12c062481ebcc2d734739824e75081c07d5eb6",
"size": "251",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "75989"
}
],
"symlink_target": ""
} |
import logging
import pygame
from objects.globals.gamecolors import GameColors
from objects.globals.gamesettings import GameSettings
from objects.leaderboards.leaderboard_entry import LeaderboardEntry
from objects.leaderboards.score_service import ScoreService
from objects.screens.base_screen import BaseScreen
from objects.screens.game_screens import GameScreens
from other.py_text_input import TextInput
class GameOverScreen(BaseScreen):
    """Game over screen: shows the final score breakdown, lets the player type
    a nickname and persists the resulting leaderboard entry."""

    def __init__(self, resourceContext, localizationContext, leaderBoardService):
        """
        :param resourceContext: image/sound resources (also passed to BaseScreen)
        :param localizationContext: translated labels for this screen
        :param leaderBoardService: load/add/persist service for leaderboard entries
        """
        BaseScreen.__init__(self, resourceContext)
        self.__leaderBoardService = leaderBoardService
        self.__logger = logging.getLogger(GameOverScreen.__module__)
        self.__resourceContext = resourceContext
        self.__localizationContext = localizationContext

    def run(self, clock, screen, args=None):
        """Run the screen loop until the player confirms the entered nickname
        (or quits); stores the entry and returns {next-screen, entry}."""
        entry = LeaderboardEntry(args)
        bonus = ScoreService.count_bonus(entry)
        entry.score = ScoreService.count_final_score(entry, bonus)
        self.__leaderBoardService.load_leader_board()
        text_input = TextInput(font_size=25, antialias=True, text_color=GameColors.WHITE)
        running = True
        next_screen = GameScreens.LEADER_BOARD_SCREEN
        user_entered_nick = ''
        while running:
            clock.tick(GameSettings.FPS)
            # FIX: pygame.event.get() drains the event queue, but the original
            # called it twice per frame (once into `events`, once inside the
            # for-loop), so the key handling and the text input each saw only
            # half of the events. Fetch the batch once and share it.
            events = pygame.event.get()
            for event in events:
                if event.type == pygame.QUIT:
                    next_screen = GameScreens.QUIT_GAME
                    running = False
                if event.type == pygame.KEYUP:
                    if event.key == pygame.K_SPACE:
                        running = False
            user_accepted_text = text_input.update(events)
            screen.fill(GameColors.BLACK)
            screen.blit(self.__resourceContext.imgResources.background,
                        self.__resourceContext.imgResources.background.get_rect())
            # score summary, top to bottom
            self.draw_text(screen, self.__localizationContext.game_over_screen.title_label, 50,
                           GameSettings.WIDTH // 2,
                           GameSettings.HEIGHT // 2 - 300)
            self.draw_text(screen, self.__localizationContext.game_over_screen.score_label + str(entry.score), 50,
                           GameSettings.WIDTH // 2, GameSettings.HEIGHT // 2 - 200)
            self.draw_text(screen, self.__localizationContext.game_over_screen.bonus_label + str(bonus),
                           18,
                           GameSettings.WIDTH // 2, GameSettings.HEIGHT // 2 - 150)
            self.draw_text(screen, self.__localizationContext.game_over_screen.level_label + str(entry.level), 25,
                           GameSettings.WIDTH // 2, GameSettings.HEIGHT // 2 - 100)
            self.draw_text(screen, self.__localizationContext.game_over_screen.hits_label + str(entry.hits), 25,
                           GameSettings.WIDTH // 2, GameSettings.HEIGHT // 2 - 50)
            self.draw_text(screen, self.__localizationContext.game_over_screen.power_ups_label + str(entry.power_ups),
                           25,
                           GameSettings.WIDTH // 2, GameSettings.HEIGHT // 2)
            self.draw_text(screen, self.__localizationContext.game_over_screen.name_enter_label,
                           25,
                           GameSettings.WIDTH // 2, GameSettings.HEIGHT // 2 + 100)
            screen.blit(text_input.get_surface(), (GameSettings.WIDTH // 2 - 50, GameSettings.HEIGHT // 2 + 150))
            if len(user_entered_nick) > 0:
                # show the "press space to continue" hint once a name exists
                self.draw_text(screen, self.__localizationContext.game_over_screen.continue_label,
                               25,
                               GameSettings.WIDTH // 2, GameSettings.HEIGHT // 2 + 200)
            pygame.display.flip()
            user_entered_nick = text_input.get_text()
            if user_accepted_text and len(user_entered_nick) > 0:
                running = False
        entry.player_name = user_entered_nick
        self.__leaderBoardService.add_entry(entry)
        self.__leaderBoardService.persist_leader_board()
        return {BaseScreen.SCREEN_NEXT: next_screen, "entry": entry}
| {
"content_hash": "4572b3554061b011bd7cd7d076aa1279",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 118,
"avg_line_length": 45.06382978723404,
"alnum_prop": 0.6093012275731823,
"repo_name": "Superzer0/pyRiverRaid",
"id": "be49745d959cd66f2c18b9ad8ec55ed120d29a2d",
"size": "4236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "objects/screens/game_over_screen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "104662"
}
],
"symlink_target": ""
} |
# Python 2 build script: merges the JavaScript sources listed in a .cfg file,
# compresses the result with whichever JS minifier is importable, prepends the
# license text and writes a single output file.
import sys
sys.path.append("tools")
import mergejs

# Detect the best available compressor; fall back to no compression.
have_compressor = None
try:
    import jsmin
    have_compressor = "jsmin"
except ImportError:
    try:
        import minimize
        have_compressor = "minimize"
    except Exception, E:
        print E
        pass

# Defaults; may be overridden by the command-line arguments below.
sourceDirectory = "../src"
configFilename = "library.cfg"
outputFilename = "pretty-json-min.js"

# First argument: configuration file (".cfg" appended when missing).
if len(sys.argv) > 1:
    configFilename = sys.argv[1]
    extension = configFilename[-4:]
    if extension != ".cfg":
        configFilename = sys.argv[1] + ".cfg"

# Second argument: output file name.
if len(sys.argv) > 2:
    outputFilename = sys.argv[2]

print "Merging libraries."
merged = mergejs.run(sourceDirectory, None, configFilename)

# Compress with whichever minifier was found above.
if have_compressor == "jsmin":
    print "Compressing using jsmin."
    minimized = jsmin.jsmin(merged)
elif have_compressor == "minimize":
    print "Compressing using minimize."
    minimized = minimize.minimize(merged)
else:  # fallback: ship the merged file uncompressed
    print "Not compressing."
    minimized = merged

# Prepend the license header and write the result.
print "Adding license file."
minimized = file("license.txt").read() + minimized
print "Writing to %s." % outputFilename
file(outputFilename, "w").write(minimized)
print "Done."
| {
"content_hash": "326db985462fd96d97cc452ff934f4ad",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 59,
"avg_line_length": 23.979166666666668,
"alnum_prop": 0.6776715899218071,
"repo_name": "srv94/PingConsole",
"id": "e9e49b944310a5f3a5e70e7d8d0beb8613554bcc",
"size": "1174",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/main/resources/static/resources/plugins/pretty-json/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7006"
},
{
"name": "CSS",
"bytes": "603873"
},
{
"name": "HTML",
"bytes": "1926021"
},
{
"name": "Java",
"bytes": "408181"
},
{
"name": "JavaScript",
"bytes": "2202390"
},
{
"name": "PHP",
"bytes": "3841"
},
{
"name": "Python",
"bytes": "107348"
},
{
"name": "Shell",
"bytes": "2348"
}
],
"symlink_target": ""
} |
from urllib.parse import urlparse, urljoin
from flask import request, url_for, redirect
def is_safe_url(target):
    """Return True when *target* resolves to the same host as the current
    request over http(s), i.e. it is safe to redirect to."""
    current = urlparse(request.host_url)
    candidate = urlparse(urljoin(request.host_url, target))
    scheme_ok = candidate.scheme in ('http', 'https')
    same_host = current.netloc == candidate.netloc
    return scheme_ok and same_host
def get_redirect_target():
    """Return the first safe redirect target among the 'next' request value
    and the Referer header, or None when neither qualifies."""
    for candidate in (request.values.get('next'), request.referrer):
        if candidate and is_safe_url(candidate):
            return candidate
def redirect_back(endpoint='index', **values):
    """Redirect to the 'next' query argument when present and safe,
    otherwise to *endpoint* (built with url_for and **values)."""
    candidate = request.args.get('next', '')
    if candidate and is_safe_url(candidate):
        return redirect(candidate)
    return redirect(url_for(endpoint, **values))
| {
"content_hash": "2b18bba278e8372559fd3044f60bafb9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 63,
"avg_line_length": 34.095238095238095,
"alnum_prop": 0.6522346368715084,
"repo_name": "ilhamadun/har",
"id": "3c89f3f18d3083a4a90841ec633f7429bde8fa9b",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "har/controller/url.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "9528"
},
{
"name": "Python",
"bytes": "41008"
}
],
"symlink_target": ""
} |
"""
Created on 2014-05-12
:author: Andreas Kaiser (disko)
"""
from __future__ import absolute_import
from fanstatic import Library
from fanstatic import Resource
from js.jquery import jquery
library = Library('jquery_sortable', 'resources')
jquery_sortable = Resource(
library,
'jquery-sortable.js',
minified='jquery-sortable-min.js',
depends=[jquery, ])
| {
"content_hash": "58b3032ea2db3be8ce914f9d506d08a6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 19.789473684210527,
"alnum_prop": 0.7180851063829787,
"repo_name": "fanstatic/js.jquery-sortable",
"id": "fd464997e0efdf13e86029d2537018b7d75be7f6",
"size": "400",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "js/jquery_sortable/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "2980"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
} |
"""
@file
@brief This modules contains a class which implements a simple server.
"""
import sys
import os
import urllib
import datetime
from http.server import HTTPServer
from socketserver import ThreadingMixIn
from pyquickhelper.loghelper import fLOG
from pyensae.sql.database_main import Database
from ..simple_server.simple_server_custom import SimpleHandler, ThreadServer
class CustomDBServerHandler(SimpleHandler):
    """
    The server proposes a simple way to create one server on your own.
    It includes an access to a SQLlite3 database.
    """

    def __init__(self, request, client_address, server):
        """
        Regular constructor, an instance is created for each request,
        do not store any data for a longer time than a request.
        """
        SimpleHandler.__init__(self, request, client_address, server)
        #self.m_database = server._my_database
        #self.m_main_page = server._my_main_page
        #self.m_root = server._my_root

    def main_page(self):
        """
        returns the main page (case the server is called with no path)
        @return     default page
        """
        return self.server._my_main_page

    def get_javascript_paths(self):
        """
        returns all the location where the server should look for a java script
        @return     list of paths
        """
        return [self.server._my_root, SimpleHandler.javascript_path]

    def interpret_parameter_as_list_int(self, ps):
        """
        interpret a list of parameters, each of them is a list of integer
        separated by ,
        @param      ps      something like ``params.get("blog_selected")``
        @return             list of int
        """
        res = []
        for ins in ps:
            spl = ins.split(",")
            ii = [int(_) for _ in spl]
            res.extend(ii)
        return res

    def process_event(self, st):
        """
        process an event, and log it
        @param      st      string to process
        """
        # delegated to the server so events from all handler instances share one sink
        self.server.process_event(st)

    def serve_content_web(self, path, method, params):
        """
        functions to overload (executed after serve_content)
        @param      path        ParseResult
        @param      method      GET or POST
        @param      params      params parsed from the url + others
        """
        if path.path.startswith("/logs/"):
            # event-logging endpoint: record the URL-decoded tail and reply empty
            url = path.path[6:]
            targ = urllib.parse.unquote(url)
            self.process_event(targ)
            self.send_response(200)
            self.send_headers("")
        else:
            # static/templated file serving
            url = path.path
            htype, ftype = self.get_ftype(url)
            # NOTE(review): if server._my_root is empty, `local` stays unbound
            # below -- presumably at least one root is always configured; confirm.
            for p in self.server._my_root:
                local = os.path.join(p, url.lstrip("/"))
                if os.path.exists(local):
                    break
            if htype == "text/html":
                if os.path.exists(local):
                    content = self.get_file_content(local, ftype)
                    self.send_response(200)
                    self.send_headers(path.path)
                    # context made available to the HTML template
                    params["db"] = self.server._my_database
                    params["page"] = url
                    params[
                        "website"] = "http://%s:%d/" % self.server.server_address
                    self.feed(content, True, params)
                else:
                    # NOTE(review): send_error(404) after a 200 response has
                    # already been sent looks inconsistent -- confirm intent.
                    self.send_response(200)
                    self.send_headers("")
                    self.feed(
                        "unable to find (CustomServerHanlder): " +
                        path.geturl() +
                        "\nlocal file:" +
                        local +
                        "\n")
                    self.send_error(404)
            elif os.path.exists(local):
                # non-HTML asset (css, js, images, ...)
                content = self.get_file_content(local, ftype)
                self.send_response(200)
                self.send_headers(url)
                self.feed(content, False, params)
            else:
                # NOTE(review): same 200-then-404 pattern as above -- confirm intent.
                self.send_response(200)
                self.send_headers("")
                self.feed(
                    "unable to find (CustomServerHanlder): " +
                    path.geturl() +
                    "\nlocal file:" +
                    local +
                    "\n")
                self.send_error(404)
class CustomDBServer (ThreadingMixIn, HTTPServer):
    """
    defines a custom server which includes an access to a database,
    this database will contain the table to store the clicks

    .. exref::
        :title: create a custom local server

        The following code creates an instance of a local server.
        The server expects to find its content in the same folder.

        ::

            from pyensae import Database
            db = Database(dbfile)
            df = pandas.DataFrame ( [ {"name":"xavier", "module":"pyrsslocal"} ] )
            db.connect()
            db.import_dataframe(df, "example")
            db.close()
            url = "http://localhost:%d/p_aserver.html" % port
            webbrowser.open(url)
            CustomDBServer.run_server(None, dbfile, port = port, extra_path = os.path.join("."))

        The main page is the following one and it can contains a Python script
        which will be interpreted by the server.
        It gives access to a variable ``db`` which is a local database
        in SQLlite.

        ::

            <?xml version="1.0" encoding="utf-8"?>
            <html>
            <head>
            <link type="text/css" href="/p_aserver.css" rel="stylesheet"/>
            <title>Custom DB Server</title>
            <meta content="dupre, pyrsslocal, custom server" name="keywords"/>
            <meta content="text/html; charset=utf-8" http-equiv="Content-Type"/>
            <link rel="shortcut icon" href="p_aserver.ico" />
            <meta content="CustomServer from pyrsslocal" name="description" />
            <script type="text/javascript" src="/p_aserver.js"></script>
            <script src="/js/run_prettify.js" type="text/javascript"></script>
            </head>
            <body onload="setPositions(['divtable', ])" class="mymainbody">
            <div class="divtop">
            <h1>Custom DB Server unittest</h1>
            </div>
            <div class="divtable" id="divfiles" onscroll="savePosition('divtable')">
            <h2>Content of table example</h2>
            <script type="text/python">
            print("<table>")
            db.connect()
            for row in db.execute_view("SELECT * FROM example") :
                srow = [ str(_) for _ in row ]
                print( "<tr><td>{0}</td></tr>".format("</td><td>".join(srow) ) )
            db.close()
            print("</table>")
            </script>
            <p>end.</p>
            </div>
            </body>
            </html>
    """
    @staticmethod
    def schema_table(table):
        """
        returns the schema for a specific table
        @param      table       name (in ["stats", "event"])
        @return                 dictionary mapping column position to
                                ``(name, type[, constraints...])``; -1 marks
                                an auto-increment primary key
        """
        if table == "stats":
            return {0: ("id_post", int),
                    1: ("dtime", datetime.datetime),
                    2: ("status", str),
                    3: ("rate", int),
                    4: ("comment", str),
                    }
        if table == "event":
            return {-1: ("id_event", int, "PRIMARYKEY", "AUTOINCREMENT"),
                    0: ("dtime", datetime.datetime),
                    1: ("uuid", str),
                    2: ("type1", str),
                    3: ("type2", str),
                    4: ("args", str),
                    }
        raise Exception("unexpected table name")  # pragma: no cover
    def __init__(self,
                 server_address,
                 dbfile,
                 RequestHandlerClass=CustomDBServerHandler,
                 main_page="index.html",
                 root=None,
                 logfile=None
                 ):
        """
        constructor

        @param  server_address          addess of the server
        @param  RequestHandlerClass     it should be @see cl CustomServerHandler
        @param  dbfile                  database filename (SQLlite format)
        @param  main_page               main page for the service (when requested with no specific file)
        @param  root                    folder or list of folders where the server will look into for files such as the main page
        @param  logfile                 log destination: ``None`` (no log), ``"stdout"``,
                                        a filename (opened in append mode), or a file-like object
        """
        HTTPServer.__init__(self, server_address, RequestHandlerClass)
        # two connections on the same file: one for page scripts, one for events
        self._my_database = Database(dbfile, LOG=fLOG)
        self._my_database_ev = Database(dbfile, LOG=fLOG)
        # the package folder is always appended so bundled assets are found
        this = os.path.abspath(os.path.split(__file__)[0])
        if root is None:
            root = [this]
        elif isinstance(root, str):
            root = [root, this]
        elif isinstance(root, list):
            root = root + [this]
        else:
            raise TypeError(  # pragma: no cover
                "Unable to interpret root '%s'." % str(root))
        self._my_root = root
        self._my_main_page = main_page
        self._my_address = server_address
        fLOG("CustomServer.init: root=", root)
        fLOG("CustomServer.init: db=", dbfile)
        self.table_event = "cs_events"
        self.table_stats = "cs_stats"
        self.logfile = logfile
        if self.logfile is not None:
            if self.logfile == "stdout":
                self.flog = sys.stdout
            elif isinstance(self.logfile, str):
                self.flog = open(self.logfile, "a", encoding="utf8")
            else:
                # assumed to be an already-open file-like object
                self.flog = self.logfile
        else:
            self.flog = None
        # create the stats/event tables on first run
        self._my_database_ev.connect()
        if not self._my_database_ev.has_table(self.table_stats):
            schema = CustomDBServer.schema_table("stats")
            self._my_database_ev.create_table(self.table_stats, schema)
            self._my_database_ev.commit()
            self._my_database_ev.create_index(
                "id_post_" +
                self.table_stats,
                self.table_stats,
                "id_post",
                False)
            self._my_database_ev.commit()
        if not self._my_database_ev.has_table(self.table_event):
            schema = CustomDBServer.schema_table("event")
            self._my_database_ev.create_table(self.table_event, schema)
            self._my_database_ev.commit()
        self._my_database_ev.close()
    def __enter__(self):
        """
        What to do when creating the class.
        """
        return self
    def __exit__(self, exc_type, exc_value, traceback):  # pylint: disable=W0221
        """
        What to do when removing the instance (close the log file).
        """
        # only close files this instance opened; never close sys.stdout
        if self.flog is not None and self.logfile != "stdout":
            self.flog.close()
    def process_event(self, event):
        """
        Processes an event, it expects a format like the following:

        ::

            type1/uuid/type2/args

        The event is appended to the log file (if any) and inserted into
        the event table; ``status`` events are additionally inserted into
        the stats table.

        @param      event   string to log
        """
        now = datetime.datetime.now()
        if self.flog is not None:
            self.flog.write(str(now) + " " + event)
            self.flog.write("\n")
            self.flog.flush()
        info = event.split("/")
        status = None
        # NOTE(review): status events are read as type1/uuid/status/id_post/value,
        # so info[4] needs len(info) >= 5 — this guard (>= 4) would raise
        # IndexError on a 4-part status event; confirm intended format.
        if len(info) >= 4 and info[2] == "status":
            status = {"status": info[4],
                      "id_post": int(info[3]),
                      "dtime": now,
                      "rate": -1,
                      "comment": ""}
        # collapse everything past the third slash into a single args field
        if len(info) > 4:
            info[3:] = ["/".join(info[3:])]
        if len(info) < 4:
            raise OSError("unable to log event: " + event)
        values = {"type1": info[0],
                  "uuid": info[1],
                  "type2": info[2],
                  "dtime": now,
                  "args": info[3]}
        # to avoid database to collide
        # (if another request currently holds the connection, the event is
        # only logged to the file and dropped from the database)
        iscon = self._my_database_ev.is_connected()
        if iscon:
            if self.flog is not None:
                self.flog.write("unable to connect the database")
                if status is not None:
                    self.flog.write("unable to update status: " + str(status))
            return
        self._my_database_ev.connect()
        self._my_database_ev.insert(self.table_event, values)
        if status is not None:
            self._my_database_ev.insert(self.table_stats, status)
        self._my_database_ev.commit()
        self._my_database_ev.close()
    @staticmethod
    def run_server(server, dbfile, thread=False, port=8080, logfile=None,
                   extra_path=None):
        """
        start the server

        @param      server      if None, it becomes ``CustomServer(dbfile, ('localhost', 8080), CustomServerHandler)``
        @param      dbfile      file to the RSS database (SQLite)
        @param      thread      if True, the server is run in a thread
                                and the function returns right away,
                                otherwite, it runs the server.
        @param      port        port to use
        @param      logfile     file for the log or "stdout" for the standard output
        @param      extra_path  additional path the server should look into to find a page
        @return                 server if thread is False, the thread otherwise (the thread is started)

        @warning If you kill the python program while the thread is still running, python interpreter might be closed completely.
        """
        if server is None:
            server = CustomDBServer(
                ('localhost',
                 port),
                dbfile,
                CustomDBServerHandler,
                logfile=logfile,
                root=extra_path)
        if thread:
            th = ThreadServer(server)
            th.start()
            return th
        else:  # pragma: no cover
            # blocking call; only reachable interactively
            server.serve_forever()
            return server
| {
"content_hash": "ebde457d6b302128dae6e0fdf1ac6a68",
"timestamp": "",
"source": "github",
"line_count": 406,
"max_line_length": 129,
"avg_line_length": 34.53448275862069,
"alnum_prop": 0.5106625775622281,
"repo_name": "sdpython/pyrsslocal",
"id": "2077dbb33ee2143d97008f2b8de1b788a10937cd",
"size": "14021",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyrsslocal/custom_server/aserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "382"
},
{
"name": "CSS",
"bytes": "13236"
},
{
"name": "HTML",
"bytes": "302919"
},
{
"name": "JavaScript",
"bytes": "25484"
},
{
"name": "Python",
"bytes": "218585"
}
],
"symlink_target": ""
} |
import subprocess
def theme():
    """Return ``'Dark'`` or ``'Light'`` depending on the GNOME GTK theme.

    Queries ``gsettings get org.gnome.desktop.interface gtk-theme`` and
    reports ``'Dark'`` when the theme name contains ``-dark``.  Falls back
    to ``'Light'`` whenever ``gsettings`` is unavailable or the query
    fails for any reason.
    """
    # Here we just triage to GTK settings for now
    try:
        out = subprocess.run(
            ['gsettings', 'get', 'org.gnome.desktop.interface', 'gtk-theme'],
            capture_output=True)
        stdout = out.stdout.decode()
    except Exception:
        return 'Light'
    # gsettings prints the value wrapped in quotes: strip whitespace,
    # then drop the surrounding quote characters.
    theme = stdout.lower().strip()[1:-1]
    # ``theme`` is already lower-cased above; the original code called
    # ``.lower()`` a second time redundantly.
    if '-dark' in theme:
        return 'Dark'
    return 'Light'
def isDark():
    """Return True when the detected desktop theme is dark."""
    return 'Dark' == theme()
def isLight():
    """Return True when the detected desktop theme is light."""
    return 'Light' == theme()
# def listener(callback: typing.Callable[[str], None]) -> None:
def listener(callback):
    """Invoke ``callback('Dark' | 'Light')`` on every GTK theme change.

    Spawns ``gsettings monitor`` and blocks forever, reading one line per
    change notification and reporting whether the new theme name contains
    ``-dark``.
    """
    monitor_cmd = (
        'gsettings', 'monitor', 'org.gnome.desktop.interface', 'gtk-theme')
    with subprocess.Popen(
        monitor_cmd,
        stdout=subprocess.PIPE,
        universal_newlines=True,
    ) as proc:
        for raw_line in proc.stdout:
            # lines look like: gtk-theme: 'Adwaita-dark'
            name = raw_line.strip()
            name = name.removeprefix("gtk-theme: '").removesuffix("'").lower()
            callback('Dark' if '-dark' in name else 'Light')
| {
"content_hash": "d23068fc0c87e94f287463013da584ce",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 123,
"avg_line_length": 30.696969696969695,
"alnum_prop": 0.5972359328726555,
"repo_name": "Syncplay/syncplay",
"id": "094a2fc09d73e5266683ecbb1d19813cb032d991",
"size": "1285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "syncplay/vendor/darkdetect/_linux_detect.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "53024"
},
{
"name": "Makefile",
"bytes": "3662"
},
{
"name": "Python",
"bytes": "1073792"
},
{
"name": "Shell",
"bytes": "9820"
}
],
"symlink_target": ""
} |
"""
Ensemble verification
=====================
In this tutorial we perform a verification of a probabilistic extrapolation nowcast
using MeteoSwiss radar data.
"""
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
from pprint import pprint
from pysteps import io, nowcasts, rcparams, verification
from pysteps.motion.lucaskanade import dense_lucaskanade
from pysteps.postprocessing import ensemblestats
from pysteps.utils import conversion, dimension, transformation
from pysteps.visualization import plot_precip_field
###############################################################################
# Read precipitation field
# ------------------------
#
# First, we will import the sequence of MeteoSwiss ("mch") radar composites.
# You need the pysteps-data archive downloaded and the pystepsrc file
# configured with the data_source paths pointing to data folders.
# Selected case
date = datetime.strptime("201607112100", "%Y%m%d%H%M")
data_source = rcparams.data_sources["mch"]
n_ens_members = 20
n_leadtimes = 6
seed = 24
###############################################################################
# Load the data from the archive
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# The data are upscaled to 2 km resolution to limit the memory usage and thus
# be able to afford a larger number of ensemble members.
root_path = data_source["root_path"]
path_fmt = data_source["path_fmt"]
fn_pattern = data_source["fn_pattern"]
fn_ext = data_source["fn_ext"]
importer_name = data_source["importer"]
importer_kwargs = data_source["importer_kwargs"]
timestep = data_source["timestep"]
# Find the radar files in the archive
fns = io.find_by_date(
date, root_path, path_fmt, fn_pattern, fn_ext, timestep, num_prev_files=2
)
# Read the data from the archive
importer = io.get_method(importer_name, "importer")
R, _, metadata = io.read_timeseries(fns, importer, **importer_kwargs)
# Convert to rain rate
R, metadata = conversion.to_rainrate(R, metadata)
# Upscale data to 2 km
R, metadata = dimension.aggregate_fields_space(R, metadata, 2000)
# Plot the rainfall field
plot_precip_field(R[-1, :, :], geodata=metadata)
plt.show()
# Log-transform the data to unit of dBR, set the threshold to 0.1 mm/h,
# set the fill value to -15 dBR
R, metadata = transformation.dB_transform(R, metadata, threshold=0.1, zerovalue=-15.0)
# Set missing values with the fill value
R[~np.isfinite(R)] = -15.0
# Nicely print the metadata
pprint(metadata)
###############################################################################
# Forecast
# --------
#
# We use the STEPS approach to produce a ensemble nowcast of precipitation fields.
# Estimate the motion field
V = dense_lucaskanade(R)
# Perform the ensemble nowcast with STEPS
nowcast_method = nowcasts.get_method("steps")
R_f = nowcast_method(
R[-3:, :, :],
V,
n_leadtimes,
n_ens_members,
n_cascade_levels=6,
R_thr=-10.0,
kmperpixel=2.0,
timestep=timestep,
decomp_method="fft",
bandpass_filter_method="gaussian",
noise_method="nonparametric",
vel_pert_method="bps",
mask_method="incremental",
seed=seed,
)
# Back-transform to rain rates
R_f = transformation.dB_transform(R_f, threshold=-10.0, inverse=True)[0]
# Plot some of the realizations
fig = plt.figure()
for i in range(4):
ax = fig.add_subplot(221 + i)
ax.set_title("Member %02d" % i)
plot_precip_field(R_f[i, -1, :, :], geodata=metadata, colorbar=False, axis="off")
plt.tight_layout()
plt.show()
###############################################################################
# Verification
# ------------
#
# Pysteps includes a number of verification metrics to help users to analyze
# the general characteristics of the nowcasts in terms of consistency and
# quality (or goodness).
# Here, we will verify our probabilistic forecasts using the ROC curve,
# reliability diagrams, and rank histograms, as implemented in the verification
# module of pysteps.
# Find the files containing the verifying observations
fns = io.archive.find_by_date(
date,
root_path,
path_fmt,
fn_pattern,
fn_ext,
timestep,
0,
num_next_files=n_leadtimes,
)
# Read the observations
R_o, _, metadata_o = io.read_timeseries(fns, importer, **importer_kwargs)
# Convert to mm/h
R_o, metadata_o = conversion.to_rainrate(R_o, metadata_o)
# Upscale data to 2 km
R_o, metadata_o = dimension.aggregate_fields_space(R_o, metadata_o, 2000)
# Compute the verification for the last lead time
# compute the exceedance probability of 0.1 mm/h from the ensemble
P_f = ensemblestats.excprob(R_f[:, -1, :, :], 0.1, ignore_nan=True)
###############################################################################
# ROC curve
# ~~~~~~~~~
roc = verification.ROC_curve_init(0.1, n_prob_thrs=10)
verification.ROC_curve_accum(roc, P_f, R_o[-1, :, :])
fig, ax = plt.subplots()
verification.plot_ROC(roc, ax, opt_prob_thr=True)
ax.set_title("ROC curve (+%i min)" % (n_leadtimes * timestep))
plt.show()
###############################################################################
# Reliability diagram
# ~~~~~~~~~~~~~~~~~~~
reldiag = verification.reldiag_init(0.1)
verification.reldiag_accum(reldiag, P_f, R_o[-1, :, :])
fig, ax = plt.subplots()
verification.plot_reldiag(reldiag, ax)
ax.set_title("Reliability diagram (+%i min)" % (n_leadtimes * timestep))
plt.show()
###############################################################################
# Rank histogram
# ~~~~~~~~~~~~~~
rankhist = verification.rankhist_init(R_f.shape[0], 0.1)
verification.rankhist_accum(rankhist, R_f[:, -1, :, :], R_o[-1, :, :])
fig, ax = plt.subplots()
verification.plot_rankhist(rankhist, ax)
ax.set_title("Rank histogram (+%i min)" % (n_leadtimes * timestep))
plt.show()
# sphinx_gallery_thumbnail_number = 5
| {
"content_hash": "917f4c594af965c525fd93d7ba27aabd",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 86,
"avg_line_length": 30.53157894736842,
"alnum_prop": 0.631098086536804,
"repo_name": "pySTEPS/pysteps",
"id": "54d2500c8aa7a088403df09405109322933a3bc5",
"size": "5819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/plot_ensemble_verification.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Cython",
"bytes": "35339"
},
{
"name": "Python",
"bytes": "1211447"
}
],
"symlink_target": ""
} |
import os
import sys
import random
import time
from itertools import product
import Tkinter as tk
import tkFont
import tkSimpleDialog
import tkMessageBox
from PIL import ImageTk, Image
class Card:
    """A playing card (rank + suit) with a Tkinter-displayable image.

    The image folder used to be hard-coded to one developer's home
    directory; it is now the ``image_dir`` parameter whose default
    preserves the old behavior.
    """
    FACES = {11: 'Jack', 12: 'Queen', 13: 'King', 14: 'Ace'}
    SUITS = {'Hearts': 1, 'Diamonds': 2, 'Spades': 3, 'Clubs': 4}
    # 0 = red suits, 1 = black suits; used by check_trump for the fives.
    COLORS = {'Hearts': 0, 'Diamonds': 0, 'Spades': 1, 'Clubs': 1}
    # Backward-compatible default location of the card face images.
    DEFAULT_IMAGE_DIR = '/home/jacob/personal/card_game/deck/'

    def __init__(self, rank, suit, scale=0.6, image_dir=DEFAULT_IMAGE_DIR):
        """Create a card and load its face image.

        @param rank       2-14 (11-14 map to Jack/Queen/King/Ace)
        @param suit       'Hearts', 'Diamonds', 'Spades' or 'Clubs'
        @param scale      initial display scale of the card image
        @param image_dir  folder containing ``<rank><suit letter>.png`` files
        """
        self.suit = suit
        self.rank = rank
        self.suit_rank = self.SUITS[suit]
        self.sort_rank = self.rank
        if rank == 5:
            # Fives sort to the extremes: the Hearts and Spades fives sort
            # highest, the Diamonds and Clubs fives sort lowest.
            if self.suit_rank == 1 or self.suit_rank == 3:
                self.sort_rank = 100
            else:
                self.sort_rank = 0
        # e.g. image_dir + '05h.png' for the five of Hearts
        image_file = image_dir
        image_file += '{:>02}'.format(self.rank) + suit[0].lower() + '.png'
        self.image_file = image_file
        self.SCALE = scale
        self.scale = scale  # kept in sync with SCALE (resize() updates both)
        self.HEIGHT = 251.0
        self.WIDTH = 180.0
        self.image = Image.open(self.image_file)
        self.photo_image = ImageTk.PhotoImage(self.image.resize(
            (int(self.SCALE * self.WIDTH), int(self.SCALE * self.HEIGHT)), Image.ANTIALIAS))

    def __str__(self):
        """Return e.g. ``'Ace of Spades'``."""
        value = self.FACES.get(self.rank, self.rank)
        return "{0} of {1}".format(value, self.suit)

    def __repr__(self):
        return str(self)

    def resize(self, scale):
        """Re-render ``photo_image`` at a new display scale."""
        self.scale = scale
        # BUG FIX: the original only updated ``scale`` here while __init__
        # set ``SCALE``; keep both attributes consistent.
        self.SCALE = scale
        self.photo_image = ImageTk.PhotoImage(self.image.resize(
            (int(scale * self.WIDTH), int(scale * self.HEIGHT)), Image.ANTIALIAS))

    def show(self):
        """Open the card image in the ``eog`` viewer (debugging helper)."""
        os.system('eog ' + self.image_file)

    def check_trump(self, trump_suit):
        """Return True when this card counts as trump for *trump_suit*.

        Non-five cards are trump only in their own suit; a five is trump
        whenever its color matches the trump suit's color.
        """
        if self.rank != 5:
            return self.suit == trump_suit
        return self.COLORS[self.suit] == self.COLORS[trump_suit]
class Hand:
    """An ordered collection of cards held by one player."""

    def __init__(self):
        self.cards = []

    def clear_hand(self):
        """Drop every card from the hand."""
        self.cards = []

    def discard(self, trump_suit):
        """Keep only the cards that count as trump for *trump_suit*."""
        self.cards = [card for card in self.cards
                      if card.check_trump(trump_suit)]

    def sort_hand(self):
        """Order the hand by suit, then by sorting rank within the suit."""
        self.cards.sort(key=lambda card: (card.suit_rank, card.sort_rank))

    def play(self, card):
        """Remove and return the card at 1-based position *card*."""
        return self.cards.pop(card - 1)

    def add(self, card):
        """Append *card* to the hand."""
        self.cards.append(card)

    def __str__(self):
        # One line per card: " <n>:" followed by n spaces and the card name.
        lines = []
        for position, card in enumerate(self.cards, start=1):
            lines.append(
                ' ' + str(position) + ':' + ' ' * position + str(card) + '\n')
        return ''.join(lines)

    def __repr__(self):
        return str(self)
class Deck:
    """A standard 52-card deck built from Card objects."""

    def __init__(self, scale=0.6):
        """Build the full deck at the given image scale."""
        ranks = range(2, 15)
        suits = 'Spades Diamonds Clubs Hearts'.split()
        self.cards = [Card(r, s, scale=scale)
                      for s, r in product(suits, ranks)]
        self.scale = scale

    def __str__(self):
        # staircase listing: each card indented one more space than the last
        s = ''
        for i in range(len(self.cards)):
            s = s + ' ' * i + str(self.cards[i]) + '\n'
        return s

    def __repr__(self):
        # BUG FIX: the original body was a bare ``pass`` so __repr__
        # returned None, making repr(deck) raise
        # "TypeError: __repr__ returned non-string".  Mirror the other
        # classes in this file and reuse __str__.
        return str(self)

    def shuffle(self):
        """Shuffle the deck in place."""
        random.shuffle(self.cards)

    def deal(self, hand, num_cards=1):
        """Move *num_cards* cards from the top (end) of the deck into *hand*.

        *hand* only needs an ``add`` method, so Hand and Trick both work.
        """
        for i in range(num_cards):
            hand.add(self.cards.pop())
class Pedro_Player:
    """A seat at the table: a named player with a hand and dealer status."""
    def __init__(self, name, position):
        # name: display name; position: seat index (0-3) around the table
        self.name = name
        self.hand = Hand()
        self.dealer = False
        self.position = position
    def get_label(self, frame, fontsize, fontcolor):
        """Return a Tkinter Label showing the player's name."""
        font_style = tkFont.Font(family='Times', size=fontsize)
        label = tk.Label(
            frame,
            text=self.name,
            bg='forest green',
            fg=fontcolor,
            font=font_style)
        return label
    def set_dealer(self):
        """Mark this player as the dealer for the current hand."""
        self.dealer = True
    def hand_as_image(self, frame, scale=0.5):
        """Return a Canvas drawing the hand as overlapping card images.

        Returns None when the hand is empty.
        """
        if len(self.hand.cards) == 0:
            return None
        for card in self.hand.cards:
            card.resize(scale)
        width = self.hand.cards[0].WIDTH * scale
        height = self.hand.cards[0].HEIGHT * scale
        # cards overlap: each next card is shifted a quarter card-width right
        dx = width / 4.
        x_center = width / 2.
        y_center = height / 2.
        num_cards = len(self.hand.cards)
        hand_frame = tk.Canvas(frame, width=dx * (num_cards - 1) + width,
                               height=height, bg='forest green')
        for count, card in enumerate(self.hand.cards):
            shift = count * dx
            hand_frame.create_image(
                shift + x_center,
                y_center,
                image=card.photo_image)
        return hand_frame
    def hand_as_button(self, frame, scale=0.5):
        """Return a Frame with one clickable Button per card (None if empty).

        NOTE(review): every button's command is the builtin ``quit``, so
        clicking any card exits the application — looks like a placeholder
        for the real "play this card" callback; confirm.
        """
        if len(self.hand.cards) == 0:
            return None
        for card in self.hand.cards:
            card.resize(scale)
        width = self.hand.cards[0].WIDTH * scale
        height = self.hand.cards[0].HEIGHT * scale
        dx = width / 4.
        x_center = width / 2.
        y_center = height / 2.
        hand_frame = tk.Frame(frame,
                              height=height, bg='forest green')
        num_cards = len(self.hand.cards)
        buttons = [0 for i in range(num_cards)]
        for count, card in enumerate(self.hand.cards):
            shift = count * dx
            # the last card is fully visible; the others show only a sliver
            if count == len(self.hand.cards) - 1:
                buttons[count] = tk.Button(
                    hand_frame,
                    height=height,
                    width=width,
                    image=card.photo_image,
                    command=quit)
            else:
                buttons[count] = tk.Button(
                    hand_frame,
                    height=height,
                    width=dx,
                    image=card.photo_image,
                    command=quit)
            buttons[count].grid(row=0, column=count)
        return hand_frame
class Pedro_Game:
    """Drives one game of Pedro: dealing, bidding, trump selection, play.

    The bidding UI is event driven: each Tkinter button's command calls
    back into get_bid/select_trump with updated state, so the "loop" over
    players is realized through chained callbacks.
    """
    def __init__(self, root, deck, players):
        self.root = root
        self.deck = deck
        # seat index of the dealer and the playing order around the table
        self.dealer_pos = 3
        self.order = [0, 1, 2, 3]
        self.players = players
        # Get Size Parameter from Cards
        # scale=deck.cards[0].scale
        self.height, self.width = deck.cards[0].HEIGHT, deck.cards[0].WIDTH
    def pick_dealer(self):
        """Pick a random dealer, rotate the order so the dealer is last."""
        temp = random.randint(0, 3)
        self.order = self.order[temp:] + self.order[0:temp]
        self.dealer_pos = self.order[3]
        self.players[self.dealer_pos].set_dealer()
        string = self.players[self.dealer_pos].name + ' will deal first'
        # NOTE(review): uses the module-level ``root``, not ``self.root``
        tkMessageBox.showinfo(root, string)
        self.deal()
    def get_bid(self, order_loc, current_bid, winning_bidder_loc):
        """Show the bidding UI for the player at ``self.order[order_loc]``.

        Buttons re-enter this method for the next player (or select_trump
        once the dealer has acted).
        """
        loc = self.order[order_loc]
        # destroy the previous player's frame, if any
        try:
            self.hand_frame.destroy()
        except AttributeError:
            pass
        player = self.players[loc]
        player.hand.sort_hand()
        dx = self.width / 4.
        x_center = self.width / 2.
        y_center = self.height / 2.
        self.hand_frame = tk.Frame(root)
        # NOTE(review): the returned canvas is never packed into the frame
        player.hand_as_image(self.hand_frame)
        button_font = tkFont.Font(family='Times', size=16)
        bid_frame = tk.Frame(self.hand_frame)
        text = 'Pass'
        if player.dealer:
            # the dealer may steal the bid at current_bid, jump to 14,
            # or (at the 5 floor) pass the deck
            if current_bid == 5:
                text = 'Pass Deck'
                bids = [6, 14]
            elif current_bid == 14:
                bids = [14]
            else:
                bids = [current_bid + 1, 14]
        else:
            if current_bid < 14:
                bids = [i for i in range(6, 15)]
            else:
                # NOTE(review): after this recursive call the code below
                # still runs with ``bids`` unbound (NameError); an early
                # return seems intended — confirm.
                self.get_bid(3, 14, winning_bidder_loc)
        if player.dealer:
            # the dealer's 'Pass' confirms the current winning bid
            b = tk.Button(
                bid_frame,
                text=text,
                font=button_font,
                command=lambda: self.select_trump(
                    current_bid,
                    winning_bidder_loc))
            b.pack(side='left')
            for bid in bids:
                b = tk.Button(
                    bid_frame,
                    text=bid,
                    font=button_font,
                    command=lambda bid=bid: self.select_trump(
                        bid,
                        order_loc))
                b.pack(side='left')
        else:
            b = tk.Button(
                bid_frame,
                text=text,
                font=button_font,
                command=lambda: self.get_bid(
                    order_loc + 1,
                    current_bid,
                    winning_bidder_loc))
            b.pack(side='left')
            for bid in bids:
                b = tk.Button(
                    bid_frame,
                    text=bid,
                    font=button_font,
                    command=lambda bid=bid: self.get_bid(
                        order_loc + 1,
                        bid,
                        order_loc))
                b.pack(side='left')
                # bids at or below the current bid are shown but disabled
                if bid < current_bid + 1:
                    b['state'] = tk.DISABLED
        bid_frame.pack(side='bottom')
        self.hand_frame.pack()
    def select_trump(self, winning_bid, winning_bidder_loc):
        """Let the winning bidder pick the trump suit, then start play."""
        winning_bidder_loc = self.order[winning_bidder_loc]
        winning_bidder = self.players[winning_bidder_loc]
        name = winning_bidder.name
        self.hand_frame.destroy()
        dx = self.width / 4.
        x_center = self.width / 2.
        y_center = self.height / 2.
        self.hand_frame = tk.Frame(self.root)
        # NOTE(review): returned canvas is never packed (same as get_bid)
        winning_bidder.hand_as_image(self.hand_frame)
        suits = 'Hearts Diamonds Spades Clubs'.split()
        lower_frame = tk.Frame(self.hand_frame)
        for suit in suits:
            b = tk.Button(
                lower_frame,
                text=suit,
                command=lambda suit=suit: self.play_hand(
                    winning_bid,
                    winning_bidder_loc,
                    suit))
            b.pack(side='left')
        lower_frame.pack()
        self.hand_frame.pack()
    def play_hand(self, winning_bid, winning_bidder_loc, trump_suit):
        """Discard non-trump, refill hands to six cards, start the trick."""
        self.hand_frame.destroy()
        order = [0, 1, 2, 3]
        # the winning bidder leads
        self.order = order[winning_bidder_loc:] + order[:winning_bidder_loc]
        for loc in self.order:
            player = self.players[loc]
            player.hand.discard(trump_suit)
            num_cards = len(player.hand.cards)
            need = 6 - num_cards
            if need > 0:
                self.deck.deal(player.hand, need)
            player.hand.sort_hand()
        # for count,loc in enumerate(self.order):
        player = self.players[winning_bidder_loc]
        # NOTE(review): hand_as_button(frame, scale) — passing the seat
        # index as ``scale`` looks wrong; confirm intended call.
        player.hand_as_button(root, winning_bidder_loc)
    def deal(self):
        """Deal three rounds of three cards each, then open the bidding."""
        for i in range(3):
            for loc in self.order:
                player = self.players[loc]
                self.deck.deal(player.hand, 3)
        # bidding starts at a floor of 5 with the player left of the dealer
        self.get_bid(0, 5, 0)
class Trick:
    """The cards played in one round, together with the trump suit."""

    def __init__(self, trump_suit):
        self.trump_suit = trump_suit
        self.cards = []

    def add(self, card):
        """Append *card* to the trick (lets Deck.deal target a Trick)."""
        self.cards.append(card)

    def old_trick(self, frame, scale=0.25):
        """Render the trick as a horizontal strip of card images.

        Returns a Canvas, or None when no card has been played yet.
        """
        if not self.cards:
            return None
        for card in self.cards:
            card.resize(scale)
        card_width = self.cards[0].WIDTH * scale
        card_height = self.cards[0].HEIGHT * scale
        half_width = card_width / 2.
        half_height = card_height / 2.
        strip = tk.Canvas(frame, width=len(self.cards) * card_width,
                          height=card_height, bg='forest green')
        for offset, card in enumerate(self.cards):
            strip.create_image(
                card_width * offset + half_width,
                half_height,
                image=card.photo_image)
        return strip
scale = 0.5
# fixed seed so the demo layout is reproducible
random.seed(1)
root = tk.Tk()
root.title('Pedro')
# NOTE(review): the width/height variable names look swapped relative to
# the winfo_* calls — confirm.
screen_height = root.winfo_screenwidth()
screen_width = root.winfo_screenheight()
frame = tk.Frame(
    root,
    height=screen_height,
    width=screen_width,
    bg='forest green',
    borderwidth=10,
    relief='ridge')
deck = Deck(scale=scale)
deck.shuffle()
# the four seats around the table, in play order
players = [
    Pedro_Player('Jacob', 0),
    Pedro_Player('David', 1),
    Pedro_Player('Mom', 2),
    Pedro_Player('Brigette', 3)]
jacob, david, mom, brigette = players
deck.deal(jacob.hand, 4)
deck.deal(david.hand, 4)
deck.deal(mom.hand, 4)
deck.deal(brigette.hand, 4)
fontsize = 20
fontcolor = 'white'
# empty labels used as grid spacers between the table regions
spacers = [
    tk.Label(frame, text='     ', bg='forest green'),
    tk.Label(frame, text='     ', bg='forest green'),
    tk.Label(frame, text='     ', bg='forest green')]
height = jacob.hand.cards[0].HEIGHT
width = jacob.hand.cards[0].WIDTH
# (row, column) grid positions for each UI element
player_locs = [
    (0, 0),
    (0, 5),
    (7, 5),
    (7, 0)]
trick_locs = [
    (3, 2),
    (3, 3),
    (5, 2),
    (5, 3)]
spacer_locs = [
    (0, 1),
    (0, 4),
    (0, 6)]
# Trick works as a deal target because it exposes add()
current_trick = Trick('Hearts')
deck.deal(current_trick, 4)
trick1 = tk.Canvas(frame, width=scale * height, height=scale * height)
trick2 = tk.Canvas(frame, width=scale * height, height=scale * height)
trick3 = tk.Canvas(frame, width=scale * height, height=scale * height)
trick4 = tk.Canvas(frame, width=scale * height, height=scale * height)
x_center = width * scale
y_center = height * scale
# the four cards of the current trick, shown in the table center
test = [
    tk.Label(frame, image=current_trick.cards[0].photo_image),
    tk.Label(frame, image=current_trick.cards[1].photo_image),
    tk.Label(frame, image=current_trick.cards[2].photo_image),
    tk.Label(frame, image=current_trick.cards[3].photo_image)]
names = [
    jacob.get_label(frame, fontsize, fontcolor),
    david.get_label(frame, fontsize, fontcolor),
    mom.get_label(frame, fontsize, fontcolor),
    brigette.get_label(frame, fontsize, fontcolor)]
# only the local player's hand is clickable; the others are images
hands = [
    jacob.hand_as_button(frame, scale=0.5),
    david.hand_as_image(frame, scale=0.5),
    mom.hand_as_image(frame, scale=0.5),
    brigette.hand_as_image(frame, scale=0.5)]
for count, name in enumerate(names):
    x, y = player_locs[count]
    name.grid(row=x, column=y)
    hands[count].grid(row=x + 1, column=y, rowspan=2)
for count, t in enumerate(test):
    x, y = trick_locs[count]
    t.grid(row=x, column=y, rowspan=2)
for count, spacer in enumerate(spacers):
    x, y = spacer_locs[count]
    spacer.grid(row=x, column=y)
# previously played trick, shown small at the table edge
played_trick = Trick('Hearts')
deck.deal(played_trick, 4)
played = played_trick.old_trick(frame)
played.grid(row=1, column=7)
# layed_trick=Trick('Clubs')
# deck.deal(layed_trick,4)
# layed=layed_trick.old_trick(frame)
# layed.grid(row=2,column=7)
# old trick = column 7
frame.pack()
# game=Pedro_Game(root,deck,players)
# game.pick_dealer()
root.mainloop()
| {
"content_hash": "b1298d6946ce3d5a0785c282e69ae2dd",
"timestamp": "",
"source": "github",
"line_count": 538,
"max_line_length": 92,
"avg_line_length": 27.345724907063197,
"alnum_prop": 0.5279363784665579,
"repo_name": "JFeaux/pedro",
"id": "6bf0c51819fa3b5b1d4f82fd2f19c7d4ea4ee195",
"size": "14731",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/game_play.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88614"
}
],
"symlink_target": ""
} |
"""Module implementing RNN Cells.
This module provides a number of basic commonly used RNN cells, such as LSTM
(Long Short Term Memory) or GRU (Gated Recurrent Unit), and a number of
operators that allow adding dropouts, projections, or embeddings for inputs.
Constructing multi-layer cells is supported by the class `MultiRNNCell`, or by
calling the `rnn` ops several times.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import hashlib
import numbers
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import activations
from tensorflow.python.keras import initializers
from tensorflow.python.keras import layers as keras_layer
from tensorflow.python.keras.engine import input_spec
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import base as base_layer
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
_BIAS_VARIABLE_NAME = "bias"
_WEIGHTS_VARIABLE_NAME = "kernel"
# This can be used with self.assertRaisesRegexp for assert_like_rnncell.
ASSERT_LIKE_RNNCELL_ERROR_REGEXP = "is not an RNNCell"
def _hasattr(obj, attr_name):
try:
getattr(obj, attr_name)
except AttributeError:
return False
else:
return True
def assert_like_rnncell(cell_name, cell):
  """Raises a TypeError if `cell` does not behave like an RNNCell.

  NOTE: Do not rely on the error message (in particular in tests) which can be
  subject to change to increase readability. Use
  ASSERT_LIKE_RNNCELL_ERROR_REGEXP.

  Args:
    cell_name: A string to give a meaningful error referencing to the name of
      the functionargument.
    cell: The object which should behave like an RNNCell.

  Raises:
    TypeError: A human-friendly exception listing every failed requirement.
  """
  checks = [
      (_hasattr(cell, "output_size"), "'output_size' property is missing"),
      (_hasattr(cell, "state_size"), "'state_size' property is missing"),
      (_hasattr(cell, "get_initial_state") or _hasattr(cell, "zero_state"),
       "either 'zero_state' or 'get_initial_state' method is required"),
      (callable(cell), "is not callable"),
  ]
  failures = [message for ok, message in checks if not ok]
  if failures:
    raise TypeError("The argument {!r} ({}) is not an RNNCell: {}.".format(
        cell_name, cell, ", ".join(failures)))
def _concat(prefix, suffix, static=False):
  """Concat that enables int, Tensor, or TensorShape values.

  This function takes a size specification, which can be an integer, a
  TensorShape, or a Tensor, and converts it into a concatenated Tensor
  (if static = False) or a list of integers (if static = True).

  Args:
    prefix: The prefix; usually the batch size (and/or time step size).
      (TensorShape, int, or Tensor.)
    suffix: TensorShape, int, or Tensor.
    static: If `True`, return a python list with possibly unknown dimensions.
      Otherwise return a `Tensor`.

  Returns:
    shape: the concatenation of prefix and suffix.

  Raises:
    ValueError: if `suffix` is not a scalar or vector (or TensorShape).
    ValueError: if prefix or suffix was `None` and asked for dynamic
      Tensors out.
  """
  # The prefix and suffix are normalized identically; the shared helper
  # removes the previously duplicated branch for each of them.
  p, p_static = _dynamic_and_static_shape(prefix, "prefix")
  s, s_static = _dynamic_and_static_shape(suffix, "suffix")
  if static:
    shape = tensor_shape.as_shape(p_static).concatenate(s_static)
    shape = shape.as_list() if shape.ndims is not None else None
  else:
    if p is None or s is None:
      raise ValueError("Provided a prefix or suffix of None: %s and %s" %
                       (prefix, suffix))
    shape = array_ops.concat((p, s), 0)
  return shape


def _dynamic_and_static_shape(value, name):
  """Normalizes one size specification for `_concat`.

  Args:
    value: An int, a TensorShape, or a rank-0/rank-1 integer Tensor.
    name: The argument name ("prefix" or "suffix"); used only in error
      messages.

  Returns:
    A pair `(dynamic, static)` where `dynamic` is a rank-1 int32 Tensor
    (or `None` when `value` is a TensorShape that is not fully defined) and
    `static` is a python value/list of ints (or `None` when unknown).

  Raises:
    ValueError: if `value` is a Tensor with rank greater than 1.
  """
  if isinstance(value, ops.Tensor):
    dynamic = value
    # Capture the static value before any reshaping, mirroring the original
    # behavior for scalar size tensors.
    static = tensor_util.constant_value(value)
    if dynamic.shape.ndims == 0:
      dynamic = array_ops.expand_dims(dynamic, 0)
    elif dynamic.shape.ndims != 1:
      raise ValueError("%s tensor must be either a scalar or vector, "
                       "but saw tensor: %s" % (name, dynamic))
  else:
    shape = tensor_shape.as_shape(value)
    static = shape.as_list() if shape.ndims is not None else None
    # A partially-unknown TensorShape has no dynamic representation here;
    # `_concat` raises if a dynamic Tensor is then requested.
    dynamic = (
        constant_op.constant(shape.as_list(), dtype=dtypes.int32)
        if shape.is_fully_defined() else None)
  return dynamic, static
def _zero_state_tensors(state_size, batch_size, dtype):
  """Create tensors of zeros based on state_size, batch_size, and dtype."""

  def _single_zero_state(size_spec):
    """Builds one `[batch_size] + size_spec` zeros tensor."""
    zeros = array_ops.zeros(_concat(batch_size, size_spec), dtype=dtype)
    if not context.executing_eagerly():
      # In graph mode, attach the best statically-known shape so downstream
      # ops see `[batch_size] + size_spec` even when parts are dynamic.
      zeros.set_shape(_concat(batch_size, size_spec, static=True))
    return zeros

  # state_size may be an arbitrarily nested structure; mirror it exactly.
  return nest.map_structure(_single_zero_state, state_size)
@tf_export(v1=["nn.rnn_cell.RNNCell"])
class RNNCell(base_layer.Layer):
  """Abstract object representing an RNN cell.

  Every `RNNCell` must have the properties below and implement `call` with
  the signature `(output, next_state) = call(input, state)`. The optional
  third input argument, `scope`, is allowed for backwards compatibility
  purposes; but should be left off for new subclasses.

  This definition of cell differs from the definition used in the literature.
  In the literature, 'cell' refers to an object with a single scalar output.
  This definition refers to a horizontal array of such units.

  An RNN cell, in the most abstract setting, is anything that has
  a state and performs some operation that takes a matrix of inputs.
  This operation results in an output matrix with `self.output_size` columns.
  If `self.state_size` is an integer, this operation also results in a new
  state matrix with `self.state_size` columns. If `self.state_size` is a
  (possibly nested tuple of) TensorShape object(s), then it should return a
  matching structure of Tensors having shape `[batch_size].concatenate(s)`
  for each `s` in `self.batch_size`.
  """

  def __init__(self, trainable=True, name=None, dtype=None, **kwargs):
    super(RNNCell, self).__init__(
        trainable=trainable, name=name, dtype=dtype, **kwargs)
    # Attribute that indicates whether the cell is a TF RNN cell, due to the
    # slight difference between TF and Keras RNN cell. Notably the state is not
    # wrapped in a list for TF cells with a single tensor state, whereas a
    # Keras cell will wrap the state into a list, and call() will have to
    # unwrap them.
    self._is_tf_rnn_cell = True

  def __call__(self, inputs, state, scope=None):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple with
        shapes `[batch_size, s] for s in self.state_size`.
      scope: VariableScope for the created subgraph; defaults to class name.

    Returns:
      A pair containing:

      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    if scope is not None:
      with vs.variable_scope(
          scope, custom_getter=self._rnn_get_variable) as scope:
        return super(RNNCell, self).__call__(inputs, state, scope=scope)
    else:
      # No scope given: lazily create one per cell instance and cache it on
      # the instance, so repeated calls reuse the same variable scope (and
      # therefore the same variables) instead of creating new ones.
      scope_attrname = "rnncell_scope"
      scope = getattr(self, scope_attrname, None)
      if scope is None:
        scope = vs.variable_scope(
            vs.get_variable_scope(), custom_getter=self._rnn_get_variable)
        setattr(self, scope_attrname, scope)
      with scope:
        return super(RNNCell, self).__call__(inputs, state)

  def _rnn_get_variable(self, getter, *args, **kwargs):
    # Custom variable getter: every variable fetched through the cell's
    # variable scope is also registered as a (non-)trainable weight on this
    # Layer, so Layer-based weight collection sees scope-created variables.
    variable = getter(*args, **kwargs)
    if context.executing_eagerly():
      trainable = variable._trainable  # pylint: disable=protected-access
    else:
      # In graph mode a PartitionedVariable is trainable iff its first
      # partition is in the trainable-variables collection.
      trainable = (
          variable in tf_variables.trainable_variables() or
          (isinstance(variable, tf_variables.PartitionedVariable) and
           list(variable)[0] in tf_variables.trainable_variables()))
    if trainable and variable not in self._trainable_weights:
      self._trainable_weights.append(variable)
    elif not trainable and variable not in self._non_trainable_weights:
      self._non_trainable_weights.append(variable)
    return variable

  @property
  def state_size(self):
    """size(s) of state(s) used by this cell.

    It can be represented by an Integer, a TensorShape or a tuple of Integers
    or TensorShapes.
    """
    raise NotImplementedError("Abstract method")

  @property
  def output_size(self):
    """Integer or TensorShape: size of outputs produced by this cell."""
    raise NotImplementedError("Abstract method")

  def build(self, _):
    # This tells the parent Layer object that it's OK to call
    # self.add_variable() inside the call() method.
    pass

  def get_initial_state(self, inputs=None, batch_size=None, dtype=None):
    """Returns the zero state, inferring batch_size/dtype from `inputs`.

    Either `inputs` or both `batch_size` and `dtype` must be provided; when
    both are given they are cross-checked for consistency.
    """
    if inputs is not None:
      # Validate the given batch_size and dtype against inputs if provided.
      inputs = ops.convert_to_tensor(inputs, name="inputs")
      if batch_size is not None:
        if tensor_util.is_tensor(batch_size):
          static_batch_size = tensor_util.constant_value(
              batch_size, partial=True)
        else:
          static_batch_size = batch_size
        if inputs.shape.dims[0].value != static_batch_size:
          raise ValueError(
              "batch size from input tensor is different from the "
              "input param. Input tensor batch: {}, batch_size: {}".format(
                  inputs.shape.dims[0].value, batch_size))

      if dtype is not None and inputs.dtype != dtype:
        raise ValueError(
            "dtype from input tensor is different from the "
            "input param. Input tensor dtype: {}, dtype: {}".format(
                inputs.dtype, dtype))

      # Fall back to the dynamic shape when the static batch dim is unknown.
      batch_size = inputs.shape.dims[0].value or array_ops.shape(inputs)[0]
      dtype = inputs.dtype
    if None in [batch_size, dtype]:
      raise ValueError(
          "batch_size and dtype cannot be None while constructing initial "
          "state: batch_size={}, dtype={}".format(batch_size, dtype))
    return self.zero_state(batch_size, dtype)

  def zero_state(self, batch_size, dtype):
    """Return zero-filled state tensor(s).

    Args:
      batch_size: int, float, or unit Tensor representing the batch size.
      dtype: the data type to use for the state.

    Returns:
      If `state_size` is an int or TensorShape, then the return value is a
      `N-D` tensor of shape `[batch_size, state_size]` filled with zeros.

      If `state_size` is a nested list or tuple, then the return value is
      a nested list or tuple (of the same structure) of `2-D` tensors with
      the shapes `[batch_size, s]` for each s in `state_size`.
    """
    # Try to use the last cached zero_state. This is done to avoid recreating
    # zeros, especially when eager execution is enabled.
    state_size = self.state_size
    is_eager = context.executing_eagerly()
    if is_eager and _hasattr(self, "_last_zero_state"):
      (last_state_size, last_batch_size, last_dtype,
       last_output) = getattr(self, "_last_zero_state")
      if (last_batch_size == batch_size and last_dtype == dtype and
          last_state_size == state_size):
        return last_output
    with ops.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
      output = _zero_state_tensors(state_size, batch_size, dtype)
    if is_eager:
      # Cache keyed on (state_size, batch_size, dtype) for the next call.
      self._last_zero_state = (state_size, batch_size, dtype, output)
    return output
class LayerRNNCell(RNNCell):
  """Subclass of RNNCells that act like proper `tf.Layer` objects.

  For backwards compatibility purposes, most `RNNCell` instances allow their
  `call` methods to instantiate variables via `tf.compat.v1.get_variable`.
  The underlying variable scope thus keeps track of any variables, and
  returns cached versions. This is atypical of `tf.layer` objects, which
  separate this part of layer building into a `build` method that is only
  called once.

  Here we provide a subclass for `RNNCell` objects that act exactly as
  `Layer` objects do. They must provide a `build` method and their
  `call` methods do not access Variables via `tf.compat.v1.get_variable`.
  """

  def __call__(self, inputs, state, scope=None, *args, **kwargs):
    """Run this RNN cell on inputs, starting from the given state.

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: if `self.state_size` is an integer, this should be a `2-D Tensor`
        with shape `[batch_size, self.state_size]`. Otherwise, if
        `self.state_size` is a tuple of integers, this should be a tuple with
        shapes `[batch_size, s] for s in self.state_size`.
      scope: optional cell scope.
      *args: Additional positional arguments.
      **kwargs: Additional keyword arguments.

    Returns:
      A pair containing:

      - Output: A `2-D` tensor with shape `[batch_size, self.output_size]`.
      - New state: Either a single `2-D` tensor, or a tuple of tensors matching
        the arity and shapes of `state`.
    """
    # Bypass RNNCell's variable capturing semantics for LayerRNNCell.
    # Instead, it is up to subclasses to provide a proper build
    # method. See the class docstring for more details.
    return base_layer.Layer.__call__(
        self, inputs, state, scope=scope, *args, **kwargs)
@tf_export(v1=["nn.rnn_cell.BasicRNNCell"])
class BasicRNNCell(LayerRNNCell):
  """A vanilla (Elman-style) RNN cell: `output = act(W.[input; state] + b)`.

  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnRNNTanh` for better performance on GPU.

  Args:
    num_units: int, The number of units in the RNN cell.
    activation: Nonlinearity to use. Default: `tanh`. It may also be a string
      naming a Keras activation function.
    reuse: (optional) Python boolean describing whether to reuse variables in
      an existing scope. If not `True`, and the existing scope already has the
      given variables, an error is raised.
    name: String, the name of the layer. Layers with the same name will share
      weights, but to avoid mistakes we require reuse=True in such cases.
    dtype: Default dtype of the layer (default of `None` means use the type of
      the first input). Required when `build` is called before `call`.
    **kwargs: Dict, keyword named properties for common layer attributes, like
      `trainable` etc when constructing the cell from configs of get_config().
  """

  @deprecated(None, "This class is equivalent as tf.keras.layers.SimpleRNNCell,"
              " and will be replaced by that in Tensorflow 2.0.")
  def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               name=None,
               dtype=None,
               **kwargs):
    super(BasicRNNCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if context.executing_eagerly() and context.num_gpus() > 0:
      logging.warn(
          "%s: Note that this cell is not optimized for performance. "
          "Please use tf.contrib.cudnn_rnn.CudnnRNNTanh for better "
          "performance on GPU.", self)
    # The cell only accepts flat feature vectors: inputs must be 2-D.
    self.input_spec = input_spec.InputSpec(ndim=2)
    self._num_units = num_units
    self._activation = (
        activations.get(activation) if activation else math_ops.tanh)

  @property
  def state_size(self):
    """State is a single `[batch, num_units]` tensor."""
    return self._num_units

  @property
  def output_size(self):
    """Output width equals the state width."""
    return self._num_units

  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    if inputs_shape[-1] is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
                       str(inputs_shape))
    _check_supported_dtypes(self.dtype)
    feature_size = inputs_shape[-1]
    # One fused kernel multiplies the concatenated [input; state] vector.
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[feature_size + self._num_units, self._num_units])
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))
    self.built = True

  def call(self, inputs, state):
    """Most basic RNN: output = new_state = act(W * input + U * state + B)."""
    _check_rnn_cell_input_dtypes([inputs, state])
    concatenated = array_ops.concat([inputs, state], 1)
    preactivation = nn_ops.bias_add(
        math_ops.matmul(concatenated, self._kernel), self._bias)
    new_state = self._activation(preactivation)
    # The output and the new state are one and the same tensor.
    return new_state, new_state

  def get_config(self):
    """Returns the serializable configuration of this cell."""
    merged = dict(super(BasicRNNCell, self).get_config())
    merged.update({
        "num_units": self._num_units,
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    })
    return merged
@tf_export(v1=["nn.rnn_cell.GRUCell"])
class GRUCell(LayerRNNCell):
  """Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).

  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnGRU` for better performance on GPU, or
  `tf.contrib.rnn.GRUBlockCellV2` for better performance on CPU.

  Args:
    num_units: int, The number of units in the GRU cell.
    activation: Nonlinearity to use. Default: `tanh`.
    reuse: (optional) Python boolean describing whether to reuse variables in
      an existing scope. If not `True`, and the existing scope already has the
      given variables, an error is raised.
    kernel_initializer: (optional) The initializer to use for the weight and
      projection matrices.
    bias_initializer: (optional) The initializer to use for the bias.
    name: String, the name of the layer. Layers with the same name will share
      weights, but to avoid mistakes we require reuse=True in such cases.
    dtype: Default dtype of the layer (default of `None` means use the type of
      the first input). Required when `build` is called before `call`.
    **kwargs: Dict, keyword named properties for common layer attributes, like
      `trainable` etc when constructing the cell from configs of get_config().
  """

  @deprecated(None, "This class is equivalent as tf.keras.layers.GRUCell,"
              " and will be replaced by that in Tensorflow 2.0.")
  def __init__(self,
               num_units,
               activation=None,
               reuse=None,
               kernel_initializer=None,
               bias_initializer=None,
               name=None,
               dtype=None,
               **kwargs):
    super(GRUCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if context.executing_eagerly() and context.num_gpus() > 0:
      logging.warn(
          "%s: Note that this cell is not optimized for performance. "
          "Please use tf.contrib.cudnn_rnn.CudnnGRU for better "
          "performance on GPU.", self)
    # The cell only accepts flat feature vectors: inputs must be 2-D.
    self.input_spec = input_spec.InputSpec(ndim=2)
    self._num_units = num_units
    self._activation = (
        activations.get(activation) if activation else math_ops.tanh)
    self._kernel_initializer = initializers.get(kernel_initializer)
    self._bias_initializer = initializers.get(bias_initializer)

  @property
  def state_size(self):
    """State is a single `[batch, num_units]` tensor."""
    return self._num_units

  @property
  def output_size(self):
    """Output width equals the state width."""
    return self._num_units

  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    if inputs_shape[-1] is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
                       str(inputs_shape))
    _check_supported_dtypes(self.dtype)
    feature_size = inputs_shape[-1]
    # The reset and update gates share one fused kernel/bias pair.
    self._gate_kernel = self.add_variable(
        "gates/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[feature_size + self._num_units, 2 * self._num_units],
        initializer=self._kernel_initializer)
    gate_bias_init = self._bias_initializer
    if gate_bias_init is None:
      # Default gate bias is 1.0 (not zero) when no initializer was given.
      gate_bias_init = init_ops.constant_initializer(1.0, dtype=self.dtype)
    self._gate_bias = self.add_variable(
        "gates/%s" % _BIAS_VARIABLE_NAME,
        shape=[2 * self._num_units],
        initializer=gate_bias_init)
    self._candidate_kernel = self.add_variable(
        "candidate/%s" % _WEIGHTS_VARIABLE_NAME,
        shape=[feature_size + self._num_units, self._num_units],
        initializer=self._kernel_initializer)
    candidate_bias_init = self._bias_initializer
    if candidate_bias_init is None:
      candidate_bias_init = init_ops.zeros_initializer(dtype=self.dtype)
    self._candidate_bias = self.add_variable(
        "candidate/%s" % _BIAS_VARIABLE_NAME,
        shape=[self._num_units],
        initializer=candidate_bias_init)
    self.built = True

  def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    _check_rnn_cell_input_dtypes([inputs, state])
    # Fused reset/update gate computation: sigmoid(W_g.[input; state] + b_g).
    gate_preact = nn_ops.bias_add(
        math_ops.matmul(
            array_ops.concat([inputs, state], 1), self._gate_kernel),
        self._gate_bias)
    reset, update = array_ops.split(
        value=math_ops.sigmoid(gate_preact), num_or_size_splits=2, axis=1)
    # Candidate state is computed from the reset-scaled previous state.
    candidate_preact = nn_ops.bias_add(
        math_ops.matmul(
            array_ops.concat([inputs, reset * state], 1),
            self._candidate_kernel),
        self._candidate_bias)
    candidate = self._activation(candidate_preact)
    # Interpolate between the old state and the candidate via the update gate.
    new_h = update * state + (1 - update) * candidate
    return new_h, new_h

  def get_config(self):
    """Returns the serializable configuration of this cell."""
    merged = dict(super(GRUCell, self).get_config())
    merged.update({
        "num_units": self._num_units,
        "kernel_initializer": initializers.serialize(self._kernel_initializer),
        "bias_initializer": initializers.serialize(self._bias_initializer),
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    })
    return merged
_LSTMStateTuple = collections.namedtuple("LSTMStateTuple", ("c", "h"))


@tf_export(v1=["nn.rnn_cell.LSTMStateTuple"])
class LSTMStateTuple(_LSTMStateTuple):
  """Tuple used by LSTM Cells for `state_size`, `zero_state`, and output state.

  Stores two elements, `(c, h)`, in that order, where `c` is the hidden state
  and `h` is the output.

  Only used when `state_is_tuple=True`.
  """
  # namedtuple subclass: keep instances free of a per-instance __dict__.
  __slots__ = ()

  @property
  def dtype(self):
    """The common dtype of `c` and `h`; raises if they disagree."""
    if self.c.dtype != self.h.dtype:
      raise TypeError("Inconsistent internal state: %s vs %s" %
                      (str(self.c.dtype), str(self.h.dtype)))
    return self.c.dtype
@tf_export(v1=["nn.rnn_cell.BasicLSTMCell"])
class BasicLSTMCell(LayerRNNCell):
  """DEPRECATED: Please use `tf.compat.v1.nn.rnn_cell.LSTMCell` instead.

  Basic LSTM recurrent network cell.

  The implementation is based on: http://arxiv.org/abs/1409.2329.

  We add forget_bias (default: 1) to the biases of the forget gate in order to
  reduce the scale of forgetting in the beginning of the training.

  It does not allow cell clipping, a projection layer, and does not
  use peep-hole connections: it is the basic baseline.

  For advanced models, please use the full `tf.compat.v1.nn.rnn_cell.LSTMCell`
  that follows.

  Note that this cell is not optimized for performance. Please use
  `tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
  `tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
  better performance on CPU.
  """

  @deprecated(None, "This class is equivalent as tf.keras.layers.LSTMCell,"
              " and will be replaced by that in Tensorflow 2.0.")
  def __init__(self,
               num_units,
               forget_bias=1.0,
               state_is_tuple=True,
               activation=None,
               reuse=None,
               name=None,
               dtype=None,
               **kwargs):
    """Initialize the basic LSTM cell.

    Args:
      num_units: int, The number of units in the LSTM cell.
      forget_bias: float, The bias added to forget gates (see above). Must set
        to `0.0` manually when restoring from CudnnLSTM-trained checkpoints.
      state_is_tuple: If True, accepted and returned states are 2-tuples of the
        `c_state` and `m_state`. If False, they are concatenated along the
        column axis. The latter behavior will soon be deprecated.
      activation: Activation function of the inner states. Default: `tanh`. It
        could also be string that is within Keras activation function names.
      reuse: (optional) Python boolean describing whether to reuse variables in
        an existing scope. If not `True`, and the existing scope already has
        the given variables, an error is raised.
      name: String, the name of the layer. Layers with the same name will share
        weights, but to avoid mistakes we require reuse=True in such cases.
      dtype: Default dtype of the layer (default of `None` means use the type of
        the first input). Required when `build` is called before `call`.
      **kwargs: Dict, keyword named properties for common layer attributes, like
        `trainable` etc when constructing the cell from configs of get_config().

      When restoring from CudnnLSTM-trained checkpoints, must use
      `CudnnCompatibleLSTMCell` instead.
    """
    super(BasicLSTMCell, self).__init__(
        _reuse=reuse, name=name, dtype=dtype, **kwargs)
    _check_supported_dtypes(self.dtype)
    if not state_is_tuple:
      logging.warn(
          "%s: Using a concatenated state is slower and will soon be "
          "deprecated.  Use state_is_tuple=True.", self)
    if context.executing_eagerly() and context.num_gpus() > 0:
      logging.warn(
          "%s: Note that this cell is not optimized for performance. "
          "Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
          "performance on GPU.", self)
    # Inputs must be 2-dimensional.
    self.input_spec = input_spec.InputSpec(ndim=2)

    self._num_units = num_units
    self._forget_bias = forget_bias
    self._state_is_tuple = state_is_tuple
    if activation:
      self._activation = activations.get(activation)
    else:
      self._activation = math_ops.tanh

  @property
  def state_size(self):
    # Tuple states carry (c, h) separately; otherwise they are concatenated
    # into one tensor twice as wide.
    return (LSTMStateTuple(self._num_units, self._num_units)
            if self._state_is_tuple else 2 * self._num_units)

  @property
  def output_size(self):
    # Output width equals the number of units (no projection in this cell).
    return self._num_units

  @tf_utils.shape_type_conversion
  def build(self, inputs_shape):
    if inputs_shape[-1] is None:
      raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
                       str(inputs_shape))
    _check_supported_dtypes(self.dtype)
    input_depth = inputs_shape[-1]
    h_depth = self._num_units
    # One fused kernel/bias pair covers all four gates (i, j, f, o),
    # hence the factor of 4 in the variable shapes.
    self._kernel = self.add_variable(
        _WEIGHTS_VARIABLE_NAME,
        shape=[input_depth + h_depth, 4 * self._num_units])
    self._bias = self.add_variable(
        _BIAS_VARIABLE_NAME,
        shape=[4 * self._num_units],
        initializer=init_ops.zeros_initializer(dtype=self.dtype))

    self.built = True

  def call(self, inputs, state):
    """Long short-term memory cell (LSTM).

    Args:
      inputs: `2-D` tensor with shape `[batch_size, input_size]`.
      state: An `LSTMStateTuple` of state tensors, each shaped `[batch_size,
        num_units]`, if `state_is_tuple` has been set to `True`. Otherwise, a
        `Tensor` shaped `[batch_size, 2 * num_units]`.

    Returns:
      A pair containing the new hidden state, and the new state (either a
        `LSTMStateTuple` or a concatenated state, depending on
        `state_is_tuple`).
    """
    _check_rnn_cell_input_dtypes([inputs, state])
    sigmoid = math_ops.sigmoid
    # Axis constant shared by both split ops below.
    one = constant_op.constant(1, dtype=dtypes.int32)
    # Parameters of gates are concatenated into one multiply for efficiency.
    if self._state_is_tuple:
      c, h = state
    else:
      c, h = array_ops.split(value=state, num_or_size_splits=2, axis=one)

    gate_inputs = math_ops.matmul(
        array_ops.concat([inputs, h], 1), self._kernel)
    gate_inputs = nn_ops.bias_add(gate_inputs, self._bias)

    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = array_ops.split(
        value=gate_inputs, num_or_size_splits=4, axis=one)

    forget_bias_tensor = constant_op.constant(self._forget_bias, dtype=f.dtype)
    # Note that using `add` and `multiply` instead of `+` and `*` gives a
    # performance improvement. So using those at the cost of readability.
    add = math_ops.add
    multiply = math_ops.multiply
    new_c = add(
        multiply(c, sigmoid(add(f, forget_bias_tensor))),
        multiply(sigmoid(i), self._activation(j)))
    new_h = multiply(self._activation(new_c), sigmoid(o))

    if self._state_is_tuple:
      new_state = LSTMStateTuple(new_c, new_h)
    else:
      new_state = array_ops.concat([new_c, new_h], 1)
    return new_h, new_state

  def get_config(self):
    # Serializable configuration, merged over the base Layer config.
    config = {
        "num_units": self._num_units,
        "forget_bias": self._forget_bias,
        "state_is_tuple": self._state_is_tuple,
        "activation": activations.serialize(self._activation),
        "reuse": self._reuse,
    }
    base_config = super(BasicLSTMCell, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@tf_export(v1=["nn.rnn_cell.LSTMCell"])
class LSTMCell(LayerRNNCell):
"""Long short-term memory unit (LSTM) recurrent network cell.
The default non-peephole implementation is based on:
https://pdfs.semanticscholar.org/1154/0131eae85b2e11d53df7f1360eeb6476e7f4.pdf
Felix Gers, Jurgen Schmidhuber, and Fred Cummins.
"Learning to forget: Continual prediction with LSTM." IET, 850-855, 1999.
The peephole implementation is based on:
https://research.google.com/pubs/archive/43905.pdf
Hasim Sak, Andrew Senior, and Francoise Beaufays.
"Long short-term memory recurrent neural network architectures for
large scale acoustic modeling." INTERSPEECH, 2014.
The class uses optional peep-hole connections, optional cell clipping, and
an optional projection layer.
Note that this cell is not optimized for performance. Please use
`tf.contrib.cudnn_rnn.CudnnLSTM` for better performance on GPU, or
`tf.contrib.rnn.LSTMBlockCell` and `tf.contrib.rnn.LSTMBlockFusedCell` for
better performance on CPU.
"""
@deprecated(None, "This class is equivalent as tf.keras.layers.LSTMCell,"
" and will be replaced by that in Tensorflow 2.0.")
def __init__(self,
num_units,
use_peepholes=False,
cell_clip=None,
initializer=None,
num_proj=None,
proj_clip=None,
num_unit_shards=None,
num_proj_shards=None,
forget_bias=1.0,
state_is_tuple=True,
activation=None,
reuse=None,
name=None,
dtype=None,
**kwargs):
"""Initialize the parameters for an LSTM cell.
Args:
num_units: int, The number of units in the LSTM cell.
use_peepholes: bool, set True to enable diagonal/peephole connections.
cell_clip: (optional) A float value, if provided the cell state is clipped
by this value prior to the cell output activation.
initializer: (optional) The initializer to use for the weight and
projection matrices.
num_proj: (optional) int, The output dimensionality for the projection
matrices. If None, no projection is performed.
proj_clip: (optional) A float value. If `num_proj > 0` and `proj_clip` is
provided, then the projected values are clipped elementwise to within
`[-proj_clip, proj_clip]`.
num_unit_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
num_proj_shards: Deprecated, will be removed by Jan. 2017. Use a
variable_scope partitioner instead.
forget_bias: Biases of the forget gate are initialized by default to 1 in
order to reduce the scale of forgetting at the beginning of the
training. Must set it manually to `0.0` when restoring from CudnnLSTM
trained checkpoints.
state_is_tuple: If True, accepted and returned states are 2-tuples of the
`c_state` and `m_state`. If False, they are concatenated along the
column axis. This latter behavior will soon be deprecated.
activation: Activation function of the inner states. Default: `tanh`. It
could also be string that is within Keras activation function names.
reuse: (optional) Python boolean describing whether to reuse variables in
an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
name: String, the name of the layer. Layers with the same name will share
weights, but to avoid mistakes we require reuse=True in such cases.
dtype: Default dtype of the layer (default of `None` means use the type of
the first input). Required when `build` is called before `call`.
**kwargs: Dict, keyword named properties for common layer attributes, like
`trainable` etc when constructing the cell from configs of get_config().
When restoring from CudnnLSTM-trained checkpoints, use
`CudnnCompatibleLSTMCell` instead.
"""
super(LSTMCell, self).__init__(
_reuse=reuse, name=name, dtype=dtype, **kwargs)
_check_supported_dtypes(self.dtype)
if not state_is_tuple:
logging.warn(
"%s: Using a concatenated state is slower and will soon be "
"deprecated. Use state_is_tuple=True.", self)
if num_unit_shards is not None or num_proj_shards is not None:
logging.warn(
"%s: The num_unit_shards and proj_unit_shards parameters are "
"deprecated and will be removed in Jan 2017. "
"Use a variable scope with a partitioner instead.", self)
if context.executing_eagerly() and context.num_gpus() > 0:
logging.warn(
"%s: Note that this cell is not optimized for performance. "
"Please use tf.contrib.cudnn_rnn.CudnnLSTM for better "
"performance on GPU.", self)
# Inputs must be 2-dimensional.
self.input_spec = input_spec.InputSpec(ndim=2)
self._num_units = num_units
self._use_peepholes = use_peepholes
self._cell_clip = cell_clip
self._initializer = initializers.get(initializer)
self._num_proj = num_proj
self._proj_clip = proj_clip
self._num_unit_shards = num_unit_shards
self._num_proj_shards = num_proj_shards
self._forget_bias = forget_bias
self._state_is_tuple = state_is_tuple
if activation:
self._activation = activations.get(activation)
else:
self._activation = math_ops.tanh
if num_proj:
self._state_size = (
LSTMStateTuple(num_units, num_proj) if state_is_tuple else num_units +
num_proj)
self._output_size = num_proj
else:
self._state_size = (
LSTMStateTuple(num_units, num_units) if state_is_tuple else 2 *
num_units)
self._output_size = num_units
@property
def state_size(self):
return self._state_size
@property
def output_size(self):
return self._output_size
@tf_utils.shape_type_conversion
def build(self, inputs_shape):
if inputs_shape[-1] is None:
raise ValueError("Expected inputs.shape[-1] to be known, saw shape: %s" %
str(inputs_shape))
_check_supported_dtypes(self.dtype)
input_depth = inputs_shape[-1]
h_depth = self._num_units if self._num_proj is None else self._num_proj
maybe_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_unit_shards)
if self._num_unit_shards is not None else None)
self._kernel = self.add_variable(
_WEIGHTS_VARIABLE_NAME,
shape=[input_depth + h_depth, 4 * self._num_units],
initializer=self._initializer,
partitioner=maybe_partitioner)
if self.dtype is None:
initializer = init_ops.zeros_initializer
else:
initializer = init_ops.zeros_initializer(dtype=self.dtype)
self._bias = self.add_variable(
_BIAS_VARIABLE_NAME,
shape=[4 * self._num_units],
initializer=initializer)
if self._use_peepholes:
self._w_f_diag = self.add_variable(
"w_f_diag", shape=[self._num_units], initializer=self._initializer)
self._w_i_diag = self.add_variable(
"w_i_diag", shape=[self._num_units], initializer=self._initializer)
self._w_o_diag = self.add_variable(
"w_o_diag", shape=[self._num_units], initializer=self._initializer)
if self._num_proj is not None:
maybe_proj_partitioner = (
partitioned_variables.fixed_size_partitioner(self._num_proj_shards)
if self._num_proj_shards is not None else None)
self._proj_kernel = self.add_variable(
"projection/%s" % _WEIGHTS_VARIABLE_NAME,
shape=[self._num_units, self._num_proj],
initializer=self._initializer,
partitioner=maybe_proj_partitioner)
self.built = True
def call(self, inputs, state):
  """Run one step of LSTM.

  Args:
    inputs: input Tensor, must be 2-D, `[batch, input_size]`.
    state: if `state_is_tuple` is False, this must be a state Tensor, `2-D,
      [batch, state_size]`.  If `state_is_tuple` is True, this must be a tuple
      of state Tensors, both `2-D`, with column sizes `c_state` and `m_state`.

  Returns:
    A tuple containing:

    - A `2-D, [batch, output_dim]`, Tensor representing the output of the
      LSTM after reading `inputs` when previous state was `state`.
      Here output_dim is:
         num_proj if num_proj was set,
         num_units otherwise.
    - Tensor(s) representing the new state of LSTM after reading `inputs` when
      the previous state was `state`.  Same type and shape(s) as `state`.

  Raises:
    ValueError: If input size cannot be inferred from inputs via
      static shape inference.
  """
  _check_rnn_cell_input_dtypes([inputs, state])

  num_proj = self._num_units if self._num_proj is None else self._num_proj
  sigmoid = math_ops.sigmoid

  if self._state_is_tuple:
    (c_prev, m_prev) = state
  else:
    # Packed state layout along axis 1: [c (num_units) | m (num_proj)].
    c_prev = array_ops.slice(state, [0, 0], [-1, self._num_units])
    m_prev = array_ops.slice(state, [0, self._num_units], [-1, num_proj])

  input_size = inputs.get_shape().with_rank(2).dims[1].value
  if input_size is None:
    raise ValueError("Could not infer input size from inputs.get_shape()[-1]")

  # i = input_gate, j = new_input, f = forget_gate, o = output_gate
  # All four gates are computed in a single matmul against the fused kernel.
  lstm_matrix = math_ops.matmul(
      array_ops.concat([inputs, m_prev], 1), self._kernel)
  lstm_matrix = nn_ops.bias_add(lstm_matrix, self._bias)

  i, j, f, o = array_ops.split(
      value=lstm_matrix, num_or_size_splits=4, axis=1)
  # Diagonal connections
  if self._use_peepholes:
    # Peephole weights let the gates see the previous cell state directly.
    c = (
        sigmoid(f + self._forget_bias + self._w_f_diag * c_prev) * c_prev +
        sigmoid(i + self._w_i_diag * c_prev) * self._activation(j))
  else:
    c = (
        sigmoid(f + self._forget_bias) * c_prev +
        sigmoid(i) * self._activation(j))

  if self._cell_clip is not None:
    # pylint: disable=invalid-unary-operand-type
    c = clip_ops.clip_by_value(c, -self._cell_clip, self._cell_clip)
    # pylint: enable=invalid-unary-operand-type
  if self._use_peepholes:
    m = sigmoid(o + self._w_o_diag * c) * self._activation(c)
  else:
    m = sigmoid(o) * self._activation(c)

  if self._num_proj is not None:
    # Optional linear projection of the output down to num_proj dimensions.
    m = math_ops.matmul(m, self._proj_kernel)

    if self._proj_clip is not None:
      # pylint: disable=invalid-unary-operand-type
      m = clip_ops.clip_by_value(m, -self._proj_clip, self._proj_clip)
      # pylint: enable=invalid-unary-operand-type

  new_state = (
      LSTMStateTuple(c, m)
      if self._state_is_tuple else array_ops.concat([c, m], 1))
  return m, new_state
def get_config(self):
  """Returns a JSON-serializable configuration dict for this LSTM cell."""
  base_config = super(LSTMCell, self).get_config()
  merged = dict(base_config.items())
  # Cell-specific keys take precedence over any base-layer keys.
  merged.update(
      num_units=self._num_units,
      use_peepholes=self._use_peepholes,
      cell_clip=self._cell_clip,
      initializer=initializers.serialize(self._initializer),
      num_proj=self._num_proj,
      proj_clip=self._proj_clip,
      num_unit_shards=self._num_unit_shards,
      num_proj_shards=self._num_proj_shards,
      forget_bias=self._forget_bias,
      state_is_tuple=self._state_is_tuple,
      activation=activations.serialize(self._activation),
      reuse=self._reuse,
  )
  return merged
def _enumerated_map_structure_up_to(shallow_structure, map_fn, *args, **kwargs):
  """Like `nest.map_structure_up_to`, but passes a running index to `map_fn`."""
  # Mutable container so the inner function can advance the counter
  # without `nonlocal` (keeps Python 2 compatibility).
  counter = {"next": 0}

  def _indexed_fn(*inner_args, **inner_kwargs):
    index = counter["next"]
    counter["next"] = index + 1
    return map_fn(index, *inner_args, **inner_kwargs)

  return nest.map_structure_up_to(shallow_structure, _indexed_fn, *args,
                                  **kwargs)
def _default_dropout_state_filter_visitor(substate):
  """Default dropout policy: skip LSTM memory state and TensorArrays."""
  if isinstance(substate, tensor_array_ops.TensorArray):
    # TensorArrays cannot meaningfully have dropout applied.
    return False
  if isinstance(substate, LSTMStateTuple):
    # Do not perform dropout on the memory state.
    return LSTMStateTuple(c=False, h=True)
  return True
class _RNNCellWrapperV1(RNNCell):
  """Base class for cell wrappers, V1 compatibility.

  Together with `_RNNCellWrapperV2`, this allows one wrapper implementation
  (expressed via `_call_wrapped_cell`) to serve both the V1 `RNNCell` and
  the V2 Keras cell interfaces.
  """

  def __init__(self, cell):
    super(_RNNCellWrapperV1, self).__init__()
    self.cell = cell
    # Track the inner cell so its variables are saved alongside this
    # wrapper under object-based checkpointing.
    if isinstance(cell, trackable.Trackable):
      self._track_trackable(self.cell, name="cell")

  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Calls the wrapped cell and performs the wrapping logic.

    Invoked from this wrapper's `call` or `__call__` methods; subclasses
    supply the actual wrapping behavior.

    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      cell_call_fn: Wrapped cell's method to use for step computation
        (cell's `__call__` or `call` method).
      **kwargs: Additional arguments.

    Returns:
      A pair of (output tensor, new wrapped-cell state).
    """
    raise NotImplementedError

  def __call__(self, inputs, state, scope=None):
    """Runs the RNN cell step computation.

    The wrapped RNNCell is assumed to build itself inside its own
    `__call__`, so that method is used directly here; this makes wrapped
    and unwrapped cells interchangeable at `__call__` time.

    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      scope: VariableScope for the subgraph created in the wrapped cells'
        `__call__`.

    Returns:
      A pair of (output tensor, new wrapped-cell state).
    """
    return self._call_wrapped_cell(
        inputs, state, cell_call_fn=self.cell.__call__, scope=scope)
class _RNNCellWrapperV2(keras_layer.AbstractRNNCell):
  """Base class for cell wrappers, V2 compatibility.

  Together with `_RNNCellWrapperV1`, this allows one wrapper implementation
  (expressed via `_call_wrapped_cell`) to serve both the V1 `RNNCell` and
  the V2 Keras cell interfaces.
  """

  def __init__(self, cell, *args, **kwargs):
    super(_RNNCellWrapperV2, self).__init__(*args, **kwargs)
    self.cell = cell

  def call(self, inputs, state, **kwargs):
    """Runs the RNN cell step computation.

    By the time `call` runs, the wrapper (and thus the wrapped cell, via
    its `build`) is assumed to be built, so the wrapped cell's `call` can
    be invoked directly; this makes wrapped and unwrapped cells
    interchangeable under `call`/`build`.

    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      **kwargs: Additional arguments passed to the wrapped cell's `call`.

    Returns:
      A pair of (output tensor, new wrapped-cell state).
    """
    return self._call_wrapped_cell(
        inputs, state, cell_call_fn=self.cell.call, **kwargs)

  def build(self, inputs_shape):
    """Builds the wrapped cell, then marks this wrapper as built."""
    self.cell.build(inputs_shape)
    self.built = True
class DropoutWrapperBase(object):
  """Operator adding dropout to inputs and outputs of the given cell."""

  def __init__(self,
               cell,
               input_keep_prob=1.0,
               output_keep_prob=1.0,
               state_keep_prob=1.0,
               variational_recurrent=False,
               input_size=None,
               dtype=None,
               seed=None,
               dropout_state_filter_visitor=None):
    """Create a cell with added input, state, and/or output dropout.

    If `variational_recurrent` is set to `True` (**NOT** the default
    behavior), then the same dropout mask is applied at every step, as
    described in: [A Theoretically Grounded Application of Dropout in
    Recurrent Neural Networks. Y. Gal, Z. Ghahramani]
    (https://arxiv.org/abs/1512.05287).

    Otherwise a different dropout mask is applied at every time step.

    Note, by default (unless a custom `dropout_state_filter` is provided),
    the memory state (`c` component of any `LSTMStateTuple`) passing through
    a `DropoutWrapper` is never modified.  This behavior is described in the
    above article.

    Args:
      cell: an RNNCell, a projection to output_size is added to it.
      input_keep_prob: unit Tensor or float between 0 and 1, input keep
        probability; if it is constant and 1, no input dropout will be added.
      output_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is constant and 1, no output dropout will be added.
      state_keep_prob: unit Tensor or float between 0 and 1, output keep
        probability; if it is constant and 1, no output dropout will be added.
        State dropout is performed on the outgoing states of the cell. **Note**
        the state components to which dropout is applied when `state_keep_prob`
        is in `(0, 1)` are also determined by the argument
        `dropout_state_filter_visitor` (e.g. by default dropout is never
        applied to the `c` component of an `LSTMStateTuple`).
      variational_recurrent: Python bool.  If `True`, then the same dropout
        pattern is applied across all time steps per run call.  If this
        parameter is set, `input_size` **must** be provided.
      input_size: (optional) (possibly nested tuple of) `TensorShape` objects
        containing the depth(s) of the input tensors expected to be passed in
        to the `DropoutWrapper`.  Required and used **iff**
        `variational_recurrent = True` and `input_keep_prob < 1`.
      dtype: (optional) The `dtype` of the input, state, and output tensors.
        Required and used **iff** `variational_recurrent = True`.
      seed: (optional) integer, the randomness seed.
      dropout_state_filter_visitor: (optional), default: (see below).  Function
        that takes any hierarchical level of the state and returns a scalar or
        depth=1 structure of Python booleans describing which terms in the
        state should be dropped out.  In addition, if the function returns
        `True`, dropout is applied across this sublevel.  If the function
        returns `False`, dropout is not applied across this entire sublevel.
        Default behavior: perform dropout on all terms except the memory (`c`)
        state of `LSTMCellState` objects, and don't try to apply dropout to
        `TensorArray` objects:

        ```
        def dropout_state_filter_visitor(s):
          if isinstance(s, LSTMCellState):
            # Never perform dropout on the c state.
            return LSTMCellState(c=False, h=True)
          elif isinstance(s, TensorArray):
            return False
          return True
        ```

    Raises:
      TypeError: if `cell` is not an `RNNCell`, or `keep_state_fn` is provided
        but not `callable`.
      ValueError: if any of the keep_probs are not between 0 and 1.
    """
    super(DropoutWrapperBase, self).__init__(cell)
    assert_like_rnncell("cell", cell)

    if (dropout_state_filter_visitor is not None and
        not callable(dropout_state_filter_visitor)):
      raise TypeError("dropout_state_filter_visitor must be callable")
    # Default filter never drops LSTM memory state or TensorArrays.
    self._dropout_state_filter = (
        dropout_state_filter_visitor or _default_dropout_state_filter_visitor)
    with ops.name_scope("DropoutWrapperInit"):

      def tensor_and_const_value(v):
        # Returns both the tensor form and, when statically known, the
        # constant value of a keep probability.
        tensor_value = ops.convert_to_tensor(v)
        const_value = tensor_util.constant_value(tensor_value)
        return (tensor_value, const_value)

      for prob, attr in [(input_keep_prob, "input_keep_prob"),
                         (state_keep_prob, "state_keep_prob"),
                         (output_keep_prob, "output_keep_prob")]:
        tensor_prob, const_prob = tensor_and_const_value(prob)
        if const_prob is not None:
          if const_prob < 0 or const_prob > 1:
            raise ValueError("Parameter %s must be between 0 and 1: %d" %
                             (attr, const_prob))
          # Statically-known probabilities are stored as plain floats so
          # `_should_dropout` can elide dropout entirely when prob == 1.
          setattr(self, "_%s" % attr, float(const_prob))
        else:
          setattr(self, "_%s" % attr, tensor_prob)

    # Set variational_recurrent, seed before running the code below
    self._variational_recurrent = variational_recurrent
    self._seed = seed

    self._recurrent_input_noise = None
    self._recurrent_state_noise = None
    self._recurrent_output_noise = None

    if variational_recurrent:
      if dtype is None:
        raise ValueError(
            "When variational_recurrent=True, dtype must be provided")

      def convert_to_batch_shape(s):
        # Prepend a 1 for the batch dimension; for recurrent
        # variational dropout we use the same dropout mask for all
        # batch elements.
        return array_ops.concat(([1], tensor_shape.TensorShape(s).as_list()),
                                0)

      def batch_noise(s, inner_seed):
        shape = convert_to_batch_shape(s)
        return random_ops.random_uniform(shape, seed=inner_seed, dtype=dtype)

      if (not isinstance(self._input_keep_prob, numbers.Real) or
          self._input_keep_prob < 1.0):
        if input_size is None:
          raise ValueError(
              "When variational_recurrent=True and input_keep_prob < 1.0 or "
              "is unknown, input_size must be provided")
        self._recurrent_input_noise = _enumerated_map_structure_up_to(
            input_size,
            lambda i, s: batch_noise(s, inner_seed=self._gen_seed("input", i)),
            input_size)
      # State and output noise are always generated; whether they are used
      # depends on the corresponding keep probabilities at call time.
      self._recurrent_state_noise = _enumerated_map_structure_up_to(
          cell.state_size,
          lambda i, s: batch_noise(s, inner_seed=self._gen_seed("state", i)),
          cell.state_size)
      self._recurrent_output_noise = _enumerated_map_structure_up_to(
          cell.output_size,
          lambda i, s: batch_noise(s, inner_seed=self._gen_seed("output", i)),
          cell.output_size)

  def _gen_seed(self, salt_prefix, index):
    # Derive a deterministic per-element seed from the base seed, salted by
    # the structure position, so each leaf gets an independent mask.
    if self._seed is None:
      return None
    salt = "%s_%d" % (salt_prefix, index)
    string = (str(self._seed) + salt).encode("utf-8")
    return int(hashlib.md5(string).hexdigest()[:8], 16) & 0x7FFFFFFF

  @property
  def wrapped_cell(self):
    return self.cell

  @property
  def state_size(self):
    return self.cell.state_size

  @property
  def output_size(self):
    return self.cell.output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState",
                        values=[batch_size]):
      return self.cell.zero_state(batch_size, dtype)

  def _variational_recurrent_dropout_value(
      self, index, value, noise, keep_prob):
    """Performs dropout given the pre-calculated noise tensor."""
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob + noise

    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    # Scale kept values by 1/keep_prob (inverted dropout).
    ret = math_ops.div(value, keep_prob) * binary_tensor
    ret.set_shape(value.get_shape())
    return ret

  def _dropout(self,
               values,
               salt_prefix,
               recurrent_noise,
               keep_prob,
               shallow_filtered_substructure=None):
    """Decides whether to perform standard dropout or recurrent dropout."""

    if shallow_filtered_substructure is None:
      # Put something so we traverse the entire structure; inside the
      # dropout function we check to see if leafs of this are bool or not.
      shallow_filtered_substructure = values

    if not self._variational_recurrent:

      def dropout(i, do_dropout, v):
        # `do_dropout` is either a leaf value (non-bool: always drop out)
        # or a bool from the state filter.
        if not isinstance(do_dropout, bool) or do_dropout:
          return nn_ops.dropout_v2(
              v, rate=1. - keep_prob, seed=self._gen_seed(salt_prefix, i))
        else:
          return v

      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values])
    else:

      def dropout(i, do_dropout, v, n):
        if not isinstance(do_dropout, bool) or do_dropout:
          return self._variational_recurrent_dropout_value(i, v, n, keep_prob)
        else:
          return v

      return _enumerated_map_structure_up_to(
          shallow_filtered_substructure, dropout,
          *[shallow_filtered_substructure, values, recurrent_noise])

  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Runs the wrapped cell and applies dropout.

    Args:
      inputs: A tensor with wrapped cell's input.
      state: A tensor or tuple of tensors with wrapped cell's state.
      cell_call_fn: Wrapped cell's method to use for step computation (cell's
        `__call__` or 'call' method).
      **kwargs: Additional arguments.

    Returns:
      A pair containing:

      - Output: A tensor with cell's output.
      - New state: A tensor or tuple of tensors with new wrapped cell's state.
    """

    def _should_dropout(p):
      # Dropout is a no-op when the keep probability is statically 1.
      return (not isinstance(p, float)) or p < 1

    if _should_dropout(self._input_keep_prob):
      inputs = self._dropout(inputs, "input", self._recurrent_input_noise,
                             self._input_keep_prob)
    output, new_state = cell_call_fn(inputs, state, **kwargs)
    if _should_dropout(self._state_keep_prob):
      # Identify which subsets of the state to perform dropout on and
      # which ones to keep.
      shallow_filtered_substructure = nest.get_traverse_shallow_structure(
          self._dropout_state_filter, new_state)
      new_state = self._dropout(new_state, "state",
                                self._recurrent_state_noise,
                                self._state_keep_prob,
                                shallow_filtered_substructure)
    if _should_dropout(self._output_keep_prob):
      output = self._dropout(output, "output", self._recurrent_output_noise,
                             self._output_keep_prob)
    return output, new_state
@tf_export(v1=["nn.rnn_cell.DropoutWrapper"])
class DropoutWrapper(DropoutWrapperBase, _RNNCellWrapperV1):
  """V1-compatible RNN cell wrapper that applies dropout to a cell's
  inputs, outputs, and/or state."""

  def __init__(self, *args, **kwargs):
    super(DropoutWrapper, self).__init__(*args, **kwargs)

  # Surface the full constructor contract from the shared base class.
  __init__.__doc__ = DropoutWrapperBase.__init__.__doc__
@tf_export("nn.RNNCellDropoutWrapper", v1=[])
class DropoutWrapperV2(DropoutWrapperBase, _RNNCellWrapperV2):
  """V2 (Keras-compatible) RNN cell wrapper that applies dropout to a
  cell's inputs, outputs, and/or state."""

  def __init__(self, *args, **kwargs):
    super(DropoutWrapperV2, self).__init__(*args, **kwargs)

  # Surface the full constructor contract from the shared base class.
  __init__.__doc__ = DropoutWrapperBase.__init__.__doc__
class ResidualWrapperBase(object):
  """RNNCell wrapper that ensures cell inputs are added to the outputs."""

  def __init__(self, cell, residual_fn=None):
    """Constructs a `ResidualWrapper` for `cell`.

    Args:
      cell: An instance of `RNNCell`.
      residual_fn: (Optional) The function to map raw cell inputs and raw
        cell outputs to the actual cell outputs of the residual network.
        Defaults to calling nest.map_structure on (lambda i, o: i + o),
        inputs and outputs.
    """
    super(ResidualWrapperBase, self).__init__(cell)
    self._residual_fn = residual_fn

  @property
  def state_size(self):
    return self.cell.state_size

  @property
  def output_size(self):
    return self.cell.output_size

  def zero_state(self, batch_size, dtype):
    scope_name = type(self).__name__ + "ZeroState"
    with ops.name_scope(scope_name, values=[batch_size]):
      return self.cell.zero_state(batch_size, dtype)

  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Run the cell, then combine its inputs and outputs residually.

    Args:
      inputs: cell inputs.
      state: cell state.
      cell_call_fn: Wrapped cell's method to use for step computation
        (cell's `__call__` or `call` method).
      **kwargs: Additional arguments passed to the wrapped cell's `call`.

    Returns:
      Tuple of cell outputs and new state.

    Raises:
      TypeError: If cell inputs and outputs have different structure (type).
      ValueError: If cell inputs and outputs have different structure (value).
    """
    outputs, new_state = cell_call_fn(inputs, state, **kwargs)

    def _check_shapes(inp, out):
      # Residual addition requires input/output shape compatibility.
      inp.get_shape().assert_is_compatible_with(out.get_shape())

    def _add_residual(inputs, outputs):
      nest.assert_same_structure(inputs, outputs)
      nest.map_structure(_check_shapes, inputs, outputs)
      return nest.map_structure(lambda inp, out: inp + out, inputs, outputs)

    combine = self._residual_fn or _add_residual
    return (combine(inputs, outputs), new_state)
@tf_export(v1=["nn.rnn_cell.ResidualWrapper"])
class ResidualWrapper(ResidualWrapperBase, _RNNCellWrapperV1):
  """V1-compatible RNN cell wrapper that adds cell inputs to cell outputs."""

  def __init__(self, *args, **kwargs):
    super(ResidualWrapper, self).__init__(*args, **kwargs)

  # Surface the full constructor contract from the shared base class.
  __init__.__doc__ = ResidualWrapperBase.__init__.__doc__
@tf_export("nn.RNNCellResidualWrapper", v1=[])
class ResidualWrapperV2(ResidualWrapperBase, _RNNCellWrapperV2):
  """V2 (Keras-compatible) RNN cell wrapper that adds cell inputs to
  cell outputs."""

  def __init__(self, *args, **kwargs):
    super(ResidualWrapperV2, self).__init__(*args, **kwargs)

  # Surface the full constructor contract from the shared base class.
  __init__.__doc__ = ResidualWrapperBase.__init__.__doc__
class DeviceWrapperBase(object):
  """Operator that ensures an RNNCell runs on a particular device."""

  def __init__(self, cell, device):
    """Construct a `DeviceWrapper` for `cell` with device `device`.

    Ensures the wrapped `cell` is called with `tf.device(device)`.

    Args:
      cell: An instance of `RNNCell`.
      device: A device string or function, for passing to `tf.device`.
    """
    super(DeviceWrapperBase, self).__init__(cell)
    self._device = device

  @property
  def state_size(self):
    return self.cell.state_size

  @property
  def output_size(self):
    return self.cell.output_size

  def zero_state(self, batch_size, dtype):
    scope_name = type(self).__name__ + "ZeroState"
    with ops.name_scope(scope_name, values=[batch_size]):
      # Create the zero state on the configured device as well.
      with ops.device(self._device):
        return self.cell.zero_state(batch_size, dtype)

  def _call_wrapped_cell(self, inputs, state, cell_call_fn, **kwargs):
    """Run the wrapped cell's step computation on the configured device."""
    with ops.device(self._device):
      return cell_call_fn(inputs, state, **kwargs)
@tf_export(v1=["nn.rnn_cell.DeviceWrapper"])
class DeviceWrapper(DeviceWrapperBase, _RNNCellWrapperV1):
  """Operator that ensures an RNNCell runs on a particular device."""

  def __init__(self, *args, **kwargs):  # pylint: disable=useless-super-delegation
    super(DeviceWrapper, self).__init__(*args, **kwargs)

  # Expose the base class's constructor documentation on this subclass.
  __init__.__doc__ = DeviceWrapperBase.__init__.__doc__
@tf_export("nn.RNNCellDeviceWrapper", v1=[])
class DeviceWrapperV2(DeviceWrapperBase, _RNNCellWrapperV2):
  """V2 (Keras-compatible) wrapper pinning an RNNCell to one device."""

  def __init__(self, *args, **kwargs):  # pylint: disable=useless-super-delegation
    super(DeviceWrapperV2, self).__init__(*args, **kwargs)

  # Surface the full constructor contract from the shared base class.
  __init__.__doc__ = DeviceWrapperBase.__init__.__doc__
@tf_export(v1=["nn.rnn_cell.MultiRNNCell"])
class MultiRNNCell(RNNCell):
  """RNN cell composed sequentially of multiple simple cells.

  Example:

  ```python
  num_units = [128, 64]
  cells = [BasicLSTMCell(num_units=n) for n in num_units]
  stacked_rnn_cell = MultiRNNCell(cells)
  ```
  """

  @deprecated(None, "This class is equivalent as "
              "tf.keras.layers.StackedRNNCells, and will be replaced by "
              "that in Tensorflow 2.0.")
  def __init__(self, cells, state_is_tuple=True):
    """Create a RNN cell composed sequentially of a number of RNNCells.

    Args:
      cells: list of RNNCells that will be composed in this order.
      state_is_tuple: If True, accepted and returned states are n-tuples,
        where `n = len(cells)`.  If False, the states are all concatenated
        along the column axis.  This latter behavior will soon be deprecated.

    Raises:
      ValueError: if cells is empty (not allowed), or at least one of the
        cells returns a state tuple but the flag `state_is_tuple` is `False`.
    """
    super(MultiRNNCell, self).__init__()
    if not cells:
      raise ValueError("Must specify at least one cell for MultiRNNCell.")
    if not nest.is_sequence(cells):
      raise TypeError("cells must be a list or tuple, but saw: %s." % cells)

    if len(set([id(cell) for cell in cells])) < len(cells):
      # Duplicate cell objects share variables; warn once per process.
      logging.log_first_n(
          logging.WARN, "At least two cells provided to MultiRNNCell "
          "are the same object and will share weights.", 1)

    self._cells = cells
    for cell_number, cell in enumerate(self._cells):
      # Add Trackable dependencies on these cells so their variables get
      # saved with this object when using object-based saving.
      if isinstance(cell, trackable.Trackable):
        # TODO(allenl): Track down non-Trackable callers.
        self._track_trackable(cell, name="cell-%d" % (cell_number,))
    self._state_is_tuple = state_is_tuple
    if not state_is_tuple:
      if any(nest.is_sequence(c.state_size) for c in self._cells):
        raise ValueError("Some cells return tuples of states, but the flag "
                         "state_is_tuple is not set. State sizes are: %s" %
                         str([c.state_size for c in self._cells]))

  @property
  def state_size(self):
    if self._state_is_tuple:
      return tuple(cell.state_size for cell in self._cells)
    else:
      # Packed representation: per-cell states are concatenated along axis 1.
      return sum(cell.state_size for cell in self._cells)

  @property
  def output_size(self):
    # The stack's output is the output of the last (topmost) cell.
    return self._cells[-1].output_size

  def zero_state(self, batch_size, dtype):
    with ops.name_scope(type(self).__name__ + "ZeroState",
                        values=[batch_size]):
      if self._state_is_tuple:
        return tuple(cell.zero_state(batch_size, dtype)
                     for cell in self._cells)
      else:
        # We know here that state_size of each cell is not a tuple and
        # presumably does not contain TensorArrays or anything else fancy
        return super(MultiRNNCell, self).zero_state(batch_size, dtype)

  @property
  def trainable_weights(self):
    if not self.trainable:
      return []
    weights = []
    for cell in self._cells:
      if isinstance(cell, base_layer.Layer):
        weights += cell.trainable_weights
    return weights

  @property
  def non_trainable_weights(self):
    weights = []
    for cell in self._cells:
      if isinstance(cell, base_layer.Layer):
        weights += cell.non_trainable_weights
    if not self.trainable:
      # When the stack itself is frozen, the cells' otherwise-trainable
      # weights are reported as non-trainable too.
      trainable_weights = []
      for cell in self._cells:
        if isinstance(cell, base_layer.Layer):
          trainable_weights += cell.trainable_weights
      return trainable_weights + weights
    return weights

  def call(self, inputs, state):
    """Run this multi-layer cell on inputs, starting from state."""
    cur_state_pos = 0
    cur_inp = inputs
    new_states = []
    for i, cell in enumerate(self._cells):
      with vs.variable_scope("cell_%d" % i):
        if self._state_is_tuple:
          if not nest.is_sequence(state):
            raise ValueError(
                "Expected state to be a tuple of length %d, but received: %s"
                % (len(self.state_size), state))
          cur_state = state[i]
        else:
          # Slice this cell's slab out of the packed state tensor.
          cur_state = array_ops.slice(state, [0, cur_state_pos],
                                      [-1, cell.state_size])
          cur_state_pos += cell.state_size
        # Each cell's output feeds the next cell's input.
        cur_inp, new_state = cell(cur_inp, cur_state)
        new_states.append(new_state)

    new_states = (
        tuple(new_states) if self._state_is_tuple else array_ops.concat(
            new_states, 1))

    return cur_inp, new_states
def _check_rnn_cell_input_dtypes(inputs):
  """Verify that every tensor in `inputs` has a supported dtype.

  Default RNN cells only support floats and complex as their dtypes, since
  the activation functions (tanh and sigmoid) only allow those types.  A
  descriptive error is raised for any unsupported tensor.

  Args:
    inputs: tensor or nested structure of tensors that are fed to an RNN
      cell as input or state.

  Raises:
    ValueError: if any of the input tensors have a dtype that is not float
      or complex.
  """
  for tensor in nest.flatten(inputs):
    _check_supported_dtypes(tensor.dtype)
def _check_supported_dtypes(dtype):
  """Raise ValueError unless `dtype` is None, floating, or complex."""
  if dtype is None:
    return
  resolved = dtypes.as_dtype(dtype)
  if resolved.is_floating or resolved.is_complex:
    return
  raise ValueError("RNN cell only supports floating point inputs, "
                   "but saw dtype: %s" % resolved)
| {
"content_hash": "0894722fc2884e7abc90ca238df1eed5",
"timestamp": "",
"source": "github",
"line_count": 1740,
"max_line_length": 82,
"avg_line_length": 39.08505747126437,
"alnum_prop": 0.6566139277732032,
"repo_name": "ghchinoy/tensorflow",
"id": "19b7bfa8d86172455517b0cba3998befd6b56cc8",
"size": "68697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/rnn_cell_impl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, Http404
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.views.generic import date_based
from django.conf import settings
from django.db.models import Q

from swaps.models import Offer, Swap
from swaps.forms import OfferForm, ProposeSwapForm, ProposingOfferForm

# Optional integration with django-notification: bound to None when the app
# is not installed so call sites can guard with `if notification:`.
if "notification" in settings.INSTALLED_APPS:
    from notification import models as notification
else:
    notification = None

# Feature flag: threaded comments ("forums") are available only when the
# threadedcomments app is importable.
try:
    from threadedcomments.models import ThreadedComment
    forums = True
except ImportError:
    forums = False
@login_required
def offers(request, username=None):
    """List all open offers (state 1), newest first.

    `username` is accepted for URLconf compatibility but is not currently
    used to filter the listing.
    """
    open_offers = Offer.objects.filter(state=1).order_by("-offered_time")
    return render_to_response(
        "swaps/offers.html",
        {"offers": open_offers},
        context_instance=RequestContext(request),
    )
@login_required
def offer(request, offer_id):
    """Show the detail page for a single offer."""
    offer = get_object_or_404(Offer, id=offer_id)
    context = {"offer": offer}
    return render_to_response("swaps/offer.html", context,
                              context_instance=RequestContext(request))
@login_required
def your_offers(request):
    """List the requesting user's own offers, newest first."""
    own_offers = Offer.objects.filter(
        offerer=request.user).order_by("-offered_time")
    return render_to_response(
        "swaps/your_offers.html",
        {"offers": own_offers},
        context_instance=RequestContext(request),
    )
@login_required
def swap(request, swap_id):
    """Show the detail page for a single swap."""
    swap = get_object_or_404(Swap, id=swap_id)
    context = {"swap": swap}
    return render_to_response("swaps/swap.html", context,
                              context_instance=RequestContext(request))
@login_required
def proposed_by_you(request):
    """List pending swaps (state 1) that the requesting user proposed."""
    pending = Swap.objects.filter(
        proposing_offer__offerer=request.user,
        state=1,
    ).order_by("-proposed_time")
    return render_to_response(
        "swaps/proposed_by_you.html",
        {"swaps": pending},
        context_instance=RequestContext(request),
    )
@login_required
def proposed_to_you(request):
    """List pending swaps (state 1) proposed against the user's offers."""
    pending = Swap.objects.filter(
        responding_offer__offerer=request.user,
        state=1,
    ).order_by("-proposed_time")
    return render_to_response(
        "swaps/proposed_to_you.html",
        {"swaps": pending},
        context_instance=RequestContext(request),
    )
@login_required
def accepted_swaps(request):
    """List accepted swaps (state 2) in which the user is on either side."""
    accepted = Swap.objects.filter(
        Q(state=2, proposing_offer__offerer=request.user) |
        Q(state=2, responding_offer__offerer=request.user)
    ).order_by("-accepted_time")
    return render_to_response(
        "swaps/accepted.html",
        {"swaps": accepted},
        context_instance=RequestContext(request),
    )
@login_required
def dead_swaps(request):
    """List dead swaps (state > 3) in which the user is on either side."""
    dead = Swap.objects.filter(
        Q(state__gt=3, proposing_offer__offerer=request.user) |
        Q(state__gt=3, responding_offer__offerer=request.user)
    ).order_by("-killed_time")
    return render_to_response(
        "swaps/dead.html",
        {"swaps": dead},
        context_instance=RequestContext(request),
    )
@login_required
def new(request):
    """Create a new offer, along with any inline formsets on OfferForm."""

    def _all_valid(bound_formsets):
        # all() short-circuits on the first invalid formset, matching the
        # chained-`and` semantics of the original helper.
        return all(formset.is_valid() for _name, formset in bound_formsets)

    formsets = []
    if request.method == "POST" and request.POST["action"] == "create":
        for name, formset_class in OfferForm.inlines:
            formsets.append((name, formset_class(request.POST, request.FILES)))
        offer_form = OfferForm(request.POST, request.FILES)
        inlines_ok = _all_valid(formsets)
        offer_form.inlines = formsets
        if offer_form.is_valid() and inlines_ok:
            offer = offer_form.save(commit=False)
            offer.offerer = request.user
            offer.save()
            # Re-bind each inline formset to the saved offer and persist it.
            for name, formset_class in OfferForm.inlines:
                bound = formset_class(request.POST, request.FILES,
                                      instance=offer)
                if bound.is_valid():
                    bound.save()
            request.user.message_set.create(
                message=_("Successfully saved offer '%s'")
                % offer.short_description)
            return HttpResponseRedirect(reverse("offer_list_yours"))
    else:
        offer_form = OfferForm()
    return render_to_response("swaps/new_offer.html", {
        "offer_form": offer_form
    }, context_instance=RequestContext(request))
@login_required
def edit_offer(request, offer_id):
    """Edit an existing offer (owner only) together with its inline formsets.

    On a successful update, notifies the other party of every pending swap
    (state 1) that involves this offer, then redirects to the URL name given
    in the `returnto` query parameter.
    """
    def is_valid(formsets):
        # True only when every bound formset validates; stops calling
        # is_valid() after the first failure (chained `and` short-circuits).
        valid = True
        for name, formset in formsets:
            valid = valid and formset.is_valid()
        return valid
    offer = get_object_or_404(Offer, id=offer_id)
    if offer.offerer != request.user:
        request.user.message_set.create(message="You cannot edit offers that are not yours")
        return HttpResponseRedirect(reverse("offer_list_yours"))
    # NOTE(review): raises KeyError (-> 500) when `returnto` is absent from
    # the query string — confirm all links to this view always include it.
    return_to = request.GET['returnto']
    formsets = []
    if request.method == "POST":
        if request.POST["action"] == "update":
            offer_form = OfferForm(request.POST, instance=offer)
            for name, formset_class in OfferForm.inlines:
                formsets += [(name, formset_class(request.POST, request.FILES, instance=offer)),]
            formsets_valid = is_valid(formsets)
            offer_form.inlines = formsets
            if offer_form.is_valid() and formsets_valid:
                offer = offer_form.save(commit=False)
                offer.save()
                # Re-bind and save each inline formset against the saved offer.
                for name, formset_class in OfferForm.inlines:
                    f = formset_class(request.POST, request.FILES, instance=offer)
                    if f.is_valid():
                        f.save()
                if notification:
                    # Tell the other party of each pending swap that one of
                    # its offers changed.
                    for swap in offer.proposed_swaps.filter(state=1):
                        notification.send([swap.responding_offer.offerer,], "swaps_proposing_offer_changed",
                                          {"creator": request.user,
                                           "swap": swap,
                                           "proposing_offer": swap.proposing_offer,
                                           "responding_offer": swap.responding_offer})
                    for swap in offer.responding_swaps.filter(state=1):
                        notification.send([swap.proposing_offer.offerer,], "swaps_responding_offer_changed",
                                          {"creator": request.user,
                                           "swap": swap,
                                           "proposing_offer": swap.proposing_offer,
                                           "responding_offer": swap.responding_offer})
                request.user.message_set.create(message=_("Successfully updated offer '%s'") % offer.short_description)
                return HttpResponseRedirect(reverse(return_to))
        else:
            # POST with a different action: show unbound forms for the offer.
            offer_form = OfferForm(instance=offer)
            for name, formset_class in OfferForm.inlines:
                formsets += [(name, formset_class(None, None, instance=offer)),]
    else:
        # Initial GET: show unbound forms for the offer.
        offer_form = OfferForm(instance=offer)
        for name, formset_class in OfferForm.inlines:
            formsets += [(name, formset_class(None, None, instance=offer)),]
    offer_form.inlines = formsets
    return render_to_response("swaps/edit_offer.html", {
        "offer_form": offer_form,
        "offer": offer,
    }, context_instance=RequestContext(request))
@login_required
def delete_offer(request, offer_id):
    """Delete one of the current user's offers.

    Only the offer's owner may delete it, and the deletion itself only
    happens on POST.  Every path now redirects back to the user's offer
    list -- the original fell through and returned ``None`` for non-POST
    requests, which Django turns into an HTTP 500.
    """
    offer = get_object_or_404(Offer, id=offer_id)
    if offer.offerer != request.user:
        request.user.message_set.create(message="You cannot delete offers that are not yours")
        return HttpResponseRedirect(reverse("offer_list_yours"))
    if request.method == "POST":
        offer.delete()
    # GET (or any other method) is a no-op; send the user back to their list.
    return HttpResponseRedirect(reverse("offer_list_yours"))
@login_required
def cancel_offer(request, offer_id):
    """Cancel one of the current user's offers.

    Only the offer's owner may cancel it, and the cancellation only
    happens on POST.  Every path now redirects back to the user's offer
    list -- the original fell through and returned ``None`` for non-POST
    requests, which Django turns into an HTTP 500.
    """
    offer = get_object_or_404(Offer, id=offer_id)
    if offer.offerer != request.user:
        request.user.message_set.create(message="You cannot cancel offers that are not yours")
        return HttpResponseRedirect(reverse("offer_list_yours"))
    if request.method == "POST":
        offer.cancel()
    # GET (or any other method) is a no-op; send the user back to their list.
    return HttpResponseRedirect(reverse("offer_list_yours"))
@login_required
def propose_swap(request, offer_id):
    """Propose a swap against the offer identified by ``offer_id``.

    Two flows are supported on POST:
      * ``swap_form`` picks one of the user's existing offers as the
        proposing side, or
      * ``offer_form`` creates a brand-new proposing offer on the fly.

    On success the responding offerer is notified (if the notification
    app is installed) and the user is redirected to "proposed_by_you".
    On GET, or when neither form validates, the proposal page renders.
    """
    offer = get_object_or_404(Offer, id=offer_id)
    if request.method == "POST":
        swap_form = ProposeSwapForm(request.POST)
        offer_form = ProposingOfferForm(request.POST)
        swap = None
        if swap_form.is_valid():
            # Flow 1: swap proposed from an existing offer chosen in the form.
            swap = swap_form.save(commit=False)
            swap.responding_offer = offer
            swap.save()
        if offer_form.is_valid():
            # Flow 2: a fresh proposing offer is created for the swap.
            # NOTE(review): this is not an elif -- if both forms happen to
            # validate, two Swap rows are saved and only the second one is
            # notified below.  Confirm the forms can never both be valid.
            proposing_offer = offer_form.save(commit=False)
            proposing_offer.offerer = request.user
            proposing_offer.save()
            swap = Swap(
                proposing_offer=proposing_offer,
                responding_offer=offer)
            swap.save()
        if swap:
            if notification:
                notification.send([offer.offerer,], "swaps_proposal",
                    {"creator": request.user,
                     "swap": swap,
                     "proposing_offer": swap.proposing_offer,
                     "responding_offer": swap.responding_offer})
            return HttpResponseRedirect(reverse("proposed_by_you"))
    else:
        swap_form = ProposeSwapForm()
        # Only the user's own open (state=1) offers may be the proposing side.
        swap_form.fields['proposing_offer'].queryset = Offer.objects.filter(offerer=request.user, state=1)
        offer_form = ProposingOfferForm()
    return render_to_response("swaps/propose_swap.html", {
        "offer": offer,
        "swap_form": swap_form,
        "offer_form": offer_form,
        }, context_instance=RequestContext(request))
@login_required
def accept_swap(request, swap_id):
    """Accept the swap ``swap_id`` and notify the proposing offerer.

    NOTE(review): unlike the offer views there is no ownership check
    here -- confirm that any logged-in user is allowed to accept.
    """
    swap = get_object_or_404(Swap, id=swap_id)
    swap.accept()
    swap.save()
    if notification:
        payload = {
            "creator": request.user,
            "swap": swap,
            "proposing_offer": swap.proposing_offer,
            "responding_offer": swap.responding_offer,
        }
        notification.send([swap.proposing_offer.offerer], "swaps_acceptance", payload)
    return HttpResponseRedirect(reverse("accepted_swaps"))
@login_required
def reject_swap(request, swap_id):
    """Reject the swap ``swap_id`` and notify the proposing offerer."""
    swap = get_object_or_404(Swap, id=swap_id)
    swap.reject()
    swap.save()
    if notification:
        payload = {
            "creator": request.user,
            "swap": swap,
            "proposing_offer": swap.proposing_offer,
            "responding_offer": swap.responding_offer,
        }
        notification.send([swap.proposing_offer.offerer], "swaps_rejection", payload)
    return HttpResponseRedirect(reverse("dead_swaps"))
@login_required
def cancel_swap(request, swap_id):
    """Cancel the swap ``swap_id`` and notify the *responding* offerer."""
    swap = get_object_or_404(Swap, id=swap_id)
    swap.cancel()
    swap.save()
    if notification:
        payload = {
            "creator": request.user,
            "swap": swap,
            "proposing_offer": swap.proposing_offer,
            "responding_offer": swap.responding_offer,
        }
        notification.send([swap.responding_offer.offerer], "swaps_cancellation", payload)
    return HttpResponseRedirect(reverse("dead_swaps"))
| {
"content_hash": "574e5de5ffb923a0be7d653d03025125",
"timestamp": "",
"source": "github",
"line_count": 274,
"max_line_length": 154,
"avg_line_length": 41.76642335766423,
"alnum_prop": 0.6327333100314575,
"repo_name": "andriibekker/django-swaps",
"id": "bc6d1d79b5cc8a4c6f41d813bb7c6f45f0deede3",
"size": "11444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "swaps/views.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""
Echo client and server examples.
Support for:
- U{Twisted<http://twistedmatrix.com>}
- U{WSGI<http://wsgi.org>}
- U{Django<http://djangoproject.com>}
- U{WASD<http://wasd.vsm.com.au/WASD/>}
You can use this example with the echo_test.swf client on the
U{EchoTest<http://pyamf.org/wiki/EchoTest>} wiki page.
@since: 0.1
"""
import os, os.path
import logging
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s %(levelname)-5.5s [%(name)s] %(message)s'
)
def run_wsgi_server(name, options, services):
    """
    Serve the AMF services with the stdlib C{wsgiref} server through
    L{WSGIGateway<pyamf.remoting.gateway.wsgi.WSGIGateway>}.

    @param options: Commandline options.
    @type options: C{dict}
    @param services: List of services for the Flash gateway.
    @type services: C{dict}
    @return: The function that will run the server.
    @rtype: C{callable}
    """
    from pyamf.remoting.gateway.wsgi import WSGIGateway
    from wsgiref import simple_server

    bind_host = options.host
    bind_port = int(options.port)

    gateway = WSGIGateway(services, logger=logging)
    httpd = simple_server.WSGIServer(
        (bind_host, bind_port),
        simple_server.WSGIRequestHandler,
    )

    def app(environ, start_response):
        # The Flash cross-domain policy file is served directly; every
        # other request is handed to the AMF gateway.
        if environ['PATH_INFO'] == '/crossdomain.xml':
            policy_path = os.path.join(os.getcwd(), os.path.dirname(__file__),
                                       'crossdomain.xml')
            policy_file = open(policy_path, 'rt')
            lines = policy_file.readlines()
            policy_file.close()
            start_response('200 OK', [
                ('Content-Type', 'application/xml'),
                ('Content-Length', str(len(''.join(lines))))
            ])
            return lines
        return gateway(environ, start_response)

    httpd.set_app(app)
    print("Started %s - WSGI Server on http://%s:%d" % (name, bind_host, bind_port))
    return httpd.serve_forever
def run_django_server(name, options, services):
    """
    Runs the AMF services using the
    L{DjangoGateway<pyamf.remoting.gateway.django.DjangoGateway>}.

    The Django development server is launched as a child process via
    C{manage.py runserver}; the gateway itself is presumably wired up
    inside the Django project's URL configuration (not visible here).

    @param options: Commandline options.
    @type options: C{dict}
    @param services: List of services for the Flash gateway.
    @type services: C{dict}
    @return: The exit status of the Django server process (the original
        docstring claimed a callable, but C{subprocess.call} blocks and
        returns an C{int}).
    @rtype: C{int}
    """
    import subprocess

    if 'DJANGO_SETTINGS_MODULE' not in os.environ:
        os.environ['DJANGO_SETTINGS_MODULE'] = 'echo.settings'

    host = options.host
    port = int(options.port)
    address = '%s:%d' % (host, port)
    path = os.path.join('echo', 'manage.py')

    print("Started %s - Django Server on http://%s:%d" % (name, host, port))
    # Pass the command as an argument list with no shell: a path or
    # address containing spaces or shell metacharacters can neither
    # break the command nor be injected into it.
    return subprocess.call(['python', path, 'runserver', address])
def run_twisted_server(name, options, services):
    """
    Runs the AMF services using the
    L{TwistedGateway<pyamf.remoting.gateway.twisted.TwistedGateway>}.

    @param options: Commandline options
    @type options: C{dict}
    @param services: List of services for the Flash gateway.
    @type services: C{dict}
    @return: The function that will run the server.
    @rtype: C{callable}
    """
    from twisted.internet import reactor
    from twisted.web import server, static, resource

    from pyamf.remoting.gateway.twisted import TwistedGateway

    host = options.host
    port = int(options.port)
    path = options.path

    gw = TwistedGateway(services, expose_request=False, logger=logging)
    root = resource.Resource()

    # putChild() keys are single path *segments* and must not contain a
    # slash: the original registered '/' + path, which can never match a
    # request URL, leaving the gateway unreachable under a custom --path.
    root.putChild('', gw)
    if path:
        root.putChild(path, gw)
    root.putChild('crossdomain.xml', static.File(os.path.join(
        os.getcwd(), os.path.dirname(__file__), 'crossdomain.xml'),
        defaultType='application/xml'))

    print("Started %s - Twisted Server on http://%s:%d/%s" % (name, host, port, path))
    reactor.listenTCP(port, server.Site(root), 50, host)
    return reactor.run
def run_wasd_server(name, options, services):
    """
    Serve the AMF services under WASD through its CGIplus/WSGI bridge,
    using L{WSGIGateway<pyamf.remoting.gateway.wsgi.WSGIGateway>}.

    @param options: Commandline options.
    @type options: C{dict}
    @param services: List of services for the Flash gateway.
    @type services: C{dict}
    @return: The function that will run the server.
    @rtype: C{callable}
    """
    from pyamf.remoting.gateway.wsgi import WSGIGateway
    import wasd

    gateway = WSGIGateway(services, logger=logging)

    def app(environ, start_response):
        # crossdomain.xml is served directly; everything else goes to AMF.
        if environ['PATH_INFO'] == '/crossdomain.xml':
            policy_path = os.path.join(os.getcwd(), os.path.dirname(__file__),
                                       'crossdomain.xml')
            policy_file = open(policy_path, 'rt')
            lines = policy_file.readlines()
            policy_file.close()
            start_response('200 OK', [
                ('Content-Type', 'application/xml'),
                ('Content-Length', str(len(''.join(lines))))
            ])
            return lines
        return gateway(environ, start_response)

    def serve_forever():
        # Accept CGIplus requests until WASD tells us to stop.
        while wasd.cgiplus_begin():
            wasd.wsgi_run(app)

    return serve_forever
def run_server(name, options, services):
    """
    Starts the echo AMF server.

    @param options: Commandline options.
    @type options: C{dict}
    @param services: List of services for the Flash gateway.
    @type services: C{dict}
    @raise ValueError: If C{options.type} names an unknown gateway type.
    """
    runners = {
        'wsgi': run_wsgi_server,
        'twisted': run_twisted_server,
        'django': run_django_server,
        'wasd': run_wasd_server,
    }
    try:
        runner = runners[options.type]
    except KeyError:
        # The original if/elif chain had no else branch, so an unknown
        # type left `func` unbound and crashed with a NameError below.
        raise ValueError('Unknown server type: %r' % (options.type,))

    func = runner(name, options, services)

    import pyamf

    print('using PyAMF from %s' % (pyamf.__file__,))

    try:
        func()
    except KeyboardInterrupt:
        pass
def new_httplib_client(name, options, service):
    """
    Build a C{httplib}-based echo client proxy.

    @param options: Commandline options
    @type options: C{dict}
    @param service: Target service path on the AMF gateway.
    @type service: C{str}
    @return: The function that will run the client.
    @rtype: C{callable}
    """
    from pyamf.remoting.client import RemotingService

    url = "http://%s:%d/%s" % (options.host, int(options.port), options.path)
    print("Started %s - httplib Client for %s" % (name, url))

    gateway = RemotingService(url, logger=logging)
    return gateway.getService(service)
def new_client(name, options, service):
    """
    Starts the echo AMF client.

    Currently always delegates to the C{httplib} implementation.

    @param name: Name of the example.
    @type name: C{str}
    @param options: Commandline options.
    @type options: C{dict}
    @param service: Target servicepath on the AMF gateway.
    @type service: C{str}
    """
    return new_httplib_client(name, options, service)
def parse_args(args):
    """
    Parse commandline options for the echo examples.

    @param args: Argument list, excluding the program name.
    @return: The C{(options, leftover_args)} pair from C{OptionParser}.
    """
    from optparse import OptionParser

    # (flags, keyword arguments) for every supported option.
    option_specs = [
        (('-t', '--type'),
         {'dest': 'type', 'choices': ('wsgi', 'twisted', 'django', 'wasd'),
          'default': 'wsgi',
          'help': 'Determines which AMF gateway type to use'}),
        (('--host',),
         {'dest': 'host', 'default': 'localhost',
          'help': 'The host address for the AMF gateway'}),
        (('-p', '--port'),
         {'dest': 'port', 'default': 8000,
          'help': 'The port number the server uses'}),
        (('-l', '--path'),
         {'dest': 'path', 'default': '',
          'help': 'The gateway path'}),
        (('-s', '--service'),
         {'dest': 'service', 'default': 'echo',
          'help': 'The remote service name'}),
    ]

    parser = OptionParser()
    for flags, kwargs in option_specs:
        parser.add_option(*flags, **kwargs)

    return parser.parse_args(args)
| {
"content_hash": "14de8e3a4210299b1bb2ad09edfa78fb",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 85,
"avg_line_length": 29.625,
"alnum_prop": 0.6256233218258535,
"repo_name": "ethankennerly/hotel-vs-gozilla",
"id": "e2bd9f2f516a380a2a750bb0f862eca2c9d86ba1",
"size": "7898",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyamf_util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "203672"
},
{
"name": "JavaScript",
"bytes": "10251"
},
{
"name": "Python",
"bytes": "3659094"
},
{
"name": "Shell",
"bytes": "1149"
}
],
"symlink_target": ""
} |
import sys, time, copy
import numpy as np
from threading import RLock
import roslib; roslib.load_manifest('hrl_dynamic_mpc')
roslib.load_manifest('pr2_controllers_msgs')
import rospy
from geometry_msgs.msg import Wrench, Twist
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from pr2_controllers_msgs.msg import JointTrajectoryControllerState
from hrl_lib import transforms as tr
import darci_arm_kinematics as dak
##
#Class DarciSimClient()
#gives interface in python for controller to be similar to other MPC stuff we've done on Cody,
#also exposes kinematics, IK, joint limits and the ability to record robot data.
#
class DarciSimClient():
    """Python client for the simulated Darci arm.

    Mirrors the MPC client interface used on Cody: exposes joint state
    from the arm controller, desired-joint-angle commands, forward
    kinematics / Jacobian updates, and optional recording of joint data.
    """

    def __init__(self, arm = 'l', record_data = False):
        """Connect to the arm controller's command/state topics.

        :param arm: 'l' for the left arm (default) or 'r' for the right.
        :param record_data: if True, keep deques of joint state history.

        Blocks until the first joint-state message arrives, then primes
        the trajectory command with the current joint angles.
        """
        self.arm = arm
        if arm == 'r':
            print "using right arm on Darci"
            self.RIGHT_ARM = 0 # i don't think this was used anywhere
        else:
            self.LEFT_ARM = 1
            # print "Left and both arms not implemented in this client yet ... \n"
            # print "will require writing different function calls since joint data for left and right arm come in together from the meka server"
            # sys.exit()
        self.kinematics = dak.DarciArmKinematics(arm)
        self.joint_pub = rospy.Publisher('/'+self.arm+'_arm_controller/command', JointTrajectory)
        self.joint_names = None
        # RLock guards the state fields written by robotStateCallback.
        self.lock = RLock()
        self.joint_angles = None
        self.joint_velocities = None
        self.J_h = None
        self.time = None
        self.desired_joint_angles = None
        self.stiffness_percent = 0.75
        self.ee_force = None
        self.ee_torque = None
        self.skins = None #will need code for all of these skin interfaces
        self.Jc_l = []
        self.n_l = []
        self.values_l = []
        #values from m3gtt repository in robot_config folder
        # These could be read in from a yaml file like this (import yaml; stream = open("FILE_NAME_HERE", 'r'); data = yaml.load(stream))
        # However, not clear if absolute path to yaml file (possibly on another computer) is better then just defining it here
        # The downside is obviously if someone tunes gains differently on the robot.
        self.joint_stiffness = (np.array([1, 1, 1, 1, 0.06, 0.08, 0.08])*180/np.pi*self.stiffness_percent).tolist()
        self.joint_damping = (np.array([0.06, 0.1, 0.015, 0.015, 0.0015, 0.002, 0.002])*180/np.pi*self.stiffness_percent).tolist()
        self.record_data = record_data
        if self.record_data:
            from collections import deque
            self.q_record = deque()
            self.qd_record = deque()
            self.times = deque()
        self.state_sub = rospy.Subscriber('/'+self.arm+'_arm_controller/state', JointTrajectoryControllerState, self.robotStateCallback)
        rospy.sleep(1.0)
        # Wait until the subscriber has delivered at least one state message.
        while self.joint_angles is None:
            rospy.sleep(0.05)
        self.desired_joint_angles = copy.copy(self.joint_angles)
        # Pre-build the trajectory command reused by updateSendCmd().
        self.joint_cmd = JointTrajectory()
        self.joint_cmd.header.stamp = rospy.Time.now()
        self.joint_cmd.header.frame_id = '/torso_lift_link'
        self.joint_cmd.joint_names = self.joint_names
        jtp = JointTrajectoryPoint()
        jtp.positions = self.desired_joint_angles
        jtp.velocities = [1.]*len(self.joint_names)
        self.joint_cmd.points = [jtp]
        self.joint_pub.publish(self.joint_cmd)

    def robotStateCallback(self, msg):
        """Cache joint names/angles/velocities and timestamp under the lock."""
        self.lock.acquire()
        if self.joint_names is None:
            self.joint_names = copy.copy(msg.joint_names)
        self.joint_angles = copy.copy(msg.actual.positions)
        self.joint_velocities = copy.copy(msg.actual.velocities)
        self.time = msg.header.stamp.secs + msg.header.stamp.nsecs*(1e-9)
        self.lock.release()

    def updateHapticState(self):
        """Refresh end-effector pose and Jacobian from the current angles."""
        pos, rot = self.kinematics.FK(self.joint_angles)
        self.end_effector_position = pos
        self.end_effector_orient_quat = tr.matrix_to_quaternion(rot)
        self.J_h = self.kinematics.Jacobian(self.joint_angles)

    def updateSendCmd(self):
        """Publish the current desired joint angles to the controller."""
        self.joint_cmd.points[0].positions = np.array(self.desired_joint_angles, dtype=np.float32).tolist()
        self.joint_pub.publish(self.joint_cmd)

    def addDeltaToDesiredJointAngles(self, delta_angles):
        """Add a per-joint delta (same length as the arm) to the setpoint."""
        self.desired_joint_angles = (np.array(self.desired_joint_angles) + np.array(delta_angles)).tolist()

    def setDesiredJointAngles(self, angles):
        """Replace the joint-angle setpoint (not sent until updateSendCmd)."""
        self.desired_joint_angles = angles

    def getDesiredJointAngles(self):
        """Return the current joint-angle setpoint."""
        return self.desired_joint_angles

    def getJointAngles(self):
        """Return the most recently received actual joint angles."""
        return self.joint_angles

    def get_joint_angles(self):
        """snake_case alias for getJointAngles (kept for API compatibility)."""
        return self.getJointAngles()

    def getJointVelocities(self):
        """Return the most recently received joint velocities."""
        return self.joint_velocities

    def recordCurData(self):
        """Append the current joint state to the recording deques."""
        if self.record_data:
            self.q_record.append(copy.copy(self.joint_angles))
            self.qd_record.append(copy.copy(self.joint_velocities))
            self.times.append(copy.copy(self.time))
        else:
            print "you didn't pass in the right flags to record robot data ... exiting"
            sys.exit()

    def getRecordedData(self):
        """Return (angles, velocities, timestamps) recording deques."""
        return self.q_record, self.qd_record, self.times

    def run(self):
        """Publish the setpoint at 50 Hz until shutdown; dump recordings.

        If record_data was requested, the history is written to
        ./darci_dynamics_data.mat on exit.
        """
        rate = rospy.Rate(50)
        while not rospy.is_shutdown():
            self.updateHapticState()
            self.updateSendCmd()
            if self.record_data == True:
                self.q_record.append(copy.copy(self.joint_angles))
                self.qd_record.append(copy.copy(self.joint_velocities))
                self.times.append(copy.copy(self.time))
            rate.sleep()
        if self.record_data:
            import scipy.io as io
            data = {'q':self.q_record,
                    'qd':self.qd_record,
                    'times':self.times}
            io.savemat('./darci_dynamics_data.mat', data)
if __name__ == '__main__':
    # this was for testing the values from the joints
    # however, it gives a good example of the different data and functions available to command the arm.
    rospy.init_node( 'move_arm_node', anonymous = True )
    darci = DarciSimClient(arm='l')
    # presumably gives the controller/subscriber time to settle -- TODO confirm
    rospy.sleep(5)
    inp = None
    while inp != 'q':
        #sending simple command to joints
        angles = [0.0]*7
        #angles[3] = np.pi/2
        # uploading command to arms through ROS
        darci.setDesiredJointAngles(angles)
        darci.updateSendCmd()
        # updating end effector position and orientation
        darci.updateHapticState()
        joint_angles = darci.joint_angles
        inp = raw_input('q for quit, otherwise continue: \n')
    #getting joint info after test
    darci.updateHapticState()
    joint_angles_new = darci.joint_angles
    # going back to home position before quitting
    darci.setDesiredJointAngles([0]*7)
    darci.updateSendCmd()
| {
"content_hash": "71522bf58402955d92cdcfbb9992a978",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 145,
"avg_line_length": 37.84699453551912,
"alnum_prop": 0.6368755414380595,
"repo_name": "gt-ros-pkg/hrl-haptic-manip",
"id": "916b4733516e8f188221a34528abba16755dbe6b",
"size": "8663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hrl_dynamic_mpc/src/darci_sim_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "4487"
},
{
"name": "C",
"bytes": "5856683"
},
{
"name": "C++",
"bytes": "244682"
},
{
"name": "CSS",
"bytes": "20048"
},
{
"name": "JavaScript",
"bytes": "37153"
},
{
"name": "M",
"bytes": "35042"
},
{
"name": "Matlab",
"bytes": "179536"
},
{
"name": "Objective-C",
"bytes": "68"
},
{
"name": "Perl",
"bytes": "2517"
},
{
"name": "Python",
"bytes": "1184223"
},
{
"name": "Shell",
"bytes": "1887"
},
{
"name": "TeX",
"bytes": "237004"
}
],
"symlink_target": ""
} |
import StringIO
import webob
import glance.api.v2.image_data
from glance.common import exception
from glance.openstack.common import uuidutils
from glance.tests.unit import base
import glance.tests.unit.utils as unit_test_utils
import glance.tests.utils as test_utils
class Raise(object):
    """A stand-in callable that always raises a canned exception.

    Assign an instance over a method to make any invocation of that
    method fail with the given exception.
    """

    def __init__(self, exc):
        self._exception = exc

    def __call__(self, *unused_args, **unused_kwargs):
        raise self._exception
class FakeImage(object):
    """Minimal stand-in for a glance image domain object.

    Seeding ``status`` with an exception instance makes any later status
    transition raise it, which lets tests simulate failure modes.
    """

    def __init__(self, image_id=None, data=None, checksum=None, size=0,
                 locations=None, container_format='bear', disk_format='rawr',
                 status=None):
        self.image_id = image_id
        self.data = data
        self.checksum = checksum
        self.size = size
        self.locations = locations
        self.container_format = container_format
        self.disk_format = disk_format
        self._status = status

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, value):
        # A seeded exception fires on the first attempted transition.
        if isinstance(self._status, BaseException):
            raise self._status
        self._status = value

    def get_data(self):
        return self.data

    def set_data(self, data, size=None):
        # Accepts any iterable of strings, mimicking a chunked upload.
        self.data = ''.join(data)
        self.size = size
        self.status = 'modified-by-fake'
class FakeImageRepo(object):
    """Image-repository double returning (or raising) a canned result."""

    def __init__(self, result=None):
        self.result = result

    def get(self, image_id):
        # An exception instance as the canned result is raised instead
        # of returned, mimicking repository failure modes.
        if isinstance(self.result, BaseException):
            raise self.result
        return self.result

    def save(self, image):
        # Remember the last image handed to save() for later assertions.
        self.saved_image = image
class FakeGateway(object):
    """Gateway double that hands back one fixed repository."""

    def __init__(self, repo):
        self.repo = repo

    def get_repo(self, context):
        # The request context is ignored; every caller gets the same repo.
        return self.repo
class TestImagesController(base.StoreClearingUnitTest):
    """Tests for ImageDataController.upload()/download().

    The controller is wired to the Fake* doubles above; each test seeds
    ``self.image_repo.result`` with an image (or an exception) and
    asserts on the resulting HTTP behaviour.
    """

    def setUp(self):
        super(TestImagesController, self).setUp()
        self.config(verbose=True, debug=True)
        self.image_repo = FakeImageRepo()
        self.gateway = FakeGateway(self.image_repo)
        self.controller = glance.api.v2.image_data.ImageDataController(
            gateway=self.gateway)

    def test_download(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd', locations=['http://example.com/image'])
        self.image_repo.result = image
        image = self.controller.download(request, unit_test_utils.UUID1)
        self.assertEqual(image.image_id, 'abcd')

    def test_download_no_location(self):
        # An image without a location has no data to serve -> 404.
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = FakeImage('abcd')
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.download,
                          request, unit_test_utils.UUID2)

    def test_download_non_existent_image(self):
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.NotFound()
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.download,
                          request, uuidutils.generate_uuid())

    def test_download_forbidden(self):
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.Forbidden()
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.download,
                          request, uuidutils.generate_uuid())

    def test_upload(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        self.image_repo.result = image
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        self.assertEqual(image.data, 'YYYY')
        self.assertEqual(image.size, 4)

    def test_upload_status(self):
        # Verifies the image is saved in 'saving' state *while* the data
        # generator is being consumed, then transitioned afterwards.
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        self.image_repo.result = image
        insurance = {'called': False}

        def read_data():
            insurance['called'] = True
            self.assertEqual(self.image_repo.saved_image.status, 'saving')
            yield 'YYYY'
        self.controller.upload(request, unit_test_utils.UUID2,
                               read_data(), None)
        self.assertTrue(insurance['called'])
        self.assertEqual(self.image_repo.saved_image.status,
                         'modified-by-fake')

    def test_upload_no_size(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        self.image_repo.result = image
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', None)
        self.assertEqual(image.data, 'YYYY')
        self.assertEqual(image.size, None)

    def test_upload_invalid(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd')
        image.status = ValueError()
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPBadRequest, self.controller.upload,
                          request, unit_test_utils.UUID1, 'YYYY', 4)

    def test_upload_non_existent_image_during_save(self):
        # Image vanishing between get() and save() -> 410 Gone.
        def fake_save(self):
            raise exception.NotFound()

        request = unit_test_utils.get_fake_request()
        image = FakeImage('abcd', locations=['http://example.com/image'])
        self.image_repo.result = image
        self.image_repo.save = fake_save
        self.assertRaises(webob.exc.HTTPGone, self.controller.upload,
                          request, uuidutils.generate_uuid(), 'ABC', 3)

    def test_upload_non_existent_image_before_save(self):
        request = unit_test_utils.get_fake_request()
        self.image_repo.result = exception.NotFound()
        self.assertRaises(webob.exc.HTTPNotFound, self.controller.upload,
                          request, uuidutils.generate_uuid(), 'ABC', 3)

    def test_upload_data_exists(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage()
        image.set_data = Raise(exception.Duplicate)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPConflict, self.controller.upload,
                          request, unit_test_utils.UUID1, 'YYYY', 4)

    def test_upload_storage_full(self):
        request = unit_test_utils.get_fake_request()
        image = FakeImage()
        image.set_data = Raise(exception.StorageFull)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPRequestEntityTooLarge,
                          self.controller.upload,
                          request, unit_test_utils.UUID2, 'YYYYYYY', 7)

    def test_upload_storage_forbidden(self):
        request = unit_test_utils.get_fake_request(user=unit_test_utils.USER2)
        image = FakeImage()
        image.set_data = Raise(exception.Forbidden)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPForbidden, self.controller.upload,
                          request, unit_test_utils.UUID2, 'YY', 2)

    def test_upload_storage_write_denied(self):
        request = unit_test_utils.get_fake_request(user=unit_test_utils.USER3)
        image = FakeImage()
        image.set_data = Raise(exception.StorageWriteDenied)
        self.image_repo.result = image
        self.assertRaises(webob.exc.HTTPServiceUnavailable,
                          self.controller.upload,
                          request, unit_test_utils.UUID2, 'YY', 2)

    # NOTE(review): the leading underscore keeps the three notification
    # tests below from being collected by the test runner, and they read
    # self.notifier which setUp() never defines -- confirm they are
    # intentionally disabled.
    def _test_upload_download_prepare_notification(self):
        request = unit_test_utils.get_fake_request()
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        output = self.controller.download(request, unit_test_utils.UUID2)
        output_log = self.notifier.get_logs()
        prepare_payload = output['meta'].copy()
        prepare_payload['checksum'] = None
        prepare_payload['size'] = None
        prepare_payload['location'] = None
        prepare_payload['status'] = 'queued'
        del prepare_payload['updated_at']
        prepare_log = {
            'notification_type': "INFO",
            'event_type': "image.prepare",
            'payload': prepare_payload,
        }
        self.assertEqual(len(output_log), 3)
        prepare_updated_at = output_log[0]['payload']['updated_at']
        del output_log[0]['payload']['updated_at']
        self.assertTrue(prepare_updated_at <= output['meta']['updated_at'])
        self.assertEqual(output_log[0], prepare_log)

    def _test_upload_download_upload_notification(self):
        request = unit_test_utils.get_fake_request()
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        output = self.controller.download(request, unit_test_utils.UUID2)
        output_log = self.notifier.get_logs()
        upload_payload = output['meta'].copy()
        upload_log = {
            'notification_type': "INFO",
            'event_type': "image.upload",
            'payload': upload_payload,
        }
        self.assertEqual(len(output_log), 3)
        self.assertEqual(output_log[1], upload_log)

    def _test_upload_download_activate_notification(self):
        request = unit_test_utils.get_fake_request()
        self.controller.upload(request, unit_test_utils.UUID2, 'YYYY', 4)
        output = self.controller.download(request, unit_test_utils.UUID2)
        output_log = self.notifier.get_logs()
        activate_payload = output['meta'].copy()
        activate_log = {
            'notification_type': "INFO",
            'event_type': "image.activate",
            'payload': activate_payload,
        }
        self.assertEqual(len(output_log), 3)
        self.assertEqual(output_log[2], activate_log)
class TestImageDataDeserializer(test_utils.BaseTestCase):
    """Tests for RequestDeserializer.upload() body/Content-Length handling."""

    def setUp(self):
        super(TestImageDataDeserializer, self).setUp()
        self.deserializer = glance.api.v2.image_data.RequestDeserializer()

    def test_upload(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        request.body = 'YYY'
        request.headers['Content-Length'] = 3
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual(data.read(), 'YYY')
        expected = {'size': 3}
        self.assertEqual(expected, output)

    def test_upload_chunked(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        # If we use body_file, webob assumes we want to do a chunked upload,
        # ignoring the Content-Length header
        request.body_file = StringIO.StringIO('YYY')
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual(data.read(), 'YYY')
        expected = {'size': None}
        self.assertEqual(expected, output)

    def test_upload_chunked_with_content_length(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        request.body_file = StringIO.StringIO('YYY')
        # The deserializer shouldn't care if the Content-Length is
        # set when the user is attempting to send chunked data.
        request.headers['Content-Length'] = 3
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual(data.read(), 'YYY')
        expected = {'size': 3}
        self.assertEqual(expected, output)

    def test_upload_with_incorrect_content_length(self):
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/octet-stream'
        # The deserializer shouldn't care if the Content-Length and
        # actual request body length differ. That job is left up
        # to the controller
        request.body = 'YYY'
        request.headers['Content-Length'] = 4
        output = self.deserializer.upload(request)
        data = output.pop('data')
        self.assertEqual(data.read(), 'YYY')
        expected = {'size': 4}
        self.assertEqual(expected, output)

    def test_upload_wrong_content_type(self):
        # Only application/octet-stream bodies are accepted for upload.
        request = unit_test_utils.get_fake_request()
        request.headers['Content-Type'] = 'application/json'
        request.body = 'YYYYY'
        self.assertRaises(webob.exc.HTTPUnsupportedMediaType,
                          self.deserializer.upload, request)
class TestImageDataSerializer(test_utils.BaseTestCase):
    """Tests for ResponseSerializer download/upload response shaping."""

    def setUp(self):
        super(TestImageDataSerializer, self).setUp()
        self.serializer = glance.api.v2.image_data.ResponseSerializer()

    def test_download(self):
        request = webob.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        image = FakeImage(size=3, data=iter('ZZZ'))
        self.serializer.download(response, image)
        self.assertEqual('ZZZ', response.body)
        self.assertEqual('3', response.headers['Content-Length'])
        # No checksum on the image -> no Content-MD5 header.
        self.assertFalse('Content-MD5' in response.headers)
        self.assertEqual('application/octet-stream',
                         response.headers['Content-Type'])

    def test_download_with_checksum(self):
        request = webob.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        checksum = '0745064918b49693cca64d6b6a13d28a'
        image = FakeImage(size=3, checksum=checksum, data=iter('ZZZ'))
        self.serializer.download(response, image)
        self.assertEqual('ZZZ', response.body)
        self.assertEqual('3', response.headers['Content-Length'])
        # The image checksum is surfaced as Content-MD5.
        self.assertEqual(checksum, response.headers['Content-MD5'])
        self.assertEqual('application/octet-stream',
                         response.headers['Content-Type'])

    def test_upload(self):
        # A successful upload is an empty 204 response.
        request = webob.Request.blank('/')
        request.environ = {}
        response = webob.Response()
        response.request = request
        self.serializer.upload(response, {})
        self.assertEqual(204, response.status_int)
        self.assertEqual('0', response.headers['Content-Length'])
| {
"content_hash": "796067da2bc96aecbcc3e1195d4a67c3",
"timestamp": "",
"source": "github",
"line_count": 358,
"max_line_length": 78,
"avg_line_length": 39.19553072625698,
"alnum_prop": 0.6246436716077537,
"repo_name": "citrix-openstack-build/glance",
"id": "c84b984ad94ee0f88e1955f2d58f0a740a8a1d54",
"size": "14669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glance/tests/unit/v2/test_image_data_resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2464002"
},
{
"name": "Shell",
"bytes": "3488"
}
],
"symlink_target": ""
} |
from core.nn.neuralnet import *
from core.rl.portfolio import evalLinUCB
from core.rl.policyGradient import policyGradient
from core.util.data import StockHistory
import sys
def main():
    """Dispatch to a training/evaluation routine named on the commandline.

    Usage: ``main.py <method> [key=value ...]`` where method is one of
    nn, retrainNN, testNN, rl or pg; the remaining ``key=value`` pairs
    are forwarded to the chosen routine as keyword arguments.
    """
    if len(sys.argv) < 2:
        usage()
    method = sys.argv[1]
    # partition('=') keeps values containing '=' intact and tolerates a
    # bare key (the original split on every '=' and raised IndexError
    # for an argument without one, truncated values for 'k=v=w').
    argDict = {}
    for arg in sys.argv[2:]:
        key, _sep, value = arg.partition('=')
        argDict[key] = value
    if method == 'nn':
        trainNN(**argDict)
    elif method == 'retrainNN':
        # NOTE(review): identical to the 'testNN' branch minus loadNN --
        # looks like a retraining routine was intended here; confirm.
        testNN(**argDict)
    elif method == 'testNN':
        testNN(loadNN(**argDict))
    elif method == 'rl':
        # number of stocks to choose, test set percentage, starting money amount
        evalLinUCB(None, None, **argDict)
    elif method == 'pg':
        policyGradient(**argDict)
    else:
        # An unknown method used to fall through silently; bail out loudly.
        usage()
def usage():
    """Print a usage hint and terminate with a zero exit status."""
    print("That's not how you use this script!")
    sys.exit(0)
if __name__ =='__main__':main()
| {
"content_hash": "4a67a21f19a73049b5aa97579bf2005a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 80,
"avg_line_length": 29.24137931034483,
"alnum_prop": 0.6084905660377359,
"repo_name": "hassaanm/stock-trading",
"id": "3b10775285ec6f989ee4c6b2e498b3cde84be7cd",
"size": "873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "822"
},
{
"name": "C++",
"bytes": "18096"
},
{
"name": "JavaScript",
"bytes": "19227"
},
{
"name": "Perl",
"bytes": "1924"
},
{
"name": "Python",
"bytes": "2461668"
},
{
"name": "Shell",
"bytes": "3384"
}
],
"symlink_target": ""
} |
"""Generated client library for deploymentmanager version v2."""
# NOTE: This file is autogenerated and should not be edited by hand.
from googlecloudsdk.third_party.apitools.base.py import base_api
from googlecloudsdk.third_party.apis.deploymentmanager.v2 import deploymentmanager_v2_messages as messages
class DeploymentmanagerV2(base_api.BaseApiClient):
  """Generated client library for service deploymentmanager version v2."""
  # NOTE: autogenerated code (see file header) — fix the generator, not this
  # class, for any behavioral change.
  MESSAGES_MODULE = messages
  _PACKAGE = u'deploymentmanager'
  _SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloud-platform.read-only', u'https://www.googleapis.com/auth/ndev.cloudman', u'https://www.googleapis.com/auth/ndev.cloudman.readonly']
  _VERSION = u'v2'
  # NOTE(review): these appear to be the Cloud SDK's public "installed
  # application" OAuth credentials baked in by the generator (the user agent
  # oddly reuses the secret string) — confirm before treating as sensitive.
  _CLIENT_ID = '1042881264118.apps.googleusercontent.com'
  _CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
  _CLIENT_CLASS_NAME = u'DeploymentmanagerV2'
  _URL_VERSION = u'v2'
  _API_KEY = None
  def __init__(self, url='', credentials=None,
               get_credentials=True, http=None, model=None,
               log_request=False, log_response=False,
               credentials_args=None, default_global_params=None,
               additional_http_headers=None):
    """Create a new deploymentmanager handle."""
    url = url or u'https://www.googleapis.com/deploymentmanager/v2/'
    super(DeploymentmanagerV2, self).__init__(
        url, credentials=credentials,
        get_credentials=get_credentials, http=http, model=model,
        log_request=log_request, log_response=log_response,
        credentials_args=credentials_args,
        default_global_params=default_global_params,
        additional_http_headers=additional_http_headers)
    # One service object per REST resource exposed by the API.
    self.deployments = self.DeploymentsService(self)
    self.manifests = self.ManifestsService(self)
    self.operations = self.OperationsService(self)
    self.resources = self.ResourcesService(self)
    self.types = self.TypesService(self)
  class DeploymentsService(base_api.BaseApiService):
    """Service class for the deployments resource."""
    _NAME = u'deployments'
    def __init__(self, client):
      super(DeploymentmanagerV2.DeploymentsService, self).__init__(client)
      # Declarative per-method REST metadata; base_api uses it to build the
      # actual HTTP requests, so the wrapper methods below are thin.
      self._method_configs = {
          'CancelPreview': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'deploymentmanager.deployments.cancelPreview',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/deployments/{deployment}/cancelPreview',
              request_field=u'deploymentsCancelPreviewRequest',
              request_type_name=u'DeploymentmanagerDeploymentsCancelPreviewRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Delete': base_api.ApiMethodInfo(
              http_method=u'DELETE',
              method_id=u'deploymentmanager.deployments.delete',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/deployments/{deployment}',
              request_field='',
              request_type_name=u'DeploymentmanagerDeploymentsDeleteRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.deployments.get',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/deployments/{deployment}',
              request_field='',
              request_type_name=u'DeploymentmanagerDeploymentsGetRequest',
              response_type_name=u'Deployment',
              supports_download=False,
          ),
          'Insert': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'deploymentmanager.deployments.insert',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'preview'],
              relative_path=u'projects/{project}/global/deployments',
              request_field=u'deployment',
              request_type_name=u'DeploymentmanagerDeploymentsInsertRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.deployments.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/deployments',
              request_field='',
              request_type_name=u'DeploymentmanagerDeploymentsListRequest',
              response_type_name=u'DeploymentsListResponse',
              supports_download=False,
          ),
          'Patch': base_api.ApiMethodInfo(
              http_method=u'PATCH',
              method_id=u'deploymentmanager.deployments.patch',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[u'createPolicy', u'deletePolicy', u'preview'],
              relative_path=u'projects/{project}/global/deployments/{deployment}',
              request_field=u'deploymentResource',
              request_type_name=u'DeploymentmanagerDeploymentsPatchRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Stop': base_api.ApiMethodInfo(
              http_method=u'POST',
              method_id=u'deploymentmanager.deployments.stop',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/deployments/{deployment}/stop',
              request_field=u'deploymentsStopRequest',
              request_type_name=u'DeploymentmanagerDeploymentsStopRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'Update': base_api.ApiMethodInfo(
              http_method=u'PUT',
              method_id=u'deploymentmanager.deployments.update',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[u'createPolicy', u'deletePolicy', u'preview'],
              relative_path=u'projects/{project}/global/deployments/{deployment}',
              request_field=u'deploymentResource',
              request_type_name=u'DeploymentmanagerDeploymentsUpdateRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          }
      self._upload_configs = {
          }
    def CancelPreview(self, request, global_params=None):
      """Cancels and removes the preview currently associated with the deployment.

      Args:
        request: (DeploymentmanagerDeploymentsCancelPreviewRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('CancelPreview')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Delete(self, request, global_params=None):
      """Deletes a deployment and all of the resources in the deployment.

      Args:
        request: (DeploymentmanagerDeploymentsDeleteRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Delete')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Get(self, request, global_params=None):
      """Gets information about a specific deployment.

      Args:
        request: (DeploymentmanagerDeploymentsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Deployment) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Insert(self, request, global_params=None):
      """Creates a deployment and all of the resources described by the deployment manifest.

      Args:
        request: (DeploymentmanagerDeploymentsInsertRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Insert')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Lists all deployments for a given project.

      Args:
        request: (DeploymentmanagerDeploymentsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (DeploymentsListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Patch(self, request, global_params=None):
      """Updates a deployment and all of the resources described by the deployment manifest. This method supports patch semantics.

      Args:
        request: (DeploymentmanagerDeploymentsPatchRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Patch')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Stop(self, request, global_params=None):
      """Stops an ongoing operation. This does not roll back any work that has already been completed, but prevents any new work from being started.

      Args:
        request: (DeploymentmanagerDeploymentsStopRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Stop')
      return self._RunMethod(
          config, request, global_params=global_params)
    def Update(self, request, global_params=None):
      """Updates a deployment and all of the resources described by the deployment manifest.

      Args:
        request: (DeploymentmanagerDeploymentsUpdateRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Update')
      return self._RunMethod(
          config, request, global_params=global_params)
  class ManifestsService(base_api.BaseApiService):
    """Service class for the manifests resource."""
    _NAME = u'manifests'
    def __init__(self, client):
      super(DeploymentmanagerV2.ManifestsService, self).__init__(client)
      # Declarative REST metadata consumed by base_api (read-only resource).
      self._method_configs = {
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.manifests.get',
              ordered_params=[u'project', u'deployment', u'manifest'],
              path_params=[u'deployment', u'manifest', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/deployments/{deployment}/manifests/{manifest}',
              request_field='',
              request_type_name=u'DeploymentmanagerManifestsGetRequest',
              response_type_name=u'Manifest',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.manifests.list',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/deployments/{deployment}/manifests',
              request_field='',
              request_type_name=u'DeploymentmanagerManifestsListRequest',
              response_type_name=u'ManifestsListResponse',
              supports_download=False,
          ),
          }
      self._upload_configs = {
          }
    def Get(self, request, global_params=None):
      """Gets information about a specific manifest.

      Args:
        request: (DeploymentmanagerManifestsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Manifest) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Lists all manifests for a given deployment.

      Args:
        request: (DeploymentmanagerManifestsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ManifestsListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
  class OperationsService(base_api.BaseApiService):
    """Service class for the operations resource."""
    _NAME = u'operations'
    def __init__(self, client):
      super(DeploymentmanagerV2.OperationsService, self).__init__(client)
      # Declarative REST metadata consumed by base_api (read-only resource).
      self._method_configs = {
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.operations.get',
              ordered_params=[u'project', u'operation'],
              path_params=[u'operation', u'project'],
              query_params=[],
              relative_path=u'projects/{project}/global/operations/{operation}',
              request_field='',
              request_type_name=u'DeploymentmanagerOperationsGetRequest',
              response_type_name=u'Operation',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.operations.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/operations',
              request_field='',
              request_type_name=u'DeploymentmanagerOperationsListRequest',
              response_type_name=u'OperationsListResponse',
              supports_download=False,
          ),
          }
      self._upload_configs = {
          }
    def Get(self, request, global_params=None):
      """Gets information about a specific operation.

      Args:
        request: (DeploymentmanagerOperationsGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Operation) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Lists all operations for a project.

      Args:
        request: (DeploymentmanagerOperationsListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (OperationsListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
  class ResourcesService(base_api.BaseApiService):
    """Service class for the resources resource."""
    _NAME = u'resources'
    def __init__(self, client):
      super(DeploymentmanagerV2.ResourcesService, self).__init__(client)
      # Declarative REST metadata consumed by base_api (read-only resource).
      self._method_configs = {
          'Get': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.resources.get',
              ordered_params=[u'project', u'deployment', u'resource'],
              path_params=[u'deployment', u'project', u'resource'],
              query_params=[],
              relative_path=u'projects/{project}/global/deployments/{deployment}/resources/{resource}',
              request_field='',
              request_type_name=u'DeploymentmanagerResourcesGetRequest',
              response_type_name=u'Resource',
              supports_download=False,
          ),
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.resources.list',
              ordered_params=[u'project', u'deployment'],
              path_params=[u'deployment', u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/deployments/{deployment}/resources',
              request_field='',
              request_type_name=u'DeploymentmanagerResourcesListRequest',
              response_type_name=u'ResourcesListResponse',
              supports_download=False,
          ),
          }
      self._upload_configs = {
          }
    def Get(self, request, global_params=None):
      """Gets information about a single resource.

      Args:
        request: (DeploymentmanagerResourcesGetRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (Resource) The response message.
      """
      config = self.GetMethodConfig('Get')
      return self._RunMethod(
          config, request, global_params=global_params)
    def List(self, request, global_params=None):
      """Lists all resources in a given deployment.

      Args:
        request: (DeploymentmanagerResourcesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (ResourcesListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
  class TypesService(base_api.BaseApiService):
    """Service class for the types resource."""
    _NAME = u'types'
    def __init__(self, client):
      super(DeploymentmanagerV2.TypesService, self).__init__(client)
      # Declarative REST metadata consumed by base_api (list-only resource).
      self._method_configs = {
          'List': base_api.ApiMethodInfo(
              http_method=u'GET',
              method_id=u'deploymentmanager.types.list',
              ordered_params=[u'project'],
              path_params=[u'project'],
              query_params=[u'filter', u'maxResults', u'pageToken'],
              relative_path=u'projects/{project}/global/types',
              request_field='',
              request_type_name=u'DeploymentmanagerTypesListRequest',
              response_type_name=u'TypesListResponse',
              supports_download=False,
          ),
          }
      self._upload_configs = {
          }
    def List(self, request, global_params=None):
      """Lists all resource types for Deployment Manager.

      Args:
        request: (DeploymentmanagerTypesListRequest) input message
        global_params: (StandardQueryParameters, default: None) global arguments
      Returns:
        (TypesListResponse) The response message.
      """
      config = self.GetMethodConfig('List')
      return self._RunMethod(
          config, request, global_params=global_params)
| {
"content_hash": "055790a429f4c517d02d721b5b6673b8",
"timestamp": "",
"source": "github",
"line_count": 480,
"max_line_length": 233,
"avg_line_length": 41.6375,
"alnum_prop": 0.6297408185730011,
"repo_name": "flgiordano/netcash",
"id": "934d5d856a60c27b120eb9164655f11133e6412b",
"size": "19986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "+/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/deploymentmanager/v2/deploymentmanager_v2_client.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "33831"
},
{
"name": "JavaScript",
"bytes": "13859"
},
{
"name": "Shell",
"bytes": "2716"
}
],
"symlink_target": ""
} |
"""
This module provides a class :class:`MockTarget`, an implementation of :py:class:`~luigi.target.Target`.
:class:`MockTarget` contains all data in-memory.
The main purpose is unit testing workflows without writing to disk.
"""
import multiprocessing
from io import BytesIO
import sys
from luigi import six
from luigi import target
from luigi.format import get_default_format, MixedUnicodeBytes
class MockFileSystem(target.FileSystem):
    """
    MockFileSystem inspects/modifies _data to simulate file system operations.

    The backing store is a multiprocessing-managed dict mapping
    path -> raw contents, so the simulated file system is shared
    across worker processes.
    """
    # Lazily-created shared store; see get_all_data().
    _data = None

    def copy(self, path, dest, raise_if_exists=False):
        """
        Copies the contents of a single file path to dest
        """
        if raise_if_exists and dest in self.get_all_data():
            # Fixed: the message previously interpolated the *source* path,
            # which misreported which file already existed.
            raise RuntimeError('Destination exists: %s' % dest)
        contents = self.get_all_data()[path]
        self.get_all_data()[dest] = contents

    def get_all_data(self):
        # This starts a server in the background, so we don't want to do it in the global scope
        if MockFileSystem._data is None:
            MockFileSystem._data = multiprocessing.Manager().dict()
        return MockFileSystem._data

    def get_data(self, fn):
        """Return the raw contents stored for file ``fn`` (KeyError if absent)."""
        return self.get_all_data()[fn]

    def exists(self, path):
        """True when ``path`` has been written to this mock file system."""
        return MockTarget(path).exists()

    def remove(self, path, recursive=True, skip_trash=True):
        """
        Removes the given mockfile. skip_trash doesn't have any meaning.
        """
        if recursive:
            # Prefix match: deletes everything "under" path, mimicking
            # recursive directory removal.
            to_delete = []
            for s in self.get_all_data().keys():
                if s.startswith(path):
                    to_delete.append(s)
            for s in to_delete:
                self.get_all_data().pop(s)
        else:
            self.get_all_data().pop(path)

    def move(self, path, dest, raise_if_exists=False):
        """
        Moves a single file from path to dest
        """
        if raise_if_exists and dest in self.get_all_data():
            # Same message fix as copy(): report the conflicting destination.
            raise RuntimeError('Destination exists: %s' % dest)
        contents = self.get_all_data().pop(path)
        self.get_all_data()[dest] = contents

    def listdir(self, path):
        """
        listdir does a prefix match of self.get_all_data(), but doesn't yet support globs.
        """
        return [s for s in self.get_all_data().keys()
                if s.startswith(path)]

    def isdir(self, path):
        """A "directory" exists whenever at least one path starts with it."""
        return any(self.listdir(path))

    def mkdir(self, path, parents=True, raise_if_exists=False):
        """
        mkdir is a noop.
        """
        pass

    def clear(self):
        """Drop every stored file (useful for isolating tests)."""
        self.get_all_data().clear()
class MockTarget(target.FileSystemTarget):
    """In-memory :class:`~luigi.target.Target` backed by :class:`MockFileSystem`."""

    # All MockTargets share one fake file system.
    fs = MockFileSystem()

    def __init__(self, fn, is_tmp=None, mirror_on_stderr=False, format=None):
        """
        :param fn: path of the fake file.
        :param is_tmp: unused; kept for signature compatibility.
        :param mirror_on_stderr: when True, echo written data to stderr.
        :param format: luigi format; defaults to the configured default format.
        """
        self._mirror_on_stderr = mirror_on_stderr
        self.path = fn
        if format is None:
            format = get_default_format()
        # Allow to write unicode in file for retrocompatibility
        if six.PY2:
            format = format >> MixedUnicodeBytes
        self.format = format

    def exists(self,):
        return self.path in self.fs.get_all_data()

    def move(self, path, raise_if_exists=False):
        """
        Call MockFileSystem's move command
        """
        self.fs.move(self.path, path, raise_if_exists)

    def rename(self, *args, **kwargs):
        """
        Call move to rename self
        """
        self.move(*args, **kwargs)

    def open(self, mode='r'):
        """Open the fake file; ``mode`` must start with ``'r'`` or ``'w'``."""
        fn = self.path
        mock_target = self

        class Buffer(BytesIO):
            # Just to be able to do writing + reading from the same buffer
            _write_line = True

            def set_wrapper(self, wrapper):
                self.wrapper = wrapper

            def write(self, data):
                if mock_target._mirror_on_stderr:
                    if self._write_line:
                        sys.stderr.write(fn + ": ")
                    # Fixed: this previously tested the truthiness of the
                    # *type object* ``six.binary_type`` (always True), so the
                    # text branch was unreachable and mirroring str data
                    # crashed on Python 3 (str has no .decode).
                    if isinstance(data, six.binary_type):
                        sys.stderr.write(data.decode('utf8'))
                    else:
                        sys.stderr.write(data)
                    # Slice instead of index so empty writes don't raise
                    # IndexError, and compare against both bytes and text
                    # newlines so the check works for either payload type.
                    if data[-1:] in (b'\n', u'\n'):
                        self._write_line = True
                    else:
                        self._write_line = False
                super(Buffer, self).write(data)

            def close(self):
                # Only persist into the shared store when writing; flush the
                # format wrapper first (best-effort: it may not exist).
                if mode[0] == 'w':
                    try:
                        mock_target.wrapper.flush()
                    except AttributeError:
                        pass
                    mock_target.fs.get_all_data()[fn] = self.getvalue()
                super(Buffer, self).close()

            def __exit__(self, exc_type, exc_val, exc_tb):
                # Commit only on clean exit; on error the data is discarded.
                if not exc_type:
                    self.close()

            def __enter__(self):
                return self

            def readable(self):
                return mode[0] == 'r'

            def writeable(self):
                return mode[0] == 'w'

            def seekable(self):
                return False

        if mode[0] == 'w':
            wrapper = self.format.pipe_writer(Buffer())
            wrapper.set_wrapper(wrapper)
            return wrapper
        else:
            return self.format.pipe_reader(Buffer(self.fs.get_all_data()[fn]))
| {
"content_hash": "381317fe9f7ae37becd747ab5c243521",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 104,
"avg_line_length": 30.583815028901736,
"alnum_prop": 0.5365715365715366,
"repo_name": "PeteW/luigi",
"id": "e0aa364d9525f8d02777bdb8398f9f856e5e3015",
"size": "5894",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "luigi/mock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5051"
},
{
"name": "HTML",
"bytes": "41976"
},
{
"name": "JavaScript",
"bytes": "170555"
},
{
"name": "Python",
"bytes": "2067206"
},
{
"name": "Shell",
"bytes": "2901"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import logging
from django.conf import settings
from django.utils.functional import cached_property # noqa
from django.utils.translation import ugettext_lazy as _
from novaclient import exceptions as nova_exceptions
from novaclient.v1_1 import client as nova_client
from novaclient.v1_1.contrib import instance_action as nova_instance_action
from novaclient.v1_1.contrib import list_extensions as nova_list_extensions
from novaclient.v1_1 import security_group_rules as nova_rules
from novaclient.v1_1 import security_groups as nova_security_groups
from novaclient.v1_1 import servers as nova_servers
from horizon import conf
from horizon.utils import functions as utils
from horizon.utils.memoized import memoized # noqa
from openstack_dashboard.api import base
from openstack_dashboard.api import network_base
LOG = logging.getLogger(__name__)
# API static values
INSTANCE_ACTIVE_STATE = 'ACTIVE'
VOLUME_STATE_AVAILABLE = "available"
DEFAULT_QUOTA_NAME = 'default'
class VNCConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.
    Returned by the novaclient.servers.get_vnc_console method.
    """
    # Exposed keys: the console URL and the console type (e.g. 'novnc').
    _attrs = ['url', 'type']
class SPICEConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.
    Returned by the novaclient.servers.get_spice_console method.
    """
    # Exposed keys: the console URL and the console type.
    _attrs = ['url', 'type']
class RDPConsole(base.APIDictWrapper):
    """Wrapper for the "console" dictionary.
    Returned by the novaclient.servers.get_rdp_console method.
    """
    # Exposed keys: the console URL and the console type.
    _attrs = ['url', 'type']
class Server(base.APIResourceWrapper):
    """Simple wrapper around novaclient.server.Server.
    Preserves the request info so image name can later be retrieved.
    """
    _attrs = ['addresses', 'attrs', 'id', 'image', 'links',
              'metadata', 'name', 'private_ip', 'public_ip', 'status', 'uuid',
              'image_name', 'VirtualInterfaces', 'flavor', 'key_name', 'fault',
              'tenant_id', 'user_id', 'created', 'OS-EXT-STS:power_state',
              'OS-EXT-STS:task_state', 'OS-EXT-SRV-ATTR:instance_name',
              'OS-EXT-SRV-ATTR:host', 'OS-EXT-AZ:availability_zone',
              'OS-DCF:diskConfig']
    def __init__(self, apiresource, request):
        super(Server, self).__init__(apiresource)
        # Kept so image_name can fall back to a Glance lookup later.
        self.request = request
    # TODO(gabriel): deprecate making a call to Glance as a fallback.
    @property
    def image_name(self):
        """Display name of the server's image, or "-" if undeterminable.
        Tries the embedded image data first and only hits the Glance API
        when the name is not locally available.
        """
        # Imported here rather than at module level — presumably to avoid a
        # circular import with the glance API module; verify before moving.
        import glanceclient.exc as glance_exceptions # noqa
        from openstack_dashboard.api import glance # noqa
        if not self.image:
            return _("-")
        if hasattr(self.image, 'name'):
            return self.image.name
        if 'name' in self.image:
            return self.image['name']
        else:
            try:
                image = glance.image_get(self.request, self.image['id'])
                return image.name
            except glance_exceptions.ClientException:
                return _("-")
    @property
    def internal_name(self):
        """Nova's internal instance name (OS-EXT-SRV-ATTR extension)."""
        return getattr(self, 'OS-EXT-SRV-ATTR:instance_name', "")
    @property
    def availability_zone(self):
        """Availability zone reported by the OS-EXT-AZ extension, or ""."""
        return getattr(self, 'OS-EXT-AZ:availability_zone', "")
    @property
    def host_server(self):
        """Compute host running the instance (OS-EXT-SRV-ATTR), or ''."""
        return getattr(self, 'OS-EXT-SRV-ATTR:host', '')
class Hypervisor(base.APIDictWrapper):
    """Simple wrapper around novaclient.hypervisors.Hypervisor."""
    _attrs = ['manager', '_loaded', '_info', 'hypervisor_hostname', 'id',
              'servers']
    @property
    def servers(self):
        """Instances on this hypervisor, or [] when Nova omitted them.
        The 'servers' attribute is only present when the hypervisor was
        queried with servers included.
        """
        # Catch only the missing-attribute case; the previous blanket
        # ``except Exception: pass`` could silently hide real API errors.
        try:
            return self._apidict.servers
        except AttributeError:
            return []
class NovaUsage(base.APIResourceWrapper):
    """Simple wrapper around contrib/simple_usage.py."""
    _attrs = ['start', 'server_usages', 'stop', 'tenant_id',
              'total_local_gb_usage', 'total_memory_mb_usage',
              'total_vcpus_usage', 'total_hours']
    def _active_sum(self, field):
        # Total ``field`` across server usages that are still running
        # (an active usage has no end timestamp).
        return sum(usage[field] for usage in self.server_usages
                   if usage['ended_at'] is None)
    def get_summary(self):
        """Return a dict summarizing this tenant's resource consumption."""
        return {
            'instances': self.total_active_instances,
            'memory_mb': self.memory_mb,
            'vcpus': getattr(self, "total_vcpus_usage", 0),
            'vcpu_hours': self.vcpu_hours,
            'local_gb': self.local_gb,
            'disk_gb_hours': self.disk_gb_hours,
            'memory_mb_hours': self.memory_mb_hours,
        }
    @property
    def total_active_instances(self):
        """Number of instances that have not yet been terminated."""
        return len([s for s in self.server_usages
                    if s['ended_at'] is None])
    @property
    def vcpus(self):
        """vCPUs currently in use by active instances."""
        return self._active_sum('vcpus')
    @property
    def vcpu_hours(self):
        return getattr(self, "total_hours", 0)
    @property
    def local_gb(self):
        """Local disk (GB) currently in use by active instances."""
        return self._active_sum('local_gb')
    @property
    def memory_mb(self):
        """RAM (MB) currently in use by active instances."""
        return self._active_sum('memory_mb')
    @property
    def disk_gb_hours(self):
        return getattr(self, "total_local_gb_usage", 0)
    @property
    def memory_mb_hours(self):
        return getattr(self, "total_memory_mb_usage", 0)
class SecurityGroup(base.APIResourceWrapper):
    """Wrapper around novaclient.security_groups.SecurityGroup.
    Wraps its rules in SecurityGroupRule objects and allows access to them.
    """
    _attrs = ['id', 'name', 'description', 'tenant_id']
    @cached_property
    def rules(self):
        """Wraps transmitted rule info in the novaclient rule class."""
        # No real manager is needed for plain attribute access, hence None;
        # cached_property means the wrapping happens at most once per group.
        manager = nova_rules.SecurityGroupRuleManager(None)
        rule_objs = [nova_rules.SecurityGroupRule(manager, rule)
                     for rule in self._apiresource.rules]
        return [SecurityGroupRule(rule) for rule in rule_objs]
class SecurityGroupRule(base.APIResourceWrapper):
    """Wrapper for individual rules in a SecurityGroup."""
    _attrs = ['id', 'ip_protocol', 'from_port', 'to_port', 'ip_range', 'group']
    def __unicode__(self):
        # Render "ALLOW from:to from <source>", where the source is either a
        # named security group or a CIDR block.
        vals = {'from': self.from_port, 'to': self.to_port}
        if 'name' in self.group:
            vals['group'] = self.group['name']
            return _('ALLOW %(from)s:%(to)s from %(group)s') % vals
        vals['cidr'] = self.ip_range['cidr']
        return _('ALLOW %(from)s:%(to)s from %(cidr)s') % vals
    # The following attributes are defined to keep compatibility with Neutron
    @property
    def ethertype(self):
        # Nova security groups have no ethertype concept.
        return None
    @property
    def direction(self):
        # Nova rules are always ingress rules.
        return 'ingress'
class SecurityGroupManager(network_base.SecurityGroupManager):
    """Nova-network implementation of the security group abstraction."""
    backend = 'nova'
    def __init__(self, request):
        self.request = request
        self.client = novaclient(request)
    def list(self):
        """Return all security groups visible to the current project."""
        return [SecurityGroup(g) for g
                in self.client.security_groups.list()]
    def get(self, sg_id):
        """Return a single security group by id."""
        return SecurityGroup(self.client.security_groups.get(sg_id))
    def create(self, name, desc):
        """Create a security group with the given name and description."""
        return SecurityGroup(self.client.security_groups.create(name, desc))
    def update(self, sg_id, name, desc):
        """Rename/re-describe an existing security group."""
        return SecurityGroup(self.client.security_groups.update(sg_id,
                                                                name, desc))
    def delete(self, security_group_id):
        """Delete the security group; raises on failure."""
        self.client.security_groups.delete(security_group_id)
    def rule_create(self, parent_group_id,
                    direction=None, ethertype=None,
                    ip_protocol=None, from_port=None, to_port=None,
                    cidr=None, group_id=None):
        """Add a rule to ``parent_group_id`` and return it wrapped.
        ``direction``/``ethertype`` exist for Neutron API parity only.
        """
        # Nova Security Group API does not use direction and ethertype fields.
        sg = self.client.security_group_rules.create(parent_group_id,
                                                     ip_protocol,
                                                     from_port,
                                                     to_port,
                                                     cidr,
                                                     group_id)
        return SecurityGroupRule(sg)
    def rule_delete(self, security_group_rule_id):
        """Delete a single security group rule."""
        self.client.security_group_rules.delete(security_group_rule_id)
    def list_by_instance(self, instance_id):
        """Gets security groups of an instance."""
        # TODO(gabriel): This needs to be moved up to novaclient, and should
        # be removed once novaclient supports this call.
        security_groups = []
        nclient = self.client
        resp, body = nclient.client.get('/servers/%s/os-security-groups'
                                        % instance_id)
        if body:
            # Wrap data in SG objects as novaclient would.
            sg_objs = [
                nova_security_groups.SecurityGroup(
                    nclient.security_groups, sg, loaded=True)
                for sg in body.get('security_groups', [])]
            # Then wrap novaclient's object with our own. Yes, sadly wrapping
            # with two layers of objects is necessary.
            security_groups = [SecurityGroup(sg) for sg in sg_objs]
        return security_groups
    def update_instance_security_group(self, instance_id,
                                       new_security_group_ids):
        """Reconcile an instance's groups with ``new_security_group_ids``.
        Computes the set difference between current and wanted groups and
        applies only the additions/removals needed. Returns True on success;
        raises a sanitized ClientException on partial failure.
        """
        try:
            all_groups = self.list()
        except Exception:
            raise Exception(_("Couldn't get security group list."))
        # Nova's add/remove calls take group *names*, so map ids to names.
        wanted_groups = set([sg.name for sg in all_groups
                             if sg.id in new_security_group_ids])
        try:
            current_groups = self.list_by_instance(instance_id)
        except Exception:
            raise Exception(_("Couldn't get current security group "
                              "list for instance %s.")
                            % instance_id)
        current_group_names = set([sg.name for sg in current_groups])
        groups_to_add = wanted_groups - current_group_names
        groups_to_remove = current_group_names - wanted_groups
        # Counts down as each change succeeds, so on failure it reflects how
        # many modifications were still outstanding.
        num_groups_to_modify = len(groups_to_add | groups_to_remove)
        try:
            for group in groups_to_add:
                self.client.servers.add_security_group(instance_id, group)
                num_groups_to_modify -= 1
            for group in groups_to_remove:
                self.client.servers.remove_security_group(instance_id, group)
                num_groups_to_modify -= 1
        except nova_exceptions.ClientException as err:
            LOG.error(_("Failed to modify %(num_groups_to_modify)d instance "
                        "security groups: %(err)s") %
                      dict(num_groups_to_modify=num_groups_to_modify,
                           err=err))
            # reraise novaclient.exceptions.ClientException, but with
            # a sanitized error message so we don't risk exposing
            # sensitive information to the end user. This has to be
            # novaclient.exceptions.ClientException, not just
            # Exception, since the former is recognized as a
            # "recoverable" exception by horizon, and therefore the
            # error message is passed along to the end user, while
            # Exception is swallowed alive by horizon and a generic
            # error message is given to the end user
            raise nova_exceptions.ClientException(
                err.code,
                _("Failed to modify %d instance security groups") %
                num_groups_to_modify)
        return True
class FlavorExtraSpec(object):
    """Value object pairing a flavor with one of its extra-spec entries."""
    def __init__(self, flavor_id, key, val):
        self.flavor_id, self.key, self.value = flavor_id, key, val
        # ``id`` mirrors the spec key so the object can act as a table row.
        self.id = key
class FloatingIp(base.APIResourceWrapper):
    """Floating IP wrapper exposing Neutron-compatible attributes."""
    _attrs = ['id', 'ip', 'fixed_ip', 'port_id', 'instance_id',
              'instance_type', 'pool']
    def __init__(self, fip):
        # Mimic Neutron's schema: nova has no ports, so the instance id
        # doubles as the port id, and any associated "port" is a compute one.
        setattr(fip, 'port_id', fip.instance_id)
        setattr(fip, 'instance_type',
                'compute' if fip.instance_id else None)
        super(FloatingIp, self).__init__(fip)
class FloatingIpPool(base.APIDictWrapper):
    """Floating IP pool wrapper; the pool name serves as its id."""
    def __init__(self, pool):
        super(FloatingIpPool, self).__init__(
            {'id': pool.name, 'name': pool.name})
class FloatingIpTarget(base.APIDictWrapper):
    """Association target for a floating IP: a server, labelled "name (id)"."""
    def __init__(self, server):
        super(FloatingIpTarget, self).__init__(
            {'name': '%s (%s)' % (server.name, server.id),
             'id': server.id})
class FloatingIpManager(network_base.FloatingIpManager):
    """Nova-network implementation of the floating IP abstraction."""
    def __init__(self, request):
        self.request = request
        self.client = novaclient(request)
    def list_pools(self):
        """Return all floating IP pools wrapped as FloatingIpPool."""
        return [FloatingIpPool(pool)
                for pool in self.client.floating_ip_pools.list()]
    def list(self):
        """Return the project's allocated floating IPs."""
        return [FloatingIp(fip)
                for fip in self.client.floating_ips.list()]
    def get(self, floating_ip_id):
        """Return a single floating IP by id."""
        return FloatingIp(self.client.floating_ips.get(floating_ip_id))
    def allocate(self, pool):
        """Allocate a new floating IP from ``pool``."""
        return FloatingIp(self.client.floating_ips.create(pool=pool))
    def release(self, floating_ip_id):
        """Return the floating IP to its pool."""
        self.client.floating_ips.delete(floating_ip_id)
    def associate(self, floating_ip_id, port_id):
        # In Nova implied port_id is instance_id
        server = self.client.servers.get(port_id)
        fip = self.client.floating_ips.get(floating_ip_id)
        self.client.servers.add_floating_ip(server.id, fip.ip)
    def disassociate(self, floating_ip_id):
        """Detach the floating IP from whatever server holds it."""
        fip = self.client.floating_ips.get(floating_ip_id)
        server = self.client.servers.get(fip.instance_id)
        self.client.servers.remove_floating_ip(server.id, fip.ip)
    def list_targets(self):
        """Under nova-network every server is a potential target."""
        return [FloatingIpTarget(s) for s in self.client.servers.list()]
    def get_target_id_by_instance(self, instance_id, target_list=None):
        # Nova uses the instance id itself as the association target id.
        return instance_id
    def list_target_id_by_instance(self, instance_id, target_list=None):
        # Single-NIC model: one target per instance.
        return [instance_id, ]
    def is_simple_associate_supported(self):
        return conf.HORIZON_CONFIG["simple_ip_management"]
    def is_supported(self):
        # Floating IPs are always available with nova-network.
        return True
@memoized
def novaclient(request):
    """Build a memoized nova client bound to the request's token and tenant."""
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    c = nova_client.Client(request.user.username,
                           request.user.token.id,
                           project_id=request.user.tenant_id,
                           auth_url=base.url_for(request, 'compute'),
                           insecure=insecure,
                           cacert=cacert,
                           http_log_debug=settings.DEBUG)
    # Reuse the existing keystone token and endpoint directly instead of
    # having novaclient re-authenticate on first use.
    c.client.auth_token = request.user.token.id
    c.client.management_url = base.url_for(request, 'compute')
    return c
def server_vnc_console(request, instance_id, console_type='novnc'):
    """Return a VNC console connection for the instance."""
    return VNCConsole(novaclient(request).servers.get_vnc_console(
        instance_id, console_type)['console'])


def server_spice_console(request, instance_id, console_type='spice-html5'):
    """Return a SPICE console connection for the instance."""
    return SPICEConsole(novaclient(request).servers.get_spice_console(
        instance_id, console_type)['console'])


def server_rdp_console(request, instance_id, console_type='rdp-html5'):
    """Return an RDP console connection for the instance."""
    return RDPConsole(novaclient(request).servers.get_rdp_console(
        instance_id, console_type)['console'])
def flavor_create(request, name, memory, vcpu, disk, flavorid='auto',
                  ephemeral=0, swap=0, metadata=None, is_public=True):
    """Create a flavor and, when *metadata* is provided, set its
    extra specs in a follow-up call.

    Returns the created flavor object.
    """
    flavor = novaclient(request).flavors.create(name, memory, vcpu, disk,
                                                flavorid=flavorid,
                                                ephemeral=ephemeral,
                                                swap=swap, is_public=is_public)
    # Idiomatic truthiness test (was `if (metadata):`); an empty dict
    # skips the extra round trip.
    if metadata:
        flavor_extra_set(request, flavor.id, metadata)
    return flavor
def flavor_delete(request, flavor_id):
    """Delete the given flavor."""
    novaclient(request).flavors.delete(flavor_id)


def flavor_get(request, flavor_id):
    """Get a single flavor by id."""
    return novaclient(request).flavors.get(flavor_id)


@memoized
def flavor_list(request, is_public=True):
    """Get the list of available instance sizes (flavors)."""
    return novaclient(request).flavors.list(is_public=is_public)


@memoized
def flavor_access_list(request, flavor=None):
    """Get the list of access instance sizes (flavors)."""
    return novaclient(request).flavor_access.list(flavor=flavor)


def add_tenant_to_flavor(request, flavor, tenant):
    """Add a tenant to the given flavor access list."""
    return novaclient(request).flavor_access.add_tenant_access(
        flavor=flavor, tenant=tenant)


def remove_tenant_from_flavor(request, flavor, tenant):
    """Remove a tenant from the given flavor access list."""
    return novaclient(request).flavor_access.remove_tenant_access(
        flavor=flavor, tenant=tenant)


def flavor_get_extras(request, flavor_id, raw=False):
    """Get flavor extra specs.

    With raw=True the {key: value} dict from nova is returned as-is;
    otherwise each pair is wrapped in a FlavorExtraSpec.
    """
    flavor = novaclient(request).flavors.get(flavor_id)
    extras = flavor.get_keys()
    if raw:
        return extras
    return [FlavorExtraSpec(flavor_id, key, value) for
            key, value in extras.items()]


def flavor_extra_delete(request, flavor_id, keys):
    """Unset the flavor extra spec keys."""
    flavor = novaclient(request).flavors.get(flavor_id)
    return flavor.unset_keys(keys)
def flavor_extra_set(request, flavor_id, metadata):
    """Set extra spec key/value pairs on a flavor.

    Returns None without calling nova when *metadata* is empty —
    set_keys cannot delete keys (use flavor_extra_delete for that).
    """
    flavor = novaclient(request).flavors.get(flavor_id)
    # Idiomatic truthiness test (was `if (not metadata):`).
    if not metadata:  # not a way to delete keys
        return None
    return flavor.set_keys(metadata)
def snapshot_create(request, instance_id, name):
    """Create a snapshot image of the instance."""
    return novaclient(request).servers.create_image(instance_id, name)


def keypair_create(request, name):
    """Create a keypair; nova generates the key material."""
    return novaclient(request).keypairs.create(name)


def keypair_import(request, name, public_key):
    """Import an existing public key as a keypair."""
    return novaclient(request).keypairs.create(name, public_key)


def keypair_delete(request, keypair_id):
    """Delete the given keypair."""
    novaclient(request).keypairs.delete(keypair_id)


def keypair_list(request):
    """List the current user's keypairs."""
    return novaclient(request).keypairs.list()


def server_create(request, name, image, flavor, key_name, user_data,
                  security_groups, block_device_mapping=None,
                  block_device_mapping_v2=None, nics=None,
                  availability_zone=None, instance_count=1, admin_pass=None,
                  disk_config=None, config_drive=None, meta=None):
    """Boot new server(s) and wrap the result in a Server object."""
    return Server(novaclient(request).servers.create(
        name, image, flavor, userdata=user_data,
        security_groups=security_groups,
        key_name=key_name, block_device_mapping=block_device_mapping,
        block_device_mapping_v2=block_device_mapping_v2,
        nics=nics, availability_zone=availability_zone,
        min_count=instance_count, admin_pass=admin_pass,
        disk_config=disk_config, config_drive=config_drive,
        meta=meta), request)


def server_delete(request, instance):
    """Delete the given server."""
    novaclient(request).servers.delete(instance)


def server_get(request, instance_id):
    """Get a single server, wrapped in a Server object."""
    return Server(novaclient(request).servers.get(instance_id), request)
def server_list(request, search_opts=None, all_tenants=False):
    """List servers, optionally paginated.

    Returns a (servers, has_more_data) tuple.  Pagination is opt-in
    via search_opts['paginate'] = True; one extra row is requested so
    that "has more data" can be detected without a second API call.
    """
    page_size = utils.get_page_size(request)
    c = novaclient(request)
    paginate = False
    if search_opts is None:
        search_opts = {}
    elif 'paginate' in search_opts:
        # 'paginate' is consumed here; it is not a nova search option.
        paginate = search_opts.pop('paginate')
        if paginate:
            # Request one sentinel row past the page boundary.
            search_opts['limit'] = page_size + 1
    if all_tenants:
        search_opts['all_tenants'] = True
    else:
        search_opts['project_id'] = request.user.tenant_id
    servers = [Server(s, request)
               for s in c.servers.list(True, search_opts)]
    has_more_data = False
    if paginate and len(servers) > page_size:
        # Drop the sentinel row requested above.
        servers.pop(-1)
        has_more_data = True
    elif paginate and len(servers) == getattr(settings, 'API_RESULT_LIMIT',
                                              1000):
        # The server-side result limit truncated the listing; assume
        # more data exists.
        has_more_data = True
    return (servers, has_more_data)
def server_console_output(request, instance_id, tail_length=None):
    """Gets console output of an instance."""
    return novaclient(request).servers.get_console_output(instance_id,
                                                          length=tail_length)


def server_pause(request, instance_id):
    """Pause an instance."""
    novaclient(request).servers.pause(instance_id)


def server_unpause(request, instance_id):
    """Unpause a paused instance."""
    novaclient(request).servers.unpause(instance_id)


def server_suspend(request, instance_id):
    """Suspend an instance."""
    novaclient(request).servers.suspend(instance_id)


def server_resume(request, instance_id):
    """Resume a suspended instance."""
    novaclient(request).servers.resume(instance_id)


def server_reboot(request, instance_id, soft_reboot=False):
    """Reboot an instance; a hard reboot unless soft_reboot is True."""
    hardness = nova_servers.REBOOT_HARD
    if soft_reboot:
        hardness = nova_servers.REBOOT_SOFT
    novaclient(request).servers.reboot(instance_id, hardness)


def server_rebuild(request, instance_id, image_id, password=None,
                   disk_config=None):
    """Rebuild an instance from the given image."""
    return novaclient(request).servers.rebuild(instance_id, image_id,
                                               password, disk_config)


def server_update(request, instance_id, name):
    """Update (rename) an instance."""
    return novaclient(request).servers.update(instance_id, name=name)


def server_migrate(request, instance_id):
    """Cold-migrate an instance."""
    novaclient(request).servers.migrate(instance_id)


def server_live_migrate(request, instance_id, host, block_migration=False,
                        disk_over_commit=False):
    """Live-migrate an instance to *host*."""
    novaclient(request).servers.live_migrate(instance_id, host,
                                             block_migration,
                                             disk_over_commit)


def server_resize(request, instance_id, flavor, disk_config=None, **kwargs):
    """Start resizing an instance to a new flavor."""
    novaclient(request).servers.resize(instance_id, flavor,
                                       disk_config, **kwargs)


def server_confirm_resize(request, instance_id):
    """Confirm (finalize) a completed resize."""
    novaclient(request).servers.confirm_resize(instance_id)


def server_revert_resize(request, instance_id):
    """Revert an unconfirmed resize."""
    novaclient(request).servers.revert_resize(instance_id)


def server_start(request, instance_id):
    """Start a stopped instance."""
    novaclient(request).servers.start(instance_id)


def server_stop(request, instance_id):
    """Stop a running instance."""
    novaclient(request).servers.stop(instance_id)


def server_lock(request, instance_id):
    """Lock an instance."""
    novaclient(request).servers.lock(instance_id)


def server_unlock(request, instance_id):
    """Unlock a locked instance."""
    novaclient(request).servers.unlock(instance_id)
def tenant_quota_get(request, tenant_id):
    """Get the quota set for a tenant."""
    return base.QuotaSet(novaclient(request).quotas.get(tenant_id))


def tenant_quota_update(request, tenant_id, **kwargs):
    """Update individual quota values for a tenant."""
    novaclient(request).quotas.update(tenant_id, **kwargs)


def default_quota_get(request, tenant_id):
    """Get the default quota set."""
    return base.QuotaSet(novaclient(request).quotas.defaults(tenant_id))


def default_quota_update(request, **kwargs):
    """Update the default quota class."""
    novaclient(request).quota_classes.update(DEFAULT_QUOTA_NAME, **kwargs)


def usage_get(request, tenant_id, start, end):
    """Get usage for one tenant over the [start, end] period."""
    return NovaUsage(novaclient(request).usage.get(tenant_id, start, end))


def usage_list(request, start, end):
    """Get usage for all tenants over the [start, end] period."""
    return [NovaUsage(u) for u in
            novaclient(request).usage.list(start, end, True)]


def virtual_interfaces_list(request, instance_id):
    """List the virtual interfaces attached to an instance."""
    return novaclient(request).virtual_interfaces.list(instance_id)


def get_x509_credentials(request):
    """Create a new x509 certificate for the current project."""
    return novaclient(request).certs.create()


def get_x509_root_certificate(request):
    """Fetch the x509 root certificate."""
    return novaclient(request).certs.get()


def get_password(request, instance_id, private_key=None):
    """Get the admin password of an instance, optionally decrypted
    with *private_key*."""
    return novaclient(request).servers.get_password(instance_id, private_key)


def instance_volume_attach(request, volume_id, instance_id, device):
    """Attach a volume to an instance at the given device path."""
    return novaclient(request).volumes.create_server_volume(instance_id,
                                                            volume_id,
                                                            device)


def instance_volume_detach(request, instance_id, att_id):
    """Detach volume attachment *att_id* from an instance."""
    return novaclient(request).volumes.delete_server_volume(instance_id,
                                                            att_id)


def instance_volumes_list(request, instance_id):
    """List volumes attached to an instance, annotated with their
    cinder display names."""
    # Imported locally, presumably to avoid a circular import between
    # the api modules — confirm before moving to the top of the file.
    from openstack_dashboard.api import cinder
    volumes = novaclient(request).volumes.get_server_volumes(instance_id)
    for volume in volumes:
        volume_data = cinder.cinderclient(request).volumes.get(volume.id)
        volume.name = cinder.Volume(volume_data).name
    return volumes


def hypervisor_list(request):
    """List hypervisors."""
    return novaclient(request).hypervisors.list()


def hypervisor_stats(request):
    """Get aggregate hypervisor statistics."""
    return novaclient(request).hypervisors.statistics()


def hypervisor_search(request, query, servers=True):
    """Search hypervisors by hostname, optionally including the
    servers on each match."""
    return novaclient(request).hypervisors.search(query, servers)
def evacuate_host(request, host, target=None, on_shared_storage=False):
    """Evacuate every server found on *host*, one nova call each.

    Raises ClientException (with the last error's code) when any
    evacuation fails; returns True otherwise.
    """
    # TODO(jmolle) This should be change for nova atomic api host_evacuate
    hypervisors = novaclient(request).hypervisors.search(host, True)
    response = []
    err_code = None
    for hypervisor in hypervisors:
        hyper = Hypervisor(hypervisor)
        # if hypervisor doesn't have servers, the attribute is not present
        for server in hyper.servers:
            try:
                novaclient(request).servers.evacuate(server['uuid'],
                                                     target,
                                                     on_shared_storage)
            except nova_exceptions.ClientException as err:
                err_code = err.code
            # NOTE(review): this append runs for every server, so the
            # failure message below lists successfully evacuated
            # instances as well — confirm whether only the failed ones
            # were intended.
            msg = _("Name: %(name)s ID: %(uuid)s")
            msg = msg % {'name': server['name'], 'uuid': server['uuid']}
            response.append(msg)
    if err_code:
        msg = _('Failed to evacuate instances: %s') % ', '.join(response)
        raise nova_exceptions.ClientException(err_code, msg)
    return True
def tenant_absolute_limits(request, reserved=False):
    """Return the tenant's absolute limits as a {name: value} dict.

    Negative values reported by nova are normalized: a negative
    total*Used counter becomes 0 (workaround for nova bug 1370867),
    and any other negative value means "unlimited" and becomes
    float("inf").
    """
    limits = novaclient(request).limits.get(reserved=reserved).absolute
    limits_dict = {}
    for limit in limits:
        value = limit.value
        if value >= 0:
            limits_dict[limit.name] = value
        elif limit.name.startswith('total') and limit.name.endswith('Used'):
            # nova bug 1370867: usage counters may come back negative;
            # report them as zero.
            limits_dict[limit.name] = 0
        else:
            # -1 is used to represent unlimited quotas
            limits_dict[limit.name] = float("inf")
    return limits_dict
def availability_zone_list(request, detailed=False):
    """List availability zones."""
    return novaclient(request).availability_zones.list(detailed=detailed)


def service_list(request, binary=None):
    """List nova services, optionally filtered by binary name."""
    return novaclient(request).services.list(binary=binary)


def service_enable(request, host, binary):
    """Enable a nova service on the given host."""
    return novaclient(request).services.enable(host, binary)


def service_disable(request, host, binary, reason=None):
    """Disable a nova service, logging *reason* when provided."""
    if reason:
        return novaclient(request).services.disable_log_reason(host,
                                                               binary, reason)
    else:
        return novaclient(request).services.disable(host, binary)


def aggregate_details_list(request):
    """List host aggregates with their full details."""
    result = []
    c = novaclient(request)
    for aggregate in c.aggregates.list():
        result.append(c.aggregates.get_details(aggregate.id))
    return result


def aggregate_create(request, name, availability_zone=None):
    """Create a host aggregate."""
    return novaclient(request).aggregates.create(name, availability_zone)


def aggregate_delete(request, aggregate_id):
    """Delete a host aggregate."""
    return novaclient(request).aggregates.delete(aggregate_id)


def aggregate_get(request, aggregate_id):
    """Get a host aggregate."""
    return novaclient(request).aggregates.get(aggregate_id)


def aggregate_update(request, aggregate_id, values):
    """Update a host aggregate with the given values dict."""
    return novaclient(request).aggregates.update(aggregate_id, values)


def aggregate_set_metadata(request, aggregate_id, metadata):
    """Set metadata key/value pairs on a host aggregate."""
    return novaclient(request).aggregates.set_metadata(aggregate_id, metadata)


def host_list(request):
    """List compute hosts."""
    return novaclient(request).hosts.list()


def add_host_to_aggregate(request, aggregate_id, host):
    """Add a host to an aggregate."""
    return novaclient(request).aggregates.add_host(aggregate_id, host)


def remove_host_from_aggregate(request, aggregate_id, host):
    """Remove a host from an aggregate."""
    return novaclient(request).aggregates.remove_host(aggregate_id, host)


@memoized
def list_extensions(request):
    """List (and memoize) the extensions supported by this nova."""
    return nova_list_extensions.ListExtManager(novaclient(request)).show_all()
@memoized
def extension_supported(extension_name, request):
    """Determine if nova supports a given extension name.

    Example values for the extension_name include AdminActions,
    ConsoleOutput, etc.
    """
    return any(extension.name == extension_name
               for extension in list_extensions(request))
def can_set_server_password():
    """Whether OPENSTACK_HYPERVISOR_FEATURES allows setting a server
    password (defaults to False when unset)."""
    hypervisor_features = getattr(
        settings, 'OPENSTACK_HYPERVISOR_FEATURES', {})
    return hypervisor_features.get('can_set_password', False)
def instance_action_list(request, instance_id):
    """List the recorded actions for an instance."""
    return nova_instance_action.InstanceActionManager(
        novaclient(request)).list(instance_id)
def can_set_mount_point():
    """Return the Hypervisor's capability of setting mount points."""
    features = getattr(settings, "OPENSTACK_HYPERVISOR_FEATURES", {})
    return features.get("can_set_mount_point", False)
| {
"content_hash": "ea7515d83b8bb8f0c1f255c8c4bd66d4",
"timestamp": "",
"source": "github",
"line_count": 869,
"max_line_length": 79,
"avg_line_length": 34.208285385500574,
"alnum_prop": 0.6310424866283177,
"repo_name": "AlexOugh/horizon",
"id": "45c8d4068a5584ce897a0e1aaf8ca8c5f6051e54",
"size": "30591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/api/nova.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1000458"
},
{
"name": "JavaScript",
"bytes": "244031"
},
{
"name": "Makefile",
"bytes": "6165"
},
{
"name": "Python",
"bytes": "4545176"
},
{
"name": "Shell",
"bytes": "18285"
}
],
"symlink_target": ""
} |
import ctypes
import ctypes.util
import os
try:
    # If possible, get nicer tracebacks from low-level signals.
    import faulthandler
    faulthandler.enable()
except Exception:
    # Best effort only — the module must still import when
    # faulthandler is missing (Python < 3.3) or enable() fails.
    # A bare `except:` would also have swallowed KeyboardInterrupt
    # and SystemExit, which should always propagate.
    pass
from .v31 import *
class _Mock(object):
def __init__(self, *args, **kwargs):
pass
try:
_llvm = ctypes.cdll.LoadLibrary(os.path.join(os.path.dirname(__file__), "_llvm.so"))
_libc = ctypes.cdll.LoadLibrary(ctypes.util.find_library("c"))
def _func(func_name, restype, argtypes=[]):
"""Creates ctypes wrapper for an LLVM API function.
LLVM{name} -> llvm.{name}
"""
g = globals()
g[func_name] = getattr(_llvm, "LLVM" + func_name)
g[func_name].restype = restype
g[func_name].argtypes = argtypes
except OSError:
# Allows us to complete the module import when LLMV library
# is not available; currently used for documentation building.
def _func(func_name, restype, argtypes=[]):
globals()[func_name] = _Mock
class owned_c_char_p(ctypes.c_char_p):
    """Char pointer which collects the memory of the return value."""
    def __del__(self):
        # The C side allocated this string; release it with the C
        # runtime's free() when the Python wrapper is collected.
        # NOTE(review): relies on module-level `_libc`, which is only
        # bound when the LLVM shared library loaded successfully —
        # confirm instances cannot exist in the mocked (docs-only)
        # configuration.
        _libc.free(self)
Bool = ctypes.c_int
FALSE = 0
TRUE = 1
# Context
class OpaqueContext(ctypes.Structure):
pass
ContextRef = ctypes.POINTER(OpaqueContext)
_func("GetGlobalContext", ContextRef)
# Module
class OpaqueModule(ctypes.Structure):
pass
ModuleRef = ctypes.POINTER(OpaqueModule)
_func("ModuleCreateWithName", ModuleRef)
_func("GetModuleName", ctypes.c_char_p, [ModuleRef])
_func("DumpModule", None, [ModuleRef])
_func("DumpModuleToString", owned_c_char_p, [ModuleRef])
_func("DisposeModule", None, [ModuleRef])
_func("DisposeMessage", None, [ctypes.c_char_p])
# Linker
(LinkerDestroySource, LinkerPreserveSource) = range(2)
_func("LinkModules__", Bool,
[ModuleRef, ModuleRef, ctypes.c_int, ctypes.POINTER(ctypes.c_char_p)])
# Type
class OpaqueType(ctypes.Structure):
pass
TypeRef = ctypes.POINTER(OpaqueType)
TypeKind = ctypes.c_int
_func("VoidType", TypeRef)
_func("FloatType", TypeRef)
_func("DoubleType", TypeRef)
_func("IntType", TypeRef, [ctypes.c_uint])
_func("GetTypeKind", TypeKind, [TypeRef])
_func("GetIntTypeWidth", ctypes.c_uint, [TypeRef])
# Sequential types
_func("PointerType", TypeRef, [TypeRef, ctypes.c_uint])
_func("VectorType", TypeRef, [TypeRef, ctypes.c_uint])
_func("ArrayType", TypeRef, [TypeRef, ctypes.c_uint])
_func("GetElementType", TypeRef, [TypeRef])
# Value
class OpaqueValue(ctypes.Structure):
pass
ValueRef = ctypes.POINTER(OpaqueValue)
_func("TypeOf", TypeRef, [ValueRef])
_func("SetValueName", None, [ValueRef, ctypes.c_char_p])
_func("GetValueName", ctypes.c_char_p, [ValueRef])
_func("DumpValue", None, [ValueRef])
# Operations on scalar constants
_func("ConstNull", ValueRef, [TypeRef])
_func("ConstInt", ValueRef, [TypeRef, ctypes.c_ulonglong, Bool])
_func("ConstReal", ValueRef, [TypeRef, ctypes.c_double])
_func("ConstString", ValueRef, [ctypes.c_char_p, ctypes.c_uint, Bool])
_func("ConstArray", ValueRef, [TypeRef, ctypes.POINTER(ValueRef), ctypes.c_uint])
_func("IsATerminatorInst", ValueRef, [ValueRef])
Opcode = ctypes.c_int
# Globals
_func("AddGlobal", ValueRef, [ModuleRef, TypeRef, ctypes.c_char_p])
_func("GetNamedGlobal", ValueRef, [ModuleRef, ctypes.c_char_p])
_func("SetInitializer", None, [ValueRef, ValueRef])
_func("SetGlobalConstant", None, [ValueRef, Bool])
# Functions
_func("FunctionType", TypeRef, [TypeRef, ctypes.POINTER(TypeRef), ctypes.c_uint, ctypes.c_int])
_func("AddFunction", ValueRef, [ModuleRef, ctypes.c_char_p, TypeRef])
_func("GetNamedFunction", ValueRef, [ModuleRef, ctypes.c_char_p])
_func("SetLinkage", None, [ValueRef, ctypes.c_int])
_func("GetParam", ValueRef, [ValueRef, ctypes.c_uint])
_func("GetReturnType", TypeRef, [TypeRef])
_func("AddAttribute", None, [ValueRef, ctypes.c_int]);
_func("AddFunctionAttr", None, [ValueRef, ctypes.c_int])
NoAliasAttribute = 1 << 6
AlwaysInlineAttribute = 1 << 12
def function_return_type(func):
    """Gets the return type directly from a function object."""
    function_type = GetElementType(TypeOf(func))
    return GetReturnType(function_type)
_func("GetIntrinsicDeclaration", ValueRef,
[ModuleRef, ctypes.c_uint, ctypes.POINTER(TypeRef), ctypes.c_uint])
_func("GetIntrinsicCount__", ctypes.c_uint, [])
_func("GetIntrinsicName__", owned_c_char_p, [ctypes.c_uint])
# This is False if we're generating docs and didn't build the LLVM interface
if GetIntrinsicCount__ is not _Mock:
INTRINSICS = dict((GetIntrinsicName__(i).value, i) for i in range(GetIntrinsicCount__()))
"""Intrinsic map; from name to intrinsic ID to use with GetIntrinsicDeclaration."""
ExternalLinkage = 0
PrivateLinkage = 8
# Structure Types
_func("StructType", TypeRef, [ctypes.POINTER(TypeRef), ctypes.c_uint, ctypes.c_bool])
_func("StructCreateNamed", TypeRef, [ContextRef, ctypes.c_char_p])
_func("StructSetBody", None, [TypeRef,
ctypes.POINTER(TypeRef),
ctypes.c_uint, ctypes.c_bool])
_func("GetStructName", ctypes.c_char_p, [TypeRef])
# Blocks
class OpaqueBasicBlock(ctypes.Structure):
pass
BasicBlockRef = ctypes.POINTER(OpaqueBasicBlock)
_func("AppendBasicBlock", BasicBlockRef, [ValueRef, ctypes.c_char_p])
_func("MoveBasicBlockBefore", None, [BasicBlockRef, BasicBlockRef])
_func("MoveBasicBlockAfter", None, [BasicBlockRef, BasicBlockRef])
_func("GetBasicBlockParent", ValueRef, [BasicBlockRef])
_func("DeleteBasicBlock", None, [BasicBlockRef])
_func("GetEntryBasicBlock", BasicBlockRef, [ValueRef])
_func("GetFirstInstruction", ValueRef, [BasicBlockRef])
_func("GetLastInstruction", ValueRef, [BasicBlockRef])
# Phi expressions
_func("AddIncoming", None, [ValueRef, ctypes.POINTER(ValueRef),
ctypes.POINTER(BasicBlockRef),
ctypes.c_uint])
# Builder
class OpaqueBuilder(ctypes.Structure):
pass
BuilderRef = ctypes.POINTER(OpaqueBuilder)
_func("CreateBuilder", BuilderRef)
_func("DisposeBuilder", None, [BuilderRef])
_func("PositionBuilderAtEnd", None, [BuilderRef, BasicBlockRef])
_func("PositionBuilder", None, [BuilderRef, BasicBlockRef, ValueRef])
_func("GetInsertBlock", BasicBlockRef, [BuilderRef])
_func("GetParentModule__", ModuleRef, [BuilderRef])
# Terminators
_func("BuildRetVoid", ValueRef, [BuilderRef])
_func("BuildRet", ValueRef, [BuilderRef, ValueRef])
_func("BuildBr", ValueRef, [BuilderRef, BasicBlockRef])
_func("BuildCondBr", ValueRef, [BuilderRef, ValueRef,
BasicBlockRef, BasicBlockRef])
# Float Expressions
for name in ("BuildFAdd", "BuildFSub", "BuildFMul", "BuildFDiv", "BuildFRem"):
_func(name, ValueRef, [BuilderRef, ValueRef, ValueRef, ctypes.c_char_p])
_func("BuildFNeg", ValueRef, [BuilderRef, ValueRef, ctypes.c_char_p])
_func("BuildFCmp", ValueRef, [BuilderRef, ctypes.c_int, ValueRef, ValueRef, ctypes.c_char_p])
# Int expressions
for name in ("BuildAdd", "BuildSub", "BuildMul", "BuildUDiv", "BuildSDiv", "BuildSRem", "BuildLShr"):
_func(name, ValueRef, [BuilderRef, ValueRef, ValueRef, ctypes.c_char_p])
_func("BuildNeg", ValueRef, [BuilderRef, ValueRef, ctypes.c_char_p])
_func("BuildICmp", ValueRef, [BuilderRef, ctypes.c_int, ValueRef, ValueRef, ctypes.c_char_p])
_func("BuildNot", ValueRef, [BuilderRef, ValueRef, ctypes.c_char_p])
for name in ("BuildAnd", "BuildOr", "BuildXor"):
_func(name, ValueRef, [BuilderRef, ValueRef, ValueRef, ctypes.c_char_p])
# Memory
_func("BuildLoad", ValueRef, [BuilderRef, ValueRef, ctypes.c_char_p])
_func("BuildStore", ValueRef, [BuilderRef, ValueRef, ValueRef])
_func("BuildGEP", ValueRef, [BuilderRef, ValueRef,
ctypes.POINTER(ValueRef), ctypes.c_uint,
ctypes.c_char_p])
_func("BuildStructGEP", ValueRef, [BuilderRef, ValueRef, ctypes.c_uint, ctypes.c_char_p])
_func("BuildAlloca", ValueRef, [BuilderRef, TypeRef, ctypes.c_char_p])
_func("BuildArrayAlloca", ValueRef, [BuilderRef, TypeRef, ValueRef, ctypes.c_char_p])
_func("BuildMalloc", ValueRef, [BuilderRef, TypeRef, ctypes.c_char_p])
_func("BuildArrayMalloc", ValueRef, [BuilderRef, TypeRef, ValueRef, ctypes.c_char_p])
# Casting
Trunc = 30
ZExt = 31
FPToSI = 34
SIToFP = 36
FPTrunc = 37
FPExt = 38
BitCast = 41
_func("BuildCast", ValueRef, [BuilderRef, Opcode, ValueRef, TypeRef, ctypes.c_char_p])
_func("BuildPointerCast", ValueRef, [BuilderRef, ValueRef, TypeRef, ctypes.c_char_p])
# Misc
_func("BuildPhi", ValueRef, [BuilderRef, TypeRef, ctypes.c_char_p])
_func("BuildCall", ValueRef, [BuilderRef, ValueRef,
ctypes.POINTER(ValueRef), ctypes.c_uint,
ctypes.c_char_p])
_func("BuildSelect", ValueRef, [BuilderRef, ValueRef, ValueRef, ValueRef, ctypes.c_char_p])
_func("BuildExtractElement", ValueRef, [BuilderRef, ValueRef, ValueRef, ctypes.c_char_p])
_func("BuildInsertElement", ValueRef, [BuilderRef, ValueRef, ValueRef, ValueRef, ctypes.c_char_p])
# Analysis
VerifierFailureAction = ctypes.c_int
(AbortProcessAction, PrintMessageAction, ReturnStatusAction) = range(3)
_func("VerifyFunction", Bool, [ValueRef, VerifierFailureAction])
# Metadata
_func("MDString", ValueRef, [ctypes.c_char_p, ctypes.c_uint])
_func("MDNode__", ValueRef, [ctypes.POINTER(ValueRef), ctypes.c_uint])
_func("SetNamedMetadata__", None, [ValueRef, ctypes.c_char_p, ValueRef])
# Target
class OpaqueTarget(ctypes.Structure):
pass
class OpaqueTargetData(ctypes.Structure):
pass
class OpaqueTargetMachine(ctypes.Structure):
pass
TargetRef = ctypes.POINTER(OpaqueTarget)
TargetDataRef = ctypes.POINTER(OpaqueTargetData)
TargetMachineRef = ctypes.POINTER(OpaqueTargetMachine)
_func("InitializeNativeTarget__", Bool, [])
_func("GetDefaultTargetTriple__", owned_c_char_p, [])
_func("LookupTarget__", TargetRef, [ctypes.c_char_p, ctypes.POINTER(ctypes.c_char_p)])
_func("GetFirstTarget", TargetRef, [])
_func("GetNextTarget", TargetRef, [TargetRef])
_func("GetTargetDescription", ctypes.c_char_p, [TargetRef])
_func("CreateTargetMachine", TargetMachineRef,
[TargetRef, ctypes.c_char_p, ctypes.c_char_p,
ctypes.c_char_p, ctypes.c_int,
ctypes.c_int, ctypes.c_int])
_func("DisposeTargetMachine", None, [TargetMachineRef])
_func("GetTargetMachineData", TargetDataRef, [TargetMachineRef])
_func("TargetMachineEmitToFile", Bool, [TargetMachineRef, ModuleRef,
ctypes.c_char_p, ctypes.c_int,
ctypes.POINTER(ctypes.c_char_p)])
# Pass Managers
class OpaquePassManager(ctypes.Structure):
pass
class OpaquePassManagerBuilder(ctypes.Structure):
pass
PassManagerRef = ctypes.POINTER(OpaquePassManager)
PassManagerBuilderRef = ctypes.POINTER(OpaquePassManagerBuilder)
_func("CreatePassManager", PassManagerRef, [])
_func("DisposePassManager", None, [PassManagerRef])
_func("RunPassManager", Bool, [PassManagerRef, ModuleRef])
_func("PassManagerBuilderCreate", PassManagerBuilderRef, [])
_func("PassManagerBuilderDispose", None, [PassManagerBuilderRef])
_func("PassManagerBuilderSetOptLevel", None, [PassManagerBuilderRef, ctypes.c_uint])
_func("PassManagerBuilderUseInlinerWithThreshold", None, [PassManagerBuilderRef, ctypes.c_uint])
_func("PassManagerBuilderPopulateModulePassManager", None,
[PassManagerBuilderRef, PassManagerRef])
_func("AddTargetData", None, [TargetDataRef, PassManagerRef])
# Execution engine
class OpaqueExecutionEngine(ctypes.Structure):
pass
ExecutionEngineRef = ctypes.POINTER(OpaqueExecutionEngine)
_func("CreateJITCompilerForModule", Bool,
[ctypes.POINTER(ExecutionEngineRef), ModuleRef,
ctypes.c_uint, ctypes.POINTER(ctypes.c_char_p)])
_func("GetExecutionEngineTargetData", TargetDataRef, [ExecutionEngineRef])
_func("GetPointerToGlobal", ctypes.c_void_p, [ExecutionEngineRef, ValueRef])
_func("DisposeExecutionEngine", None, [ExecutionEngineRef])
# Memory Buffers
class OpaqueMemoryBuffer(ctypes.Structure):
pass
MemoryBufferRef = ctypes.POINTER(OpaqueMemoryBuffer)
_func("CreateMemoryBufferWithContentsOfFile", Bool,
[ctypes.c_char_p, ctypes.POINTER(MemoryBufferRef), ctypes.POINTER(ctypes.c_char_p)])
_func("DisposeMemoryBuffer", None, [MemoryBufferRef])
# Bitcode Readers
_func("ParseBitcode", Bool,
[MemoryBufferRef, ctypes.POINTER(ModuleRef), ctypes.POINTER(ctypes.c_char_p)])
# Command line options
_func("ParseEnvironmentOptions", None, [ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p])
if os.environ.get("NITROUS_LLVM_OPTS"):
ParseEnvironmentOptions("nitrous", "NITROUS_LLVM_OPTS", None)
def address_of(r):
    """Returns LLVM reference (eg. ValueRef) pointer value."""
    void_ptr = ctypes.cast(r, ctypes.c_void_p)
    return void_ptr.value
def types_equal(tx, ty):
    """Returns True if *tx* is the same LLVMTypeRef as *ty*.

    LLVM interns types: requesting the same type (e.g. IntType(32))
    always yields the same unique pointer, so comparing the raw
    pointer values suffices.
    """
    addr_x = address_of(tx)
    addr_y = address_of(ty)
    return addr_x == addr_y
def get_intrinsic(builder, name, spec):
    """Return intrinsic declaration for name and specialization types.

    *name* is the short intrinsic name ("pow" for "llvm.pow");
    *spec* is a ctypes array of TypeRef used to specialize an
    overloaded intrinsic.
    """
    module = GetParentModule__(builder)
    i = INTRINSICS["llvm.{0}".format(name)]
    return GetIntrinsicDeclaration(module, i, spec, len(spec))
def build_py_idiv(builder, a, b, name):
    """Build expression for floor integer division.

    As seen in Cython:

        long q = a / b;
        long r = a - q*b;
        q -= ((r != 0) & ((r ^ b) < 0));
        return q;
    """
    # C-style truncating quotient and its matching remainder.
    q = BuildSDiv(builder, a, b, name + "_q")
    r = BuildSub(builder, a, BuildMul(builder, q, b, name + "_r"), name + "_sub")
    # TODO Assumes signed integers
    zero = ConstNull(TypeOf(r))
    # (r != 0) & ((r ^ b) < 0): remainder is non-zero and its sign
    # differs from the divisor's — exactly the cases where truncation
    # toward zero disagrees with flooring, so q must drop by one.
    q_sub = BuildAnd(builder,
                     BuildICmp(builder, IntNE, r, zero, name + "_cmp_1"),
                     BuildICmp(builder, IntSLT,
                               BuildXor(builder, r, b, name + "_xor"),
                               zero, name + "_cmp_2"),
                     name + "_q_and")
    # Widen the i1 correction flag to the operand type, then subtract.
    return BuildSub(builder, q,
                    BuildCast(builder, ZExt, q_sub, TypeOf(a), name + "_cast"),
                    name)
def build_pow(builder, a, b, name):
    """Builds an expression for a ** b via the llvm.pow intrinsic,
    specialized to the type of *a*."""
    # Renamed local so the builtin `pow` is not shadowed; also dropped
    # the pointless intermediate assignment before return.
    pow_decl = get_intrinsic(builder, "pow", (TypeRef * 1)(TypeOf(a)))
    # NOTE(review): the call is labeled "call", not *name* — confirm
    # whether the `name` parameter was meant to be used here.
    return BuildCall(builder, pow_decl, (ValueRef * 2)(a, b), 2, "call")
# Modulo (%) implementation for integers and floats
#
# r = a (s|f)rem b
# if (a >> (sizeof(a) * 8)) ^ (b >> (sizeof(b) * 8)) == 1:
# return -r
# else
# return r
def _mod_scale(builder, int_a, int_b):
    """When building `mod`, returns True if the result of `rem` should
    be scaled by -1, False otherwise.

    Assume *int_a* and *int_b* are integers of equal size.
    """
    ty = TypeOf(int_a)
    size = GetIntTypeWidth(ty)
    one = ConstInt(ty, 1, True)
    # Logical shift right by (width - 1) isolates the sign bit.
    sign_shift = ConstInt(ty, size - 1, True)
    sign_a = BuildLShr(builder, int_a, sign_shift, "")
    cond_a = BuildICmp(builder, IntEQ, sign_a, one, "")
    sign_b = BuildLShr(builder, int_b, sign_shift, "")
    cond_b = BuildICmp(builder, IntEQ, sign_b, one, "")
    # XOR of the two sign flags: scale exactly when the signs differ.
    return BuildXor(builder, cond_a, cond_b, "")
def build_smod(builder, a, b, name):
    """Builds expression for signed int modulo.

    Computes `a srem b`, then selects the negated remainder when the
    operands' sign bits differ (see _mod_scale and the comment block
    above it).
    """
    rem = BuildSRem(builder, a, b, "")
    neg_rem = BuildMul(builder, rem, ConstInt(TypeOf(rem), -1, True), "")
    return BuildSelect(builder, _mod_scale(builder, a, b), neg_rem, rem, name)
def build_fmod(builder, a, b, name):
    """Builds expression for floating point modulo.

    Computes `a frem b`, then selects the negated remainder when the
    operands' sign bits differ.  Raises TypeError for non-float/double
    operand types.
    """
    kind = GetTypeKind(TypeOf(a))
    if kind == FloatTypeKind:
        size = 32
    elif kind == DoubleTypeKind:
        size = 64
    else:
        raise TypeError("Cannot build %: unknown float type kind")
    # Bitcast the operands to same-width integers so _mod_scale can
    # inspect the IEEE sign bits directly.
    int_ty = IntType(size)
    int_a = BuildCast(builder, BitCast, a, int_ty, "")
    int_b = BuildCast(builder, BitCast, b, int_ty, "")
    rem = BuildFRem(builder, a, b, "")
    neg_rem = BuildFMul(builder, rem, ConstReal(TypeOf(rem), -1), "")
    return BuildSelect(builder, _mod_scale(builder, int_a, int_b), neg_rem, rem, name)
def link_modules(dst, src):
    """Link source module into destination one.

    Source module is destroyed.

    Raises RuntimeError carrying LLVM's diagnostic when linking fails.
    """
    message = ctypes.c_char_p()
    status = LinkModules__(dst, src, LinkerDestroySource, ctypes.byref(message))
    if status != 0:
        error = RuntimeError("Could not link modules: {0}".format(message.value))
        # Bug fix: this module *is* nitrous.llvm, so the previous
        # `llvm.DisposeMessage(message)` raised NameError on the error
        # path; call the module-level binding directly.
        DisposeMessage(message)
        raise error
| {
"content_hash": "9ae1e87e840c591ef47620fe3e7d9c58",
"timestamp": "",
"source": "github",
"line_count": 532,
"max_line_length": 101,
"avg_line_length": 31.144736842105264,
"alnum_prop": 0.6813929627617841,
"repo_name": "dtcaciuc/nitrous",
"id": "99d19cba43cf58b4dd770e04166baa355eb98185",
"size": "16569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nitrous/llvm/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "4560"
},
{
"name": "Python",
"bytes": "182339"
}
],
"symlink_target": ""
} |
import os
import shutil
import signal
import sys
import threading
import warnings
import importlib
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from py4j.java_gateway import is_instance_of
from pyspark import accumulators, since
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway, local_connect_and_auth
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer, ChunkedStream
from pyspark.storagelevel import StorageLevel
from pyspark.resource.information import ResourceInformation
from pyspark.rdd import RDD, _load_from_socket
from pyspark.taskcontext import TaskContext
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
# Public API of this module.
__all__ = ['SparkContext']


# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
    "spark.serializer.objectStreamReset": 100,
    "spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create :class:`RDD` and
broadcast variables on that cluster.
When you create a new SparkContext, at least the master and app name should
be set, either through the named parameters here or through `conf`.
Parameters
----------
master : str, optional
Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
appName : str, optional
A name for your job, to display on the cluster web UI.
sparkHome : str, optional
Location where Spark is installed on cluster nodes.
pyFiles : list, optional
Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
environment : dict, optional
A dictionary of environment variables to set on
worker nodes.
batchSize : int, optional
The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
serializer : :class:`pyspark.serializers.Serializer`, optional
The serializer for RDDs.
conf : :py:class:`pyspark.SparkConf`, optional
An object setting Spark properties.
gateway : :py:class:`py4j.java_gateway.JavaGateway`, optional
Use an existing gateway and JVM, otherwise a new JVM
will be instantiated. This is only used internally.
jsc : :py:class:`py4j.java_gateway.JavaObject`, optional
The JavaSparkContext instance. This is only used internally.
profiler_cls : type, optional
A class of custom Profiler used to do profiling
(default is :class:`pyspark.profiler.BasicProfiler`).
Notes
-----
Only one :class:`SparkContext` should be active per JVM. You must `stop()`
the active :class:`SparkContext` before creating a new one.
:class:`SparkContext` instance is not supported to share across multiple
processes out of the box, and PySpark does not guarantee multi-processing execution.
Use threads instead for concurrent processing purpose.
Examples
--------
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError: ...
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
if (conf is None or
conf.get("spark.executor.allowSparkContext", "false").lower() != "true"):
# In order to prevent SparkContext from being created in executors.
SparkContext._assert_on_driver()
self._callsite = first_spark_call() or CallSite(None, None, None)
if gateway is not None and gateway.gateway_parameters.auth_token is None:
raise ValueError(
"You are trying to pass an insecure Py4j gateway to Spark. This"
" is not allowed as it is a security risk.")
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
    def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
                 conf, jsc, profiler_cls):
        """
        Finish initializing this SparkContext: resolve the effective SparkConf,
        create (or adopt) the JVM JavaSparkContext, start the accumulator
        update server, deploy Python file dependencies, and install a SIGINT
        handler. Invoked only from __init__, which calls ``stop()`` if this
        raises.
        """
        self.environment = environment or {}
        # java gateway must have been launched at this point.
        if conf is not None and conf._jconf is not None:
            # conf has been initialized in JVM properly, so use conf directly. This represents the
            # scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
            # created and then stopped, and we create a new SparkConf and new SparkContext again)
            self._conf = conf
        else:
            self._conf = SparkConf(_jvm=SparkContext._jvm)
            if conf is not None:
                # Copy user-provided settings into the freshly created conf.
                for k, v in conf.getAll():
                    self._conf.set(k, v)
        self._batchSize = batchSize  # -1 represents an unlimited batch size
        self._unbatched_serializer = serializer
        if batchSize == 0:
            # 0 means "choose batch size automatically based on object sizes".
            self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
        else:
            self.serializer = BatchedSerializer(self._unbatched_serializer,
                                                batchSize)
        # Set any parameters passed directly to us on the conf
        if master:
            self._conf.setMaster(master)
        if appName:
            self._conf.setAppName(appName)
        if sparkHome:
            self._conf.setSparkHome(sparkHome)
        if environment:
            for key, value in environment.items():
                self._conf.setExecutorEnv(key, value)
        for key, value in DEFAULT_CONFIGS.items():
            self._conf.setIfMissing(key, value)
        # Check that we have at least the required parameters
        if not self._conf.contains("spark.master"):
            raise Exception("A master URL must be set in your configuration")
        if not self._conf.contains("spark.app.name"):
            raise Exception("An application name must be set in your configuration")
        # Read back our properties from the conf in case we loaded some of them from
        # the classpath or an external config file
        self.master = self._conf.get("spark.master")
        self.appName = self._conf.get("spark.app.name")
        self.sparkHome = self._conf.get("spark.home", None)
        for (k, v) in self._conf.getAll():
            if k.startswith("spark.executorEnv."):
                varName = k[len("spark.executorEnv."):]
                self.environment[varName] = v
        # Pin hash randomization so hashes agree between driver and executors.
        self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
        # Create the Java SparkContext through Py4J
        self._jsc = jsc or self._initialize_context(self._conf._jconf)
        # Reset the SparkConf to the one actually used by the SparkContext in JVM.
        self._conf = SparkConf(_jconf=self._jsc.sc().conf())
        # Create a single Accumulator in Java that we'll send all our updates through;
        # they will be passed back to us through a TCP server
        auth_token = self._gateway.gateway_parameters.auth_token
        self._accumulatorServer = accumulators._start_update_server(auth_token)
        (host, port) = self._accumulatorServer.server_address
        self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
        self._jsc.sc().register(self._javaAccumulator)
        # If encryption is enabled, we need to setup a server in the jvm to read broadcast
        # data via a socket.
        # scala's mangled names w/ $ in them require special treatment.
        self._encryption_enabled = self._jvm.PythonUtils.isEncryptionEnabled(self._jsc)
        os.environ["SPARK_AUTH_SOCKET_TIMEOUT"] = \
            str(self._jvm.PythonUtils.getPythonAuthSocketTimeout(self._jsc))
        os.environ["SPARK_BUFFER_SIZE"] = \
            str(self._jvm.PythonUtils.getSparkBufferSize(self._jsc))
        self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python3')
        self.pythonVer = "%d.%d" % sys.version_info[:2]
        # Broadcast's __reduce__ method stores Broadcast instances here.
        # This allows other code to determine which Broadcast instances have
        # been pickled, so it can determine which Java broadcast objects to
        # send.
        self._pickled_broadcast_vars = BroadcastPickleRegistry()
        SparkFiles._sc = self
        root_dir = SparkFiles.getRootDirectory()
        sys.path.insert(1, root_dir)
        # Deploy any code dependencies specified in the constructor
        self._python_includes = list()
        for path in (pyFiles or []):
            self.addPyFile(path)
        # Deploy code dependencies set by spark-submit; these will already have been added
        # with SparkContext.addFile, so we just need to add them to the PYTHONPATH
        for path in self._conf.get("spark.submit.pyFiles", "").split(","):
            if path != "":
                (dirname, filename) = os.path.split(path)
                try:
                    filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
                    if not os.path.exists(filepath):
                        # In case of YARN with shell mode, 'spark.submit.pyFiles' files are
                        # not added via SparkContext.addFile. Here we check if the file exists,
                        # try to copy and then add it to the path. See SPARK-21945.
                        shutil.copyfile(path, filepath)
                    if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
                        self._python_includes.append(filename)
                        sys.path.insert(1, filepath)
                except Exception:
                    # Best-effort: a bad entry must not abort context creation.
                    warnings.warn(
                        "Failed to add file [%s] specified in 'spark.submit.pyFiles' to "
                        "Python path:\n %s" % (path, "\n ".join(sys.path)),
                        RuntimeWarning)
        # Create a temporary directory inside spark.local.dir:
        local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
        self._temp_dir = \
            self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
            .getAbsolutePath()
        # profiling stats collected for each PythonRDD
        if self._conf.get("spark.python.profile", "false") == "true":
            dump_path = self._conf.get("spark.python.profile.dump", None)
            self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
        else:
            self.profiler_collector = None
        # create a signal handler which would be invoked on receiving SIGINT
        def signal_handler(signal, frame):
            self.cancelAllJobs()
            raise KeyboardInterrupt()
        # see http://stackoverflow.com/questions/23206787/
        if isinstance(threading.current_thread(), threading._MainThread):
            signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
    def _repr_html_(self):
        """Return an HTML summary of this context (rendered by Jupyter/IPython)."""
        return """
        <div>
            <p><b>SparkContext</b></p>
            <p><a href="{sc.uiWebUrl}">Spark UI</a></p>
            <dl>
              <dt>Version</dt>
                <dd><code>v{sc.version}</code></dd>
              <dt>Master</dt>
                <dd><code>{sc.master}</code></dd>
              <dt>AppName</dt>
                <dd><code>{sc.appName}</code></dd>
            </dl>
        </div>
        """.format(
            sc=self
        )
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
    def __enter__(self):
        """
        Enable 'with SparkContext(...) as sc: app(sc)' syntax.
        Returns
        -------
        SparkContext
            This context itself, unchanged.
        """
        return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
Parameters
----------
conf : :py:class:`pyspark.SparkConf`, optional
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
    def setLogLevel(self, logLevel):
        """
        Control our logLevel. This overrides any user-defined log settings.
        Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
        Parameters
        ----------
        logLevel : str
            The desired log level as a string.
        """
        self._jsc.setLogLevel(logLevel)
    @classmethod
    def setSystemProperty(cls, key, value):
        """
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
        Parameters
        ----------
        key : str
            The system property name to set in the JVM.
        value : str
            The value to assign to the property.
        """
        # Only the gateway/JVM need to exist; no SparkContext instance required.
        SparkContext._ensure_initialized()
        SparkContext._jvm.java.lang.System.setProperty(key, value)
    @property
    def version(self):
        """
        The version of Spark on which this application is running.
        """
        # Version string as reported by the JVM JavaSparkContext.
        return self._jsc.version()
    @property
    def applicationId(self):
        """
        A unique identifier for the Spark application.
        Its format depends on the scheduler implementation.
        * in case of local spark app something like 'local-1433865536131'
        * in case of YARN something like 'application_1433865536131_34483'
        Examples
        --------
        >>> sc.applicationId  # doctest: +ELLIPSIS
        'local-...'
        """
        # Read from the underlying JVM SparkContext.
        return self._jsc.sc().applicationId()
    @property
    def uiWebUrl(self):
        """Return the URL of the SparkUI instance started by this SparkContext"""
        # NOTE(review): ``.get()`` is called on the result of ``uiWebUrl()``,
        # which looks like a Scala Option — presumably this errors via py4j
        # when the UI is disabled. TODO confirm against the Scala API.
        return self._jsc.sc().uiWebUrl().get()
    @property
    def startTime(self):
        """Return the epoch time when the Spark Context was started."""
        # Value comes straight from the JVM context; presumably milliseconds
        # since the epoch — TODO confirm units against the Scala API.
        return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
    def stop(self):
        """
        Shut down the SparkContext.
        Safe to call on a partially-initialized context: each teardown step is
        guarded with getattr, and the active-context slot is always cleared.
        """
        if getattr(self, "_jsc", None):
            try:
                self._jsc.stop()
            except Py4JError:
                # Case: SPARK-18523
                warnings.warn(
                    'Unable to cleanly shutdown Spark JVM process.'
                    ' It is possible that the process has crashed,'
                    ' been killed or may also be in a zombie state.',
                    RuntimeWarning
                )
            finally:
                # Drop the handle even if stop() raised, so we never retry it.
                self._jsc = None
        if getattr(self, "_accumulatorServer", None):
            self._accumulatorServer.shutdown()
            self._accumulatorServer = None
        with SparkContext._lock:
            SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
Parameters
----------
start : int
the start value
end : int, optional
the end value (exclusive)
step : int, optional
the incremental step (default: 1)
numSlices : int, optional
the number of partitions of the new RDD
Returns
-------
:py:class:`pyspark.RDD`
An RDD of int
Examples
--------
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(range(start, end, step), numSlices)
    def parallelize(self, c, numSlices=None):
        """
        Distribute a local Python collection to form an RDD. Using range
        is recommended if the input represents a range for performance.
        Parameters
        ----------
        c : collection
            A local Python collection (or range) to distribute.
        numSlices : int, optional
            The number of partitions of the new RDD (defaults to
            ``self.defaultParallelism``).
        Examples
        --------
        >>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
        [[0], [2], [3], [4], [6]]
        >>> sc.parallelize(range(0, 6, 2), 5).glom().collect()
        [[], [0], [], [2], [4]]
        """
        numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
        if isinstance(c, range):
            # ranges are generated lazily per-partition instead of materialized.
            size = len(c)
            if size == 0:
                return self.parallelize([], numSlices)
            step = c[1] - c[0] if size > 1 else 1
            start0 = c[0]
            def getStart(split):
                # first element of partition `split` within the overall range
                return start0 + int((split * size / numSlices)) * step
            def f(split, iterator):
                # it's an empty iterator here but we need this line for triggering the
                # logic of signal handling in FramedSerializer.load_stream, for instance,
                # SpecialLengths.END_OF_DATA_SECTION in _read_with_length. Since
                # FramedSerializer.load_stream produces a generator, the control should
                # at least be in that function once. Here we do it by explicitly converting
                # the empty iterator to a list, thus make sure worker reuse takes effect.
                # See more details in SPARK-26549.
                assert len(list(iterator)) == 0
                return range(getStart(split), getStart(split + 1), step)
            return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
        # Make sure we distribute data evenly if it's smaller than self.batchSize
        if "__len__" not in dir(c):
            c = list(c) # Make it a list so we can compute its length
        batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
        serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
        def reader_func(temp_filename):
            # non-encrypted path: JVM reads the serialized data back from disk
            return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
        def createRDDServer():
            # encrypted path: JVM server accepts the data over a socket
            return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
        jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
        return RDD(jrdd, self, serializer)
    def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
        """
        Using py4j to send a large dataset to the jvm is really slow, so we use either a file
        or a socket if we have encryption enabled.
        Parameters
        ----------
        data : object
            object to be serialized
        serializer : :py:class:`pyspark.serializers.Serializer`
        reader_func : function
            A function which takes a filename and reads in the data in the jvm and
            returns a JavaRDD. Only used when encryption is disabled.
        createRDDServer : function
            A function which creates a PythonRDDServer in the jvm to
            accept the serialized data, for use when encryption is enabled.
        """
        if self._encryption_enabled:
            # with encryption, we open a server in java and send the data directly
            server = createRDDServer()
            (sock_file, _) = local_connect_and_auth(server.port(), server.secret())
            chunked_out = ChunkedStream(sock_file, 8192)
            serializer.dump_stream(data, chunked_out)
            chunked_out.close()
            # this call will block until the server has read all the data and processed it (or
            # throws an exception)
            r = server.getResult()
            return r
        else:
            # without encryption, we serialize to a file, and we read the file in java and
            # parallelize from there.
            tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
            try:
                try:
                    serializer.dump_stream(data, tempFile)
                finally:
                    # close before handing the path to the JVM reader
                    tempFile.close()
                return reader_func(tempFile.name)
            finally:
                # we eagerly reads the file so we can delete right after.
                os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using :meth:`RDD.saveAsPickleFile` method.
Examples
--------
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
The text files must be encoded as UTF-8.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
Examples
--------
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
The text files must be encoded as UTF-8.
If `use_unicode` is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files:
.. code-block:: text
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do ``rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")``,
then ``rdd`` contains:
.. code-block:: text
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
Notes
-----
Small files are preferred, as each file will be loaded fully in memory.
Examples
--------
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[('.../1.txt', '1'), ('.../2.txt', '2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
Notes
-----
Small files are preferred, large file is also allowable, but may cause bad performance.
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
Parameters
----------
path : str
Directory to the input data files
recordLength : int
The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. :class:`PickleSerializer` is used to deserialize pickled objects on the Python side
Parameters
----------
path : str
path to sequencefile
keyClass: str, optional
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str, optional
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
valueConverter : str, optional
fully qualifiedname of a function returning value WritableConverter
minSplits : int, optional
minimum splits in dataset (default min(2, sc.defaultParallelism))
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java
Parameters
----------
path : str
path to Hadoop file
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
None by default
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
None by default
conf : dict, optional
Hadoop configuration, passed in as a dict
None by default
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
    def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
                   valueConverter=None, conf=None, batchSize=0):
        """
        Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
        a local file system (available on all nodes), or any Hadoop-supported file system URI.
        The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
        A Hadoop configuration can be passed in as a Python dict. This will be converted into a
        Configuration in Java.
        Parameters
        ----------
        path : str
            path to Hadoop file
        inputFormatClass : str
            fully qualified classname of Hadoop InputFormat
            (e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
        keyClass : str
            fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
        valueClass : str
            fully qualified classname of value Writable class
            (e.g. "org.apache.hadoop.io.LongWritable")
        keyConverter : str, optional
            fully qualified name of a function returning key WritableConverter
            (None by default)
        valueConverter : str, optional
            fully qualified name of a function returning value WritableConverter
            (None by default)
        conf : dict, optional
            Hadoop configuration, passed in as a dict (None by default)
        batchSize : int, optional
            The number of Python objects represented as a single
            Java object. (default 0, choose batchSize automatically)
        """
        jconf = self._dictToJavaMap(conf)
        jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
                                              valueClass, keyConverter, valueConverter,
                                              jconf, batchSize)
        return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for :py:meth:`SparkContext.sequenceFile`.
Parameters
----------
inputFormatClass : str
fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
keyClass : str
fully qualified classname of key Writable class (e.g. "org.apache.hadoop.io.Text")
valueClass : str
fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
keyConverter : str, optional
fully qualified name of a function returning key WritableConverter
(None by default)
valueConverter : str, optional
fully qualified name of a function returning value WritableConverter
(None by default)
conf : dict, optional
Hadoop configuration, passed in as a dict (None by default)
batchSize : int, optional
The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
Examples
--------
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
['Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
['Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
gw = SparkContext._gateway
jvm = SparkContext._jvm
jrdd_cls = jvm.org.apache.spark.api.java.JavaRDD
jpair_rdd_cls = jvm.org.apache.spark.api.java.JavaPairRDD
jdouble_rdd_cls = jvm.org.apache.spark.api.java.JavaDoubleRDD
if is_instance_of(gw, rdds[0]._jrdd, jrdd_cls):
cls = jrdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jpair_rdd_cls):
cls = jpair_rdd_cls
elif is_instance_of(gw, rdds[0]._jrdd, jdouble_rdd_cls):
cls = jdouble_rdd_cls
else:
cls_name = rdds[0]._jrdd.getClass().getCanonicalName()
raise TypeError("Unsupported Java RDD class %s" % cls_name)
jrdds = gw.new_array(cls, len(rdds))
for i in range(0, len(rdds)):
jrdds[i] = rdds[i]._jrdd
return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
    def broadcast(self, value):
        """
        Broadcast a read-only variable to the cluster, returning a :class:`Broadcast`
        object for reading it in distributed functions. The variable will
        be sent to each cluster only once.
        Parameters
        ----------
        value : object
            The value to broadcast.
        Returns
        -------
        :class:`Broadcast`
            A handle for reading the value in distributed functions.
        """
        return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an :class:`Accumulator` with the given initial value, using a given
:class:`AccumulatorParam` helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
    """
    Add a file to be downloaded with this Spark job on every node.
    The `path` passed can be either a local file, a file in HDFS
    (or other Hadoop-supported filesystems), or an HTTP, HTTPS or
    FTP URI.
    To access the file in Spark jobs, use :meth:`SparkFiles.get` with the
    filename to find its download location.
    A directory can be given if the recursive option is set to True.
    Currently directories are only supported for Hadoop-supported filesystems.
    Notes
    -----
    A path can be added only once. Subsequent additions of the same path are ignored.
    Examples
    --------
    >>> from pyspark import SparkFiles
    >>> path = os.path.join(tempdir, "test.txt")
    >>> with open(path, "w") as testFile:
    ...    _ = testFile.write("100")
    >>> sc.addFile(path)
    >>> def func(iterator):
    ...    with open(SparkFiles.get("test.txt")) as testFile:
    ...        fileVal = int(testFile.readline())
    ...        return [x * fileVal for x in iterator]
    >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
    [100, 200, 300, 400]
    """
    # Distribution itself is done entirely on the JVM side.
    self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
    """
    Add a .py or .zip dependency for all tasks to be executed on this
    SparkContext in the future. The `path` passed can be either a local
    file, a file in HDFS (or other Hadoop-supported filesystems), or an
    HTTP, HTTPS or FTP URI.
    Notes
    -----
    A path can be added only once. Subsequent additions of the same path are ignored.
    """
    # Ship the file to executors like any other job file first.
    self.addFile(path)
    (dirname, filename) = os.path.split(path)  # dirname may be directory or HDFS/S3 prefix
    # filename[-4:] compares the 4-character suffix (e.g. ".zip") against
    # PACKAGE_EXTENSIONS -- assumes every entry there is 4 chars; TODO confirm.
    if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
        self._python_includes.append(filename)
        # for tests in local mode
        sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
        importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
    """
    Set the directory under which RDDs are going to be checkpointed. The
    directory must be an HDFS path if running on a cluster.
    """
    self._jsc.sc().setCheckpointDir(dirName)
@since(3.1)
def getCheckpointDir(self):
    """
    Return the directory where RDDs are checkpointed. Returns None if no
    checkpoint directory has been set.
    """
    # The JVM returns an Option-like object (isEmpty/get); unwrap only when set.
    if not self._jsc.sc().getCheckpointDir().isEmpty():
        return self._jsc.sc().getCheckpointDir().get()
    return None
def _getJavaStorageLevel(self, storageLevel):
    """
    Return a Java StorageLevel based on a pyspark.StorageLevel.

    Raises
    ------
    TypeError
        If ``storageLevel`` is not a :class:`pyspark.StorageLevel`.
    """
    if not isinstance(storageLevel, StorageLevel):
        # TypeError is the accurate category for a wrong argument type; it is
        # a subclass of Exception, so existing callers keep working.
        raise TypeError("storageLevel must be of type pyspark.StorageLevel")
    newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
    return newStorageLevel(storageLevel.useDisk,
                           storageLevel.useMemory,
                           storageLevel.useOffHeap,
                           storageLevel.deserialized,
                           storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
    """
    Assigns a group ID to all the jobs started by this thread until the group ID is set to a
    different value or cleared.
    Often, a unit of execution in an application consists of multiple Spark actions or jobs.
    Application programmers can use this method to group all those jobs together and give a
    group description. Once set, the Spark web UI will associate such jobs with this group.
    The application can use :meth:`SparkContext.cancelJobGroup` to cancel all
    running jobs in this group.
    Notes
    -----
    If interruptOnCancel is set to true for the job group, then job cancellation will result
    in Thread.interrupt() being called on the job's executor threads. This is useful to help
    ensure that the tasks are actually stopped in a timely manner, but is off by default due
    to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
    Currently, setting a group ID (set to local properties) with multiple threads
    does not properly work. Internally threads on PVM and JVM are not synced, and JVM
    thread can be reused for multiple threads on PVM, which fails to isolate local
    properties for each thread on PVM.
    To avoid this, enable the pinned thread mode by setting ``PYSPARK_PIN_THREAD``
    environment variable to ``true`` and uses :class:`pyspark.InheritableThread`.
    Examples
    --------
    >>> import threading
    >>> from time import sleep
    >>> result = "Not Set"
    >>> lock = threading.Lock()
    >>> def map_func(x):
    ...     sleep(100)
    ...     raise Exception("Task should have been cancelled")
    >>> def start_job(x):
    ...     global result
    ...     try:
    ...         sc.setJobGroup("job_to_cancel", "some description")
    ...         result = sc.parallelize(range(x)).map(map_func).collect()
    ...     except Exception as e:
    ...         result = "Cancelled"
    ...     lock.release()
    >>> def stop_job():
    ...     sleep(5)
    ...     sc.cancelJobGroup("job_to_cancel")
    >>> suppress = lock.acquire()
    >>> suppress = threading.Thread(target=start_job, args=(10,)).start()
    >>> suppress = threading.Thread(target=stop_job).start()
    >>> suppress = lock.acquire()
    >>> print(result)
    Cancelled
    """
    # Delegates to the JavaSparkContext; per the notes above, the group is a
    # per-thread local property on the JVM side.
    self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
    """
    Set a local property that affects jobs submitted from this thread, such as the
    Spark fair scheduler pool.
    Notes
    -----
    Currently, setting a local property with multiple threads does not properly work.
    Internally threads on PVM and JVM are not synced, and JVM thread
    can be reused for multiple threads on PVM, which fails to isolate local properties
    for each thread on PVM.
    To avoid this, enable the pinned thread mode by setting ``PYSPARK_PIN_THREAD``
    environment variable to ``true`` and uses :class:`pyspark.InheritableThread`.
    """
    # Thin delegate to the JavaSparkContext.
    self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
    """
    Get a local property set in this thread, or null if it is missing. See
    :meth:`setLocalProperty`.
    """
    return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
    """
    Set a human readable description of the current job.
    Notes
    -----
    Currently, setting a job description (set to local properties) with multiple
    threads does not properly work. Internally threads on PVM and JVM are not synced,
    and JVM thread can be reused for multiple threads on PVM, which fails to isolate
    local properties for each thread on PVM.
    To avoid this, enable the pinned thread mode by setting ``PYSPARK_PIN_THREAD``
    environment variable to ``true`` and uses :class:`pyspark.InheritableThread`.
    """
    self._jsc.setJobDescription(value)
def sparkUser(self):
    """
    Get SPARK_USER for user who is running SparkContext.
    """
    return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
    """
    Cancel active jobs for the specified group. See :meth:`SparkContext.setJobGroup`
    for more information.
    """
    self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
    """
    Cancel all jobs that have been scheduled or are running.
    """
    self._jsc.sc().cancelAllJobs()
def statusTracker(self):
    """
    Return :class:`StatusTracker` object
    """
    # Wrap the JVM status tracker in the Python-side StatusTracker facade.
    return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
    """
    Executes the given partitionFunc on the specified set of partitions,
    returning the result as an array of elements.
    If 'partitions' is not specified, this will run over all partitions.
    Examples
    --------
    >>> myRDD = sc.parallelize(range(6), 3)
    >>> sc.runJob(myRDD, lambda part: [x * x for x in part])
    [0, 1, 4, 9, 16, 25]
    >>> myRDD = sc.parallelize(range(6), 3)
    >>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
    [0, 1, 16, 25]
    """
    if partitions is None:
        # Default: every partition of the RDD.
        partitions = range(rdd._jrdd.partitions().size())
    # Implementation note: This is implemented as a mapPartitions followed
    # by runJob() in order to avoid having to pass a Python lambda into
    # SparkContext#runJob.
    mappedRDD = rdd.mapPartitions(partitionFunc)
    # runJob returns socket connection info; results are streamed back and
    # deserialized locally (see _load_from_socket).
    sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
    return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
    """ Print the profile stats to stdout """
    # Profiling is only collected when spark.python.profile is enabled.
    if self.profiler_collector is not None:
        self.profiler_collector.show_profiles()
    else:
        raise RuntimeError("'spark.python.profile' configuration must be set "
                           "to 'true' to enable Python profile.")
def dump_profiles(self, path):
    """ Dump the profile stats into directory `path`
    """
    if self.profiler_collector is not None:
        self.profiler_collector.dump_profiles(path)
    else:
        raise RuntimeError("'spark.python.profile' configuration must be set "
                           "to 'true' to enable Python profile.")
def getConf(self):
    """
    Return a copy of this SparkContext's configuration as a :class:`SparkConf`.
    """
    # Copy into a fresh SparkConf so callers cannot mutate self._conf.
    conf = SparkConf()
    conf.setAll(self._conf.getAll())
    return conf
@property
def resources(self):
    """
    Return the resources reported by the JVM context as a dict mapping
    resource name to :class:`ResourceInformation` (name plus address list).
    """
    resources = {}
    jresources = self._jsc.resources()
    for x in jresources:
        # Hoist the repeated jresources[x] lookup; each lookup crosses into the JVM.
        jresource = jresources[x]
        name = jresource.name()
        # list() materializes the Java address collection in one pass instead
        # of an element-by-element copy loop.
        addrs = list(jresource.addresses())
        resources[name] = ResourceInformation(name, addrs)
    return resources
@staticmethod
def _assert_on_driver():
    """
    Called to ensure that SparkContext is created only on the Driver.

    Raises
    ------
    RuntimeError
        If a SparkContext is about to be created in executors.
    """
    # TaskContext.get() returns non-None only inside a running task.
    if TaskContext.get() is not None:
        # RuntimeError (a subclass of Exception) is more precise than a bare
        # Exception and remains backward-compatible for callers.
        raise RuntimeError("SparkContext should only be created and accessed on the driver.")
def _test():
    """Run this module's doctests against a local 4-core SparkContext."""
    import atexit
    import doctest
    import tempfile
    globs = globals().copy()
    # The doctests above reference `sc` and `tempdir` from their globals.
    globs['sc'] = SparkContext('local[4]', 'PythonTest')
    globs['tempdir'] = tempfile.mkdtemp()
    # Clean the scratch directory at interpreter exit, after all tests ran.
    atexit.register(lambda: shutil.rmtree(globs['tempdir']))
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)
if __name__ == "__main__":
    _test()
| {
"content_hash": "debe7f68b43930fdffed4e03e90df18e",
"timestamp": "",
"source": "github",
"line_count": 1278,
"max_line_length": 99,
"avg_line_length": 41.91940532081377,
"alnum_prop": 0.6069475295391336,
"repo_name": "BryanCutler/spark",
"id": "79fdd22ab13fd9205aeb5ca40f26d71ce9cd3185",
"size": "54358",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/context.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "31536"
},
{
"name": "Batchfile",
"bytes": "24063"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23868"
},
{
"name": "HTML",
"bytes": "8567"
},
{
"name": "Java",
"bytes": "2740577"
},
{
"name": "JavaScript",
"bytes": "132645"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "3797"
},
{
"name": "PowerShell",
"bytes": "3735"
},
{
"name": "Python",
"bytes": "2073103"
},
{
"name": "R",
"bytes": "876803"
},
{
"name": "Roff",
"bytes": "27922"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "20944985"
},
{
"name": "Shell",
"bytes": "151467"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
} |
"""
Crawl Reddit for moderator and subscriber # data and insert it into the database.
"""
from bs4 import BeautifulSoup
import datetime
import re
import time
import urllib3
from redditchat.core.models import Room
from script_log import make_logger, log
logger = make_logger('crawl_reddit')
def get_rooms_to_crawl():
    """Return the queryset of rooms due for a crawl: rooms never crawled,
    plus rooms last crawled more than a day ago. The front page is excluded.
    """
    cutoff = datetime.datetime.now() - datetime.timedelta(days=1)
    uncrawled = Room.objects.filter(last_crawled__isnull=True)
    stale = Room.objects.exclude(last_crawled__isnull=True).filter(last_crawled__lt=cutoff)
    due = uncrawled | stale
    # Exclude Front Page:
    return due.exclude(shortname='frontpage')
def crawl_room(room, http):
    """
    See if there's a subreddit corresponding to this room. If there is,
    fill in the model's moderator list, subscriber count, and shortname_display.

    `room` is a Room model instance (saved on success and on early exits);
    `http` is a urllib3 PoolManager used for the HTTP requests.
    """
    room.last_crawled = datetime.datetime.now()
    room.save()  # Save immediately so even if it errors we don't try again too fast
    subreddit = room.shortname
    log(logger, 'debug', 'Crawling', subreddit)
    url = "http://www.reddit.com/r/%s/about/moderators/" % subreddit
    r = http.request('GET', url)
    if r.status != 200:
        if subreddit not in ['tester']:
            # We know that "tester" gives a 403 for whatever reason
            log(logger, 'error', 'Request got error:', r.status, "on url", url)
        return
    soup = BeautifulSoup(r.data)
    # Check whether subreddit exists:
    if soup.find(id='noresults'):
        # BUG FIX: these fields were previously assigned to the HTTP response
        # object ("r"), so nonexistent subreddits never had the Room cleared.
        room.moderators = room.subscribers = None
        room.save()
        return
    # Get display shortname
    try:
        title = soup.find(id='moderator-table').h1.text
    except AttributeError:
        # We couldn't find the moderator table
        log(logger, 'info', 'Could not find moderator table for:', room.shortname)
        return
    assert title.startswith('moderators of ')
    shortname_display = title.replace('moderators of ', '')
    room.shortname_display = shortname_display
    # Get number of subscribers (strip every non-digit before parsing)
    number = soup.find('div', 'side').find('span', 'number').text
    number = int(re.sub('[^0-9]', '', number))
    room.subscribers = number
    # Get moderator list
    mods = soup.find(id='moderator-table').find_all('span', 'user')
    mods = [m.a.text for m in mods]
    room.moderators = mods
    # Get image URL
    room.image_url = soup.find(id='header-img').get('src') or room.image_url or ''
    # A second request to the subreddit page is needed to get the title.
    url = "http://www.reddit.com/r/%s/" % subreddit
    r = http.request('GET', url)
    if r.status != 200:
        log(logger, 'error', 'Request got error:', r.status, "on url", url)
        return
    soup = BeautifulSoup(r.data)
    room.title = soup.title.text
    # Write
    log(logger, 'debug', 'Setting', subreddit, 'to', room.to_dict())
    room.save()
def crawl_rooms(rooms):
    """
    Crawl rooms, giving enough delay that Reddit hopefully won't block us.
    (They ask 2 seconds between requests: https://github.com/reddit/reddit/wiki/API)
    """
    # One shared connection pool for the whole run; the UA identifies us to Reddit.
    http = urllib3.PoolManager(headers={'User-Agent': 'Seddit.com - contact andrewbadr@gmail.com'})
    for room in rooms:
        crawl_room(room, http)
        # 5s > the 2s Reddit asks for, with margin.
        time.sleep(5)
def main():
    """Entry point: crawl every room that is due (see get_rooms_to_crawl)."""
    rooms_to_crawl = get_rooms_to_crawl()
    log(logger, 'debug', 'Got rooms to crawl:', rooms_to_crawl)
    crawl_rooms(rooms_to_crawl)
if __name__ == '__main__':
    main()
| {
"content_hash": "0dd2c92ba2c7dd487a2c84ea17fd67be",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 108,
"avg_line_length": 33.53398058252427,
"alnum_prop": 0.6479444122756225,
"repo_name": "reverie/seddit.com",
"id": "eb6a04e4fad4458ed9d7537bc84fc4dc2a3f14e7",
"size": "3454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/crawl_reddit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18378"
},
{
"name": "Erlang",
"bytes": "32513"
},
{
"name": "HTML",
"bytes": "23705"
},
{
"name": "JavaScript",
"bytes": "250467"
},
{
"name": "Nginx",
"bytes": "531"
},
{
"name": "Python",
"bytes": "82434"
},
{
"name": "Shell",
"bytes": "1076"
}
],
"symlink_target": ""
} |
"""
write_obs_cvs.py
================
Writes JSON CV files from the `config_tables.xls` vocabs from Climate-Grid package.
"""
# Third-party imports
import pandas
import simplejson
import numpy
# Source spreadsheet shipped with the Climate-Grid package.
CONFIG_XLS = 'config_tables.xls'
# Boilerplate merged into every generated CV JSON file.
json_tmpl = {
    "version_metadata": {
        "author": "Ag Stephens <ag.stephens@stfc.ac.uk>",
        "creation_date": "Tue Jun 06 07:45:06 2017 -0100",
        "institution_id": "STFC",
        "previous_commit": ""
    }
}
# Maps spreadsheet sheet name -> (output CV name, column used as the record key).
sheet_map = {'CF_metadata': ('variables', 'short_name'),
             'grid_properties': ('grid_properties', 'grid_name'),
             'projected_crs': ('projected_crs', 'epsg'),
             'geographic_crs': ('geographic_crs', 'epsg')}
def _serialise(name, content):
    # Write `content` under key `name` into ../../UKCP18_obs_<name>.json,
    # merged with the version_metadata template.  (Python 2 module: note
    # the print statement below.)
    output_path = "../../UKCP18_obs_{}.json".format(name)
    resp = json_tmpl.copy()
    resp[name] = content
    with open(output_path, 'w') as writer:
        simplejson.dump(resp, writer, indent=4, sort_keys=True)
    print "Wrote: {}".format(output_path)
def write_json(sheet, name, key_name):
    # Read one sheet of the config workbook and serialise it as a CV JSON
    # file, keyed by the `key_name` column; rows with an empty key are skipped.
    df = pandas.read_excel(CONFIG_XLS, sheetname=sheet, na_values="")
    t = df.T  # transposed
    variables = {}
    for i in range(t.shape[1]):
        data = dict(t[i])
        d = {}
        for key, value in data.items():
            # Normalise NaN cells to empty strings for JSON output.
            if pandas.isnull(value): value = ""
            d[key] = value
        sn = d[key_name]
        if sn:
            variables[sn] = d.copy()
    _serialise(name, variables)
def main():
    # Generate one CV JSON file per configured sheet.
    for sheet, (name, key_name) in sheet_map.items():
        write_json(sheet, name, key_name)
if __name__ == "__main__":
    main()
| {
"content_hash": "31167f030230fa761ff4c1041b13408d",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 83,
"avg_line_length": 23.231884057971016,
"alnum_prop": 0.5676855895196506,
"repo_name": "ukcp-data/UKCP18_CVs",
"id": "c02adb7686f65712ddf8c761f3e8a7c2b7d59f19",
"size": "1626",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/climate_grid_vocabs/write_obs_cvs.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "16135"
}
],
"symlink_target": ""
} |
import os
import pwd
import random
import sys
from conary import dbstore
from conary.repository import errors
from conary.repository.netrepos import netauth
from conary.server import schema
def main(argv):
    # Reset the rmake repository database and write a fresh repository
    # password file.  (Python 2 module: print statement, octal 0700.)
    # NOTE(review): checks len(argv) but then reads sys.argv[1]; equivalent
    # only when called with sys.argv, as the __main__ guard does.
    if len(argv) < 2:
        dbPath = '/srv/rmake-repos/db/sqldb'
    else:
        dbPath = sys.argv[1]
    db = getDb(path=dbPath)
    addUser(db, 'anonymous', 'anonymous')
    newPassword = ''.join([chr(random.randint(ord('A'), ord('z'))) for x in range(10)])
    # NOTE(review): the next line discards the random password generated above
    # and hard-codes 'rmake' -- looks like a debugging leftover; confirm before
    # relying on the "new password" claim printed below.
    newPassword = 'rmake'
    passwordPath = '/etc/rmake/server.d/repospassword'
    # Create the file and restrict permissions before writing the secret.
    open(passwordPath, 'w').write('')
    os.chmod(passwordPath, 0700)
    os.chown(passwordPath, pwd.getpwnam('rmake').pw_uid,
             pwd.getpwnam('rmake').pw_gid)
    open(passwordPath, 'w').write('user localhost rmake %s\n' % newPassword)
    addUser(db, 'rmake', newPassword, write=True)
    print "New password stored in database and at %s" % passwordPath
    return 0
def getDb(path):
    # DESTRUCTIVE: delete any existing repository database and contents
    # directory, then return a fresh sqlite store with the schema loaded.
    if os.path.exists(path):
        print "Deleting database"
        os.remove(path)
    if os.listdir("/srv/rmake-repos/contents/"):
        print "Deleting contents..."
        os.system("rm -rf /srv/rmake-repos/contents/*")
    # Pre-create the file so ownership can be handed to the apache user
    # before the sqlite store opens it.
    open(path, 'w')
    os.chown(path, pwd.getpwnam('apache').pw_uid,
             pwd.getpwnam('apache').pw_gid)
    db = dbstore.connect(path, driver='sqlite')
    schema.loadSchema(db, True)
    schema.setupTempTables(db)
    return db
def addUser(db, name, password=None, write=False):
    # Create (or recreate) a repository user plus a same-named role holding
    # an ACL; `write` controls write access in the ACL.
    auth = netauth.NetworkAuthorization(db, [])
    try:
        auth.userAuth.getUserIdByName(name)
    except errors.UserNotFound:  # yuck, we need a hasUser interface
        pass
    else:
        # User already exists: drop it so it is recreated with the new password.
        auth.deleteUserByName(name)
    auth.addUser(name, password)
    auth.addRole(name)
    auth.addRoleMember(name, name)
    auth.addAcl(name, None, None, write, False)
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| {
"content_hash": "79a755593b7c38c7491e7eedab63b112",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 87,
"avg_line_length": 30,
"alnum_prop": 0.6447916666666667,
"repo_name": "fedora-conary/rmake-2",
"id": "9b65e599a9d8bb3498d0e084d33ba3b5538af365",
"size": "2525",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "extra/repos/resetrepos.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "35796"
},
{
"name": "C++",
"bytes": "3953"
},
{
"name": "Python",
"bytes": "1682020"
},
{
"name": "Shell",
"bytes": "12415"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
# https://github.com/QuantConnect/Lean/blob/master/LICENSE
# NOTE(review): relative paths assume setup.py is run from its own directory.
with open('../LICENSE') as f:
    license = f.read()  # shadows the `license` builtin; harmless in a setup script
with open('README.rst') as f:
    readme = f.read()
setup(
    name='quantconnect',
    version='0.1',
    description = 'QuantConnect API',
    long_description=readme,
    author = 'QuantConnect Python Team',
    author_email = 'support@quantconnect.com',
    url='https://www.quantconnect.com/',
    license=license,
    packages = find_packages(exclude=('tests', 'docs')),
    install_requires=['requests']
)
"content_hash": "3f100e44c51b8b32b55c61f91f1f472f",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 58,
"avg_line_length": 27.904761904761905,
"alnum_prop": 0.6518771331058021,
"repo_name": "redmeros/Lean",
"id": "dcd06cc6c7f9fd9fe96ac4e2a050a251291c57f5",
"size": "1298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ApiPython/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3280"
},
{
"name": "C#",
"bytes": "5870523"
},
{
"name": "F#",
"bytes": "1723"
},
{
"name": "Java",
"bytes": "852"
},
{
"name": "Python",
"bytes": "122953"
},
{
"name": "Shell",
"bytes": "2845"
},
{
"name": "Visual Basic",
"bytes": "2448"
}
],
"symlink_target": ""
} |
from skimage import io
import pandas as pd
import fastparquet as fp
import numpy as np
import os
import tempfile
def readTiff(filename):
    """Read data from the tiff file and return a Pandas dataframe.

    One row per voxel, with columns x, y, z (indices within the volume)
    and value (the voxel intensity as int32).
    """
    im = io.imread(filename)
    # Reshape 3D to one giant 1D
    imgdata1d = im.reshape(im.shape[0] * im.shape[1] * im.shape[2])
    dataSize = im.shape[0] * im.shape[1] * im.shape[2]
    sliceSize = im.shape[2] * im.shape[1]
    # Use floor division (//) for the coordinate math: the original mixed
    # true and integer division, which only yields integer coordinates on
    # Python 2; // gives identical results there and stays correct on Python 3.
    data = {
        'x': [i % im.shape[2] for i in range(0, dataSize)],
        'y': [(i // im.shape[2]) % im.shape[1] for i in range(0, dataSize)],
        'z': [i // sliceSize for i in range(0, dataSize)],
        'value': imgdata1d.astype(np.int32),
    }
    # Convert to Pandas dataframe
    df = pd.DataFrame(data)
    return df
def writeParquet(inputFilename, df):
    """Export Pandas dataframe as Parquet"""
    # Output goes to the system temp dir, named <input basename>.parq.
    filenamePrefix = os.path.splitext(os.path.basename(inputFilename))[0]
    outFilepath = os.path.join(tempfile.gettempdir(), ''.join([filenamePrefix, '.parq']))
    fp.write(outFilepath, df, compression='GZIP')
    # Python 2 module: print statement.
    print outFilepath
    return outFilepath
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description='Script to convert data in tiff format to Parquet format')
    parser.add_argument('--tiff', dest='filename', help='input tiff file')
    args = parser.parse_args()
    # Read TIFF file and convert it into a Pandas dataframe
    df = readTiff(args.filename)
    # Export dataframe as parquet
    outFilepath = writeParquet(args.filename, df)
| {
"content_hash": "271f2b25e960fc035b03049c70e6b0b5",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 107,
"avg_line_length": 28.310344827586206,
"alnum_prop": 0.6583434835566383,
"repo_name": "OpenDataAnalytics/etl",
"id": "94238c07c86e28683e8309fee9e1edceeb3cb877",
"size": "1700",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recon_to_xyzv_parq.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "648297"
},
{
"name": "Python",
"bytes": "9241"
},
{
"name": "Shell",
"bytes": "74"
}
],
"symlink_target": ""
} |
import os
import re
from concurrent import futures
import grpc
import structlog
from google.cloud import storage
from grpc_health.v1 import health_pb2, health_pb2_grpc
from opentelemetry import propagate, trace
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
from opentelemetry.instrumentation.grpc import GrpcInstrumentorServer
from opentelemetry.propagators.cloud_trace_propagator import CloudTraceFormatPropagator
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
import shakesapp_pb2
import shakesapp_pb2_grpc
BUCKET_NAME = "dataflow-samples"
BUCKET_PREFIX = "shakespeare/"
# enable auto gRPC server trace instrumentation
GrpcInstrumentorServer().instrument()
# Structured log configuration
def field_name_modifier(_, __, event_dict):
    """structlog processor that renames 'level' to 'severity' to meet
    Cloud Logging's data model.

    The first two arguments (logger and method name) are unused. Must run
    after structlog.stdlib.add_log_level so the 'level' key exists.
    https://cloud.google.com/logging/docs/reference/v2/rpc/google.logging.v2?hl=en#google.logging.v2.LogEntry
    """
    # pop() both reads and removes 'level' in a single step.
    event_dict["severity"] = event_dict.pop("level")
    return event_dict
def get_json_logger():
    # Configure structlog for JSON output: add level, rename it to
    # 'severity' (Cloud Logging), ISO timestamp, then render as JSON.
    # Processor order matters: field_name_modifier must follow add_log_level.
    structlog.configure(
        processors=[
            structlog.stdlib.add_log_level,
            field_name_modifier,
            structlog.processors.TimeStamper("iso"),
            structlog.processors.JSONRenderer(),
        ]
    )
    return structlog.get_logger()
# Module-wide logger shared by the service and helpers below.
logger = get_json_logger()
class ShakesappService(shakesapp_pb2_grpc.ShakespeareServiceServicer):
    """gRPC servicer that counts how many lines of Shakespeare's works
    (fetched from GCS via read_files_multi) match a client query string.
    Also implements the gRPC health-check Check/Watch methods.
    """

    def __init__(self):
        super().__init__()

    def GetMatchCount(self, request, context):
        """Return the number of lines matching request.query (case-insensitive)."""
        logger.info(f"query: {request.query}")
        texts = read_files_multi()
        query = request.query.lower()
        count = 0
        # TODO: intentionally implemented in inefficient way.
        for text in texts:
            for line in text.split("\n"):
                # The query is applied as a regex against the lowercased line.
                if re.search(query, line.lower()) is not None:
                    count += 1
        logger.info(f"query '{query}' matched count: {count}")
        return shakesapp_pb2.ShakespeareResponse(match_count=count)

    def Check(self, request, context):
        """Health check: always report SERVING."""
        serving = health_pb2.HealthCheckResponse.SERVING
        return health_pb2.HealthCheckResponse(status=serving)

    def Watch(self, request, context):
        """Streaming health check is not supported."""
        unimplemented = health_pb2.HealthCheckResponse.UNIMPLEMENTED
        return health_pb2.HealthCheckResponse(status=unimplemented)
def read_files_multi():
    """Fetch the Shakespeare works from GCS, downloading blobs on a
    thread pool, and return their contents as UTF-8 strings.

    TODO: This part should be multiprocess.
    """
    client = storage.Client()
    bucket = client.get_bucket(BUCKET_NAME)
    blobs = list(client.list_blobs(bucket, prefix=BUCKET_PREFIX))
    with futures.ThreadPoolExecutor(max_workers=8) as executor:
        results = [executor.submit(blob.download_as_bytes) for blob in blobs]
    # Leaving the `with` block waits for all downloads (== executor.shutdown()).
    logger.info(f"number of files: {len(results)}")
    return [r.result().decode("utf-8") for r in results]
def serve():
    """Configure Cloud Trace export, register the services, and run the
    gRPC server until terminated."""
    # start trace exporter
    trace.set_tracer_provider(TracerProvider())
    # SimpleSpanProcessor exports each span synchronously as it ends.
    trace.get_tracer_provider().add_span_processor(
        SimpleSpanProcessor(CloudTraceSpanExporter())
    )
    # Propagate trace context in Cloud Trace's header format.
    propagate.set_global_textmap(CloudTraceFormatPropagator())
    # Add gRPC services to server
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    service = ShakesappService()
    # One servicer instance backs both the app API and the health service.
    shakesapp_pb2_grpc.add_ShakespeareServiceServicer_to_server(service, server)
    health_pb2_grpc.add_HealthServicer_to_server(service, server)
    # Start gRCP server; port comes from $PORT (default 5050).
    port = os.environ.get("PORT", "5050")
    addr = f"0.0.0.0:{port}"
    logger.info(f"starting server: {addr}")
    server.add_insecure_port(addr)
    server.start()
    server.wait_for_termination()
if __name__ == "__main__":
    serve()
| {
"content_hash": "7314ed8bc32b286d96081b134df66aa7",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 109,
"avg_line_length": 31.33582089552239,
"alnum_prop": 0.6865920457251726,
"repo_name": "GoogleCloudPlatform/opentelemetry-trace-codelab-python",
"id": "7c02a5d8383cf0432f11a7000791d6f84fa322fd",
"size": "4774",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "step3/src/server/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "8871"
},
{
"name": "Python",
"bytes": "83557"
},
{
"name": "Shell",
"bytes": "4332"
}
],
"symlink_target": ""
} |
import re
import yaml
def parse_front_matter(fileobj):
    """Parse cob front matter from the beginning of *fileobj*.

    Two formats are recognized on the first line:
    * ``# cob: key=value key=value ...`` -> dict of the pairs (or None if a
      pair is malformed);
    * ``# cob-yaml:`` -> subsequent ``#``-prefixed lines are uncommented and
      accumulated until a ``---`` marker, then parsed as YAML. A non-comment
      line before the marker aborts with None.
    Returns None when the first line matches neither format.
    """
    line = fileobj.readline().strip()
    match = re.match(r'^\s*#\s*cob:\s*(.*)$', line)
    if match:
        return _parse_oneline_dict(match.group(1))
    if re.match(r'^#\s?cob-yaml:\s*$', line):
        s = ""
        for line in fileobj:
            # Front matter must be a contiguous comment block.
            if not line.startswith('#'):
                return None
            line = line[1:]
            # Drop a single space/tab of comment padding, if present.
            if line and line[0].isspace():
                line = line[1:]
            if line.strip() == '---':
                break
            s += line
        return yaml.full_load(s)
    return None
def _parse_oneline_dict(line):
returned = {}
for element in line.split():
try:
key, value = element.split('=')
except ValueError:
return None
returned[key] = value
return returned
| {
"content_hash": "85170f308c9afc47b6ac2710a4fd4f75",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 51,
"avg_line_length": 23,
"alnum_prop": 0.4935370152761457,
"repo_name": "getweber/weber-cli",
"id": "8b0b09bb0d41a453b1ec626ef084aa65b0d134df",
"size": "851",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cob/utils/parsing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "327"
},
{
"name": "Python",
"bytes": "10717"
}
],
"symlink_target": ""
} |
"""Recursion
With an understanding of how to write and call
functions, we can now combine the two concepts in
a really nifty way called **recursion**. For
seasoned programmers, this concept will not be at
all new - please feel free to move on. Everyone
else: strap in.
Python functions, like those in many programming
languages, are _recurrent_: they can "call
themselves".
A |def| is really a sort of template: it tells you
*how something is to be done*. When you call it,
you are making it do something *specific*, because
you are providing all of the needed data as
arguments.
From inside of the function, you can call that
same template with something specific *and
different* - this is recursion.
For example, look at the |factorial| function in
the code window.
It starts with a **base case**, which is usually a
really easy version of the problem, where you know
the answer right away. For non-easy versions of the
problem, it then defines a **recursion**, where
it calls itself with a smaller version of the
problem and uses that to compute the answer.
Exercises
- Uncomment the |print| statements inside of |factorial|
(above and below |smaller_problem|) to see what
is happening.
"""
__doc__ = """Introduction to Recursion
The "factorial" of something is formed by
multiplying all of the integers from 1 to the
given number, like this:
factorial(5) == 5 * 4 * 3 * 2 * 1
You can do this recursively by noting that, e.g.,
factorial(5) == 5 * factorial(4)
This can't go forever, because we know that
factorial(1) == 1
See below.
"""
def factorial(n):
    # Base case: 1! == 1 (also covers n <= 0).
    if n <= 1:
        return 1
    # print "before recursion", n
    # Recursive case: solve the smaller problem first, then build on it.
    smaller_problem = factorial(n - 1)
    # print "after recursion", n
    return n * smaller_problem
# This gets big fast
print "2! =", factorial(2)
print "7! =", factorial(7)
print "20! =", factorial(20)
| {
"content_hash": "7f3854327fee31b0e32ebf1445c43efe",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 56,
"avg_line_length": 25.65277777777778,
"alnum_prop": 0.7249593936112615,
"repo_name": "shiblon/pytour",
"id": "946e782cd29ef204e749b02d02573df74e8371aa",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tutorials/recursion.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "195977"
},
{
"name": "HTML",
"bytes": "2110262"
},
{
"name": "JavaScript",
"bytes": "5106892"
},
{
"name": "Python",
"bytes": "15081380"
},
{
"name": "Shell",
"bytes": "1018"
}
],
"symlink_target": ""
} |
"""
family.py
Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2014 Exa Networks. All rights reserved.
"""
from exabgp.configuration.engine.registry import Raised
from exabgp.configuration.engine.registry import Entry
from exabgp.protocol.family import AFI
from exabgp.protocol.family import SAFI
# ======================================================================= Family
#
class SectionFamily (Entry):
	"""Configuration parser section for the ``family { ... }`` block.

	Accumulates (AFI, SAFI) pairs in self.content, or one of the special
	markers 'all' / 'minimal', which are mutually exclusive with explicit
	families (see _check_conflict).
	"""
	syntax = \
		'family {\n' \
		'# all; # default, announce all the families we know\n' \
		'# minimal; # announce the AFI/SAFI of the routes in the configuration\n' \
		'\n' \
		'   ipv4 unicast;\n' \
		'   ipv4 multicast;\n' \
		'   ipv4 nlri-mpls;\n' \
		'   ipv4 mpls-vpn;\n' \
		'   ipv4 flow;\n' \
		'   ipv4 flow-vpn;\n' \
		'   ipv6 unicast;\n' \
		'   ipv6 flow;\n' \
		'   ipv6 flow-vpn;\n' \
		'}\n'
	def __init__ (self):
		self.content = []
	def enter (self,tokeniser):
		token = tokeniser()
		if token != '{': raise Raised(self.syntax)
		# NOTE(review): 'families' is never appended to self.content anywhere
		# in this class, so this duplicate check looks ineffective -- confirm.
		if 'families' in self.content:
			raise Raised('duplicate family blocks')
		self.content = []
	def exit (self,tokeniser):
		# no verification to do
		pass
	def inet (self,tokeniser):
		raise Raised("the word inet is deprecated, please use ipv4 instead",'error')
	def inet4 (self,tokeniser):
		raise Raised("the word inet4 is deprecated, please use ipv4 instead",'error')
	def inet6 (self,tokeniser):
		raise Raised("the word inet6 is deprecated, please use ipv6 instead",'error')
	def _check_conflict (self):
		# 'all' / 'minimal' replace the explicit family list entirely.
		if 'all' in self.content:
			raise Raised('ipv4 can not be used with all or minimal')
		if 'minimal' in self.content:
			raise Raised('ipv4 can not be used with all or minimal')
	def _drop_colon (self,tokeniser):
		if tokeniser() != ';':
			raise Raised('missing semi-colon')
	def ipv4 (self,tokeniser):
		# Parse one "ipv4 <safi>;" statement.
		self._check_conflict()
		safi = tokeniser()
		if safi == 'unicast':
			self.content.append((AFI(AFI.ipv4),SAFI(SAFI.unicast)))
		elif safi == 'multicast':
			self.content.append((AFI(AFI.ipv4),SAFI(SAFI.multicast)))
		elif safi == 'nlri-mpls':
			self.content.append((AFI(AFI.ipv4),SAFI(SAFI.nlri_mpls)))
		elif safi == 'mpls-vpn':
			self.content.append((AFI(AFI.ipv4),SAFI(SAFI.mpls_vpn)))
		# BUG FIX: was `safi in ('flow')` -- ('flow') is just the string
		# 'flow', so `in` performed a substring test that also accepted
		# tokens like 'f', 'lo' or 'low'.  Exact equality is intended.
		elif safi == 'flow':
			self.content.append((AFI(AFI.ipv4),SAFI(SAFI.flow_ip)))
		elif safi == 'flow-vpn':
			self.content.append((AFI(AFI.ipv4),SAFI(SAFI.flow_vpn)))
		else:
			raise Raised('unknow family safi %s' % safi)
		self._drop_colon(tokeniser)
	def ipv6 (self,tokeniser):
		# Parse one "ipv6 <safi>;" statement.
		self._check_conflict()
		safi = tokeniser()
		if safi == 'unicast':
			self.content.append((AFI(AFI.ipv6),SAFI(SAFI.unicast)))
		elif safi == 'mpls-vpn':
			self.content.append((AFI(AFI.ipv6),SAFI(SAFI.mpls_vpn)))
		# BUG FIX: same substring-test bug as in ipv4 above.
		elif safi == 'flow':
			self.content.append((AFI(AFI.ipv6),SAFI(SAFI.flow_ip)))
		elif safi == 'flow-vpn':
			self.content.append((AFI(AFI.ipv6),SAFI(SAFI.flow_vpn)))
		else:
			raise Raised('unknow family safi %s' % safi)
		self._drop_colon(tokeniser)
	def l2vpn (self,tokeniser):
		self._check_conflict()
		safi = tokeniser()
		if safi == 'vpls':
			self.content.append((AFI(AFI.l2vpn),SAFI(SAFI.vpls)))
		else:
			raise Raised('unknow family safi %s' % safi)
		self._drop_colon(tokeniser)
	def all (self,tokeniser):
		self._check_conflict()
		# bad, we are changing the type
		self.content = ['all',]
		self._drop_colon(tokeniser)
	def minimal (self,tokeniser):
		self._check_conflict()
		# bad, we are changing the type
		self.content = ['minimal',]
		self._drop_colon(tokeniser)
	@classmethod
	def register (cls,registry,location):
		# Hook every keyword handled by this section into the parser registry.
		registry.register_class(cls)
		registry.register_hook(cls,'enter',location,'enter')
		registry.register_hook(cls,'exit',location,'exit')
		registry.register_hook(cls,'action',location+['inet'],'inet')
		registry.register_hook(cls,'action',location+['inet4'],'inet4')
		registry.register_hook(cls,'action',location+['inet6'],'inet6')
		registry.register_hook(cls,'action',location+['ipv4'],'ipv4')
		registry.register_hook(cls,'action',location+['ipv6'],'ipv6')
		registry.register_hook(cls,'action',location+['l2vpn'],'l2vpn')
		registry.register_hook(cls,'action',location+['all'],'all')
		registry.register_hook(cls,'action',location+['minimal'],'minimal')
| {
"content_hash": "4a70d96b19f59f7aba394e532be7d380",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 80,
"avg_line_length": 29.06206896551724,
"alnum_prop": 0.6623160892263882,
"repo_name": "jbfavre/exabgp",
"id": "0e45c4e3f7b3f52cd902b0fb697f946d284eb204",
"size": "4232",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/exabgp/configuration/neighbor/family.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Perl",
"bytes": "1516"
},
{
"name": "Python",
"bytes": "838070"
},
{
"name": "Shell",
"bytes": "17096"
}
],
"symlink_target": ""
} |
import atexit
import sqlite3
get_all_mappings_query = '''
    SELECT * FROM mappings
'''

get_all_mapping_names_query = '''
    SELECT DISTINCT mname FROM mappings
'''

# SECURITY FIX: these two queries previously interpolated the mapping name
# with str.format() inside double quotes, which allowed SQL injection.
# They now use sqlite3 '?' parameter binding instead.
get_mapping_from_name_query = '''
    SELECT key, note FROM mappings
    WHERE mname = ?
'''

delete_mapping_from_name_query = '''
    DELETE FROM mappings
    WHERE mname = ?
'''

insert_mapping_query = '''
    INSERT OR IGNORE INTO mappings VALUES(?, ?, ?)
'''


class db(object):
    """Thin wrapper around the sqlite3 'mappings.db' database.

    Rows in the ``mappings`` table are (mname, key, note) triples.
    """

    def __init__(self):
        super(db, self).__init__()
        self.conn = sqlite3.connect('mappings.db')
        # Release the connection when the interpreter exits.
        atexit.register(self.close_connection)
        self.c = self.conn.cursor()

    def close_connection(self):
        """Close the underlying sqlite connection."""
        self.conn.close()
        # print() call works on both Python 2 and 3 (was a py2-only statement).
        print("db connection closed")

    def get_all_mappings(self):
        """Return every row of the mappings table."""
        self.c.execute(get_all_mappings_query)
        return self.c.fetchall()

    def get_all_mapping_names(self):
        """Return the distinct mapping names, as 1-tuples."""
        self.c.execute(get_all_mapping_names_query)
        return self.c.fetchall()

    def get_mapping_from_name(self, mname):
        """Return the (key, note) pairs stored under *mname*."""
        self.c.execute(get_mapping_from_name_query, (mname,))
        return self.c.fetchall()

    def delete_mapping_from_name(self, mname):
        """Delete every row stored under *mname* and commit."""
        self.c.execute(delete_mapping_from_name_query, (mname,))
        self.conn.commit()

    def insert_mapping(self, mapping):
        """Bulk-insert (mname, key, note) tuples; no-op on falsy input."""
        if not mapping:
            return
        self.c.executemany(insert_mapping_query, mapping)
        self.conn.commit()
| {
"content_hash": "96b9304ea6199b770bfe5d8bbb316154",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 68,
"avg_line_length": 23.6,
"alnum_prop": 0.6370056497175142,
"repo_name": "killalea/digital-keyboard",
"id": "b552adb1b4c85d350003b29e07a6f96e60ba351f",
"size": "1416",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27047"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reshape the fcm model: drop the Maps model, rename name -> title,
    and replace map_file with description/map_html/map_image fields."""
    dependencies = [
        ('fcm_app', '0005_auto_20170827_1811'),
    ]
    operations = [
        # The standalone Maps model is no longer needed.
        migrations.DeleteModel(
            name='Maps',
        ),
        migrations.RenameField(
            model_name='fcm',
            old_name='name',
            new_name='title',
        ),
        migrations.RemoveField(
            model_name='fcm',
            name='map_file',
        ),
        # The defaults below only back-fill existing rows during this
        # migration (preserve_default=False drops them afterwards).
        migrations.AddField(
            model_name='fcm',
            name='description',
            field=models.CharField(default='Test description', max_length=2000),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='fcm',
            name='map_html',
            field=models.FileField(default='media/images/one', upload_to='media/html'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='fcm',
            name='map_image',
            field=models.ImageField(default='media/images/two', upload_to='media/images'),
            preserve_default=False,
        ),
    ]
| {
"content_hash": "a8dcf1773da8182dc5f03138ade94b3c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 90,
"avg_line_length": 27.813953488372093,
"alnum_prop": 0.532608695652174,
"repo_name": "gtsapelas/TRANSrisk_fcm_project",
"id": "f4434d926f7fd3290e7710afe5dc0c949c450388",
"size": "1267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fcm_app/migrations/0006_auto_20170910_2226.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "186489"
},
{
"name": "HTML",
"bytes": "485179"
},
{
"name": "JavaScript",
"bytes": "20243"
},
{
"name": "Python",
"bytes": "74089"
}
],
"symlink_target": ""
} |
import json
import time
import socket
import multiprocessing
import sys
import os
import queue
import random
import traceback
from http.server import BaseHTTPRequestHandler, HTTPServer
from multiprocessing.context import ForkProcess
from unittest import TestCase
from unittest.mock import patch, MagicMock
from threading import Thread, Event
from decimal import Decimal
import datetime as dt
import urllib3.exceptions
from base64 import b64decode
from urllib.parse import urlparse, parse_qs
from setuptools.ssl_support import find_ca_bundle
from .http import Client, _get_socket_opts, _remove_certs_for_non_https
from .exceptions import ConnectionError, ProgrammingError
REQUEST = 'crate.client.http.Server.request'
CA_CERT_PATH = find_ca_bundle()
def fake_request(response=None):
    """Build a stand-in for ``Server.request``.

    A list of responses is served round-robin, a single truthy value is
    returned on every call, and ``None`` produces a fresh response mock
    per call.
    """
    def request(*args, **kwargs):
        if isinstance(response, list):
            # Rotate: serve the head, then push it to the back of the queue.
            head = response.pop(0)
            response.append(head)
            return head
        if response:
            return response
        return MagicMock(spec=urllib3.response.HTTPResponse)
    return request
def fake_response(status, reason=None, content_type='application/json'):
    """Create an ``HTTPResponse`` mock carrying the given status line."""
    response = MagicMock(spec=urllib3.response.HTTPResponse)
    response.status = status
    response.reason = reason or ''
    response.headers = {'content-type': content_type}
    return response
def fake_redirect(location):
    """A 307 response mock whose redirect target is *location*."""
    response = fake_response(307)
    response.get_redirect_location.return_value = location
    return response
def bad_bulk_response():
    """A 400 response mock carrying a crate bulk-result JSON payload."""
    results = [
        {"rowcount": 1},
        {"error_message": "an error occured"},
        {"error_message": "another error"},
        {"error_message": ""},
        {"error_message": None},
    ]
    response = fake_response(400, 'Bad Request')
    response.data = json.dumps({"results": results}).encode()
    return response
def fail_sometimes(*args, **kwargs):
    """Return a 200 response mock, raising MaxRetryError roughly 10% of calls."""
    if not random.randint(1, 100) % 10:
        raise urllib3.exceptions.MaxRetryError(None, '/_sql', '')
    return fake_response(200)
class HttpClientTest(TestCase):
    """Unit tests for the HTTP ``Client`` using mocked server responses."""
    # Two good responses, then errors: the failing server must drop out of
    # the active pool.
    @patch(REQUEST, fake_request([fake_response(200),
                                  fake_response(104, 'Connection reset by peer'),
                                  fake_response(503, 'Service Unavailable')]))
    def test_connection_reset_exception(self):
        client = Client(servers="localhost:4200")
        client.sql('select 1')
        client.sql('select 2')
        self.assertEqual(['http://localhost:4200'], list(client._active_servers))
        try:
            client.sql('select 3')
        except ProgrammingError:
            # the single configured server was removed from the pool
            self.assertEqual([], list(client._active_servers))
        else:
            self.assertTrue(False)
        finally:
            client.close()
    def test_no_connection_exception(self):
        # no server reachable -> ConnectionError, not a crash
        client = Client()
        self.assertRaises(ConnectionError, client.sql, 'select foo')
        client.close()
    @patch(REQUEST)
    def test_http_error_is_re_raised(self, request):
        # arbitrary transport exceptions surface as ProgrammingError
        request.side_effect = Exception
        client = Client()
        self.assertRaises(ProgrammingError, client.sql, 'select foo')
        client.close()
    @patch(REQUEST)
    def test_programming_error_contains_http_error_response_content(self, request):
        # the original exception text must be preserved in e.message
        request.side_effect = Exception("this shouldn't be raised")
        client = Client()
        try:
            client.sql('select 1')
        except ProgrammingError as e:
            self.assertEqual("this shouldn't be raised", e.message)
        else:
            self.assertTrue(False)
        finally:
            client.close()
    # One good response, then a 5xx for every remaining request.
    @patch(REQUEST, fake_request([fake_response(200),
                                  fake_response(503, 'Service Unavailable')]))
    def test_server_error_50x(self):
        client = Client(servers="localhost:4200 localhost:4201")
        client.sql('select 1')
        client.sql('select 2')
        try:
            client.sql('select 3')
        except ProgrammingError as e:
            self.assertEqual("No more Servers available, " +
                             "exception from last server: Service Unavailable",
                             e.message)
            self.assertEqual([], list(client._active_servers))
        else:
            self.assertTrue(False)
        finally:
            client.close()
    def test_connect(self):
        # servers may be given as a space-separated string or as a list
        client = Client(servers="localhost:4200 localhost:4201")
        self.assertEqual(client._active_servers,
                         ["http://localhost:4200", "http://localhost:4201"])
        client.close()
        client = Client(servers="localhost:4200")
        self.assertEqual(client._active_servers, ["http://localhost:4200"])
        client.close()
        client = Client(servers=["localhost:4200"])
        self.assertEqual(client._active_servers, ["http://localhost:4200"])
        client.close()
        client = Client(servers=["localhost:4200", "127.0.0.1:4201"])
        self.assertEqual(client._active_servers,
                         ["http://localhost:4200", "http://127.0.0.1:4201"])
        client.close()
    # Every request answers with a 307 redirect to the second server.
    @patch(REQUEST, fake_request(fake_redirect('http://localhost:4201')))
    def test_redirect_handling(self):
        client = Client(servers='localhost:4200')
        try:
            client.blob_get('blobs', 'fake_digest')
        except ProgrammingError:
            # 4201 gets added to serverpool but isn't available
            # that's why we run into an infinite recursion
            # exception message is: maximum recursion depth exceeded
            pass
        self.assertEqual(
            ['http://localhost:4200', 'http://localhost:4201'],
            sorted(list(client.server_pool.keys()))
        )
        # the new non-https server must not contain any SSL only arguments
        # regression test for github issue #179/#180
        self.assertEqual(
            {'socket_options': _get_socket_opts(keepalive=True)},
            client.server_pool['http://localhost:4201'].pool.conn_kw
        )
        client.close()
    @patch(REQUEST)
    def test_server_infos(self, request):
        # retries exhausted -> ConnectionError
        request.side_effect = urllib3.exceptions.MaxRetryError(
            None, '/', "this shouldn't be raised")
        client = Client(servers="localhost:4200 localhost:4201")
        self.assertRaises(
            ConnectionError, client.server_infos, 'http://localhost:4200')
        client.close()
    @patch(REQUEST, fake_request(fake_response(503)))
    def test_server_infos_503(self):
        client = Client(servers="localhost:4200")
        self.assertRaises(
            ConnectionError, client.server_infos, 'http://localhost:4200')
        client.close()
    @patch(REQUEST, fake_request(
        fake_response(401, 'Unauthorized', 'text/html')))
    def test_server_infos_401(self):
        # auth failures are ProgrammingError, not ConnectionError
        client = Client(servers="localhost:4200")
        try:
            client.server_infos('http://localhost:4200')
        except ProgrammingError as e:
            self.assertEqual("401 Client Error: Unauthorized", e.message)
        else:
            self.assertTrue(False, msg="Exception should have been raised")
        finally:
            client.close()
    @patch(REQUEST, fake_request(bad_bulk_response()))
    def test_bad_bulk_400(self):
        # non-empty error_message entries are joined into one message
        client = Client(servers="localhost:4200")
        try:
            client.sql("Insert into users (name) values(?)",
                       bulk_parameters=[["douglas"], ["monthy"]])
        except ProgrammingError as e:
            self.assertEqual("an error occured\nanother error", e.message)
        else:
            self.assertTrue(False, msg="Exception should have been raised")
        finally:
            client.close()
    @patch(REQUEST, autospec=True)
    def test_decimal_serialization(self, request):
        # Decimal arguments are sent as their string representation
        client = Client(servers="localhost:4200")
        request.return_value = fake_response(200)
        dec = Decimal(0.12)
        client.sql('insert into users (float_col) values (?)', (dec,))
        data = json.loads(request.call_args[1]['data'])
        self.assertEqual(data['args'], [str(dec)])
        client.close()
    @patch(REQUEST, autospec=True)
    def test_datetime_is_converted_to_ts(self, request):
        # datetime arguments become epoch milliseconds
        client = Client(servers="localhost:4200")
        request.return_value = fake_response(200)
        datetime = dt.datetime(2015, 2, 28, 7, 31, 40)
        client.sql('insert into users (dt) values (?)', (datetime,))
        # convert string to dict
        # because the order of the keys isn't deterministic
        data = json.loads(request.call_args[1]['data'])
        self.assertEqual(data['args'], [1425108700000])
        client.close()
    @patch(REQUEST, autospec=True)
    def test_date_is_converted_to_ts(self, request):
        # date arguments become epoch milliseconds at midnight
        client = Client(servers="localhost:4200")
        request.return_value = fake_response(200)
        day = dt.date(2016, 4, 21)
        client.sql('insert into users (dt) values (?)', (day,))
        data = json.loads(request.call_args[1]['data'])
        self.assertEqual(data['args'], [1461196800000])
        client.close()
    def test_socket_options_contain_keepalive(self):
        server = 'http://localhost:4200'
        client = Client(servers=server)
        conn_kw = client.server_pool[server].pool.conn_kw
        self.assertIn(
            (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), conn_kw['socket_options']
        )
        client.close()
@patch(REQUEST, fail_sometimes)
class ThreadSafeHttpClientTest(TestCase):
    """
    Using a pool of 5 Threads to emit commands to the multiple servers through
    one Client-instance
    check if number of servers in _inactive_servers and _active_servers always
    equals the number of servers initially given.
    """
    servers = [
        "127.0.0.1:44209",
        "127.0.0.2:44209",
        "127.0.0.3:44209",
    ]
    num_threads = 5
    num_commands = 1000
    thread_timeout = 5.0  # seconds
    def __init__(self, *args, **kwargs):
        # Event used as a start barrier so all threads begin together;
        # assertion failures are collected in err_queue (assertions raised
        # inside worker threads would otherwise be lost).
        self.event = Event()
        self.err_queue = queue.Queue()
        super(ThreadSafeHttpClientTest, self).__init__(*args, **kwargs)
    def setUp(self):
        self.client = Client(self.servers)
        self.client.retry_interval = 0.2  # faster retry
    def tearDown(self):
        self.client.close()
    def _run(self):
        """Worker body: hammer the client and verify the server-pool invariant."""
        self.event.wait()  # wait for the others
        expected_num_servers = len(self.servers)
        for x in range(self.num_commands):
            try:
                self.client.sql('select name from sys.cluster')
            except ConnectionError:
                # fail_sometimes makes ~10% of requests fail; that's expected
                pass
            try:
                # Invariant: active + inactive servers always equals the
                # number of servers configured initially.
                with self.client._lock:
                    num_servers = len(self.client._active_servers) + \
                        len(self.client._inactive_servers)
                self.assertEqual(
                    expected_num_servers,
                    num_servers,
                    "expected %d but got %d" % (expected_num_servers,
                                                num_servers)
                )
            except AssertionError:
                self.err_queue.put(sys.exc_info())
    def test_client_threaded(self):
        """
        Testing if lists of servers is handled correctly when client is used
        from multiple threads with some requests failing.
        **ATTENTION:** this test is probabilistic and does not ensure that the
        client is indeed thread-safe in all cases, it can only show that it
        withstands this scenario.
        """
        threads = [
            Thread(target=self._run, name=str(x))
            for x in range(self.num_threads)
        ]
        for thread in threads:
            thread.start()
        # release the start barrier, then wait (bounded) for completion
        self.event.set()
        for t in threads:
            t.join(self.thread_timeout)
        if not self.err_queue.empty():
            self.assertTrue(False, "".join(
                traceback.format_exception(*self.err_queue.get(block=False))))
class ClientAddressRequestHandler(BaseHTTPRequestHandler):
    """
    HTTP handler for use with HTTPServer.

    Answers every request method with a crate-style JSON payload that
    echoes the connecting client's host and port.
    """
    protocol_version = 'HTTP/1.1'
    def do_GET(self):
        # Drain the request body, if any, so the connection can be reused.
        length = self.headers.get("content-length")
        if length:
            self.rfile.read(int(length))
        host = self.client_address[0]
        port = self.client_address[1]
        body = json.dumps({
            "cols": ["host", "port"],
            "rows": [host, port],
            "rowCount": 1,
        })
        self.send_response(200)
        self.send_header("Content-Length", len(body))
        self.send_header("Content-Type", "application/json; charset=UTF-8")
        self.end_headers()
        self.wfile.write(body.encode('UTF-8'))
    do_POST = do_PUT = do_DELETE = do_HEAD = do_GET
class KeepAliveClientTest(TestCase):
    """Verify that consecutive requests reuse the same client connection,
    by comparing the (host, port) echoed back by the fake server."""
    server_address = ("127.0.0.1", 65535)
    def __init__(self, *args, **kwargs):
        super(KeepAliveClientTest, self).__init__(*args, **kwargs)
        # the echo server runs in a forked child process
        self.server_process = ForkProcess(target=self._run_server)
    def setUp(self):
        super(KeepAliveClientTest, self).setUp()
        self.client = Client(["%s:%d" % self.server_address])
        self.server_process.start()
        # give the forked server a moment to bind its socket
        time.sleep(.10)
    def tearDown(self):
        self.server_process.terminate()
        self.client.close()
        super(KeepAliveClientTest, self).tearDown()
    def _run_server(self):
        self.server = HTTPServer(self.server_address,
                                 ClientAddressRequestHandler)
        self.server.handle_request()
    def test_client_keepalive(self):
        # identical (host, port) answers imply the same local socket was reused
        for x in range(10):
            result = self.client.sql("select * from fake")
            another_result = self.client.sql("select again from fake")
            self.assertEqual(result, another_result)
class ParamsTest(TestCase):
    """Tests for query-string parameters appended to the client path."""
    def test_params(self):
        # error_trace=True must appear in the request path's query string
        client = Client(['127.0.0.1:4200'], error_trace=True)
        parsed = urlparse(client.path)
        params = parse_qs(parsed.query)
        self.assertEqual(params["error_trace"], ["true"])
        client.close()
    def test_no_params(self):
        # default path only carries the types flag
        client = Client()
        self.assertEqual(client.path, "/_sql?types=true")
        client.close()
class RequestsCaBundleTest(TestCase):
    """Tests around CA-bundle handling for plain HTTP connections."""
    def test_open_client(self):
        # setting REQUESTS_CA_BUNDLE must not break plain-HTTP clients
        os.environ["REQUESTS_CA_BUNDLE"] = CA_CERT_PATH
        try:
            Client('http://127.0.0.1:4200')
        except ProgrammingError:
            self.fail("HTTP not working with REQUESTS_CA_BUNDLE")
        finally:
            os.unsetenv('REQUESTS_CA_BUNDLE')
            os.environ["REQUESTS_CA_BUNDLE"] = ''
    def test_remove_certs_for_non_https(self):
        # cert-related kwargs are kept for https ...
        d = _remove_certs_for_non_https('https', {"ca_certs": 1})
        self.assertIn('ca_certs', d)
        # ... and stripped for http, leaving other kwargs untouched
        kwargs = {'ca_certs': 1, 'foobar': 2, 'cert_file': 3}
        d = _remove_certs_for_non_https('http', kwargs)
        self.assertNotIn('ca_certs', d)
        self.assertNotIn('cert_file', d)
        self.assertIn('foobar', d)
class TimeoutRequestHandler(BaseHTTPRequestHandler):
    """
    HTTP handler for use with TestingHTTPServer
    updates the shared counter and waits so that the client times out
    """
    def do_POST(self):
        # count the request, then sleep longer than the client's 1s timeout
        # (see TestingHttpServerTestCase.clientWithKwargs) so no response
        # is ever sent in time
        self.server.SHARED['count'] += 1
        time.sleep(5)
class SharedStateRequestHandler(BaseHTTPRequestHandler):
    """
    HTTP handler for use with TestingHTTPServer
    sets the shared state of the server and returns an empty response
    """
    def do_POST(self):
        self.server.SHARED['count'] += 1
        # record the schema header (None when absent)
        self.server.SHARED['schema'] = self.headers.get('Default-Schema')
        if self.headers.get('Authorization') is not None:
            # decode HTTP Basic credentials; split only on the first ':'
            # so passwords may themselves contain colons
            auth_header = self.headers['Authorization'].replace('Basic ', '')
            credentials = b64decode(auth_header).decode('utf-8').split(":", 1)
            self.server.SHARED['username'] = credentials[0]
            if len(credentials) > 1 and credentials[1]:
                self.server.SHARED['password'] = credentials[1]
            else:
                self.server.SHARED['password'] = None
        else:
            self.server.SHARED['username'] = None
        if self.headers.get('X-User') is not None:
            self.server.SHARED['usernameFromXUser'] = self.headers['X-User']
        else:
            self.server.SHARED['usernameFromXUser'] = None
        # send empty response
        response = '{}'
        self.send_response(200)
        self.send_header("Content-Length", len(response))
        self.send_header("Content-Type", "application/json; charset=UTF-8")
        self.end_headers()
        self.wfile.write(response.encode('utf-8'))
class TestingHTTPServer(HTTPServer):
    """
    http server providing a shared dict
    """
    # NOTE: the multiprocessing.Manager (and its helper process) is created
    # at import time and the SHARED dict is visible across the forked
    # server process and the test process.
    manager = multiprocessing.Manager()
    SHARED = manager.dict()
    SHARED['count'] = 0
    SHARED['usernameFromXUser'] = None
    SHARED['username'] = None
    SHARED['password'] = None
    SHARED['schema'] = None
    @classmethod
    def run_server(cls, server_address, request_handler_cls):
        # blocks forever; intended to run in a forked child process
        cls(server_address, request_handler_cls).serve_forever()
class TestingHttpServerTestCase(TestCase):
    """Base class that forks a TestingHTTPServer per test case.

    Subclasses must provide a ``request_handler`` class attribute.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.assertIsNotNone(self.request_handler)
        # random high port to avoid collisions between test runs
        self.server_address = ('127.0.0.1', random.randint(65000, 65535))
        self.server_process = ForkProcess(target=TestingHTTPServer.run_server,
                                          args=(self.server_address, self.request_handler))
    def setUp(self):
        self.server_process.start()
        self.wait_for_server()
    def wait_for_server(self):
        # poll until the forked server accepts TCP connections
        while True:
            try:
                with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
                    s.connect(self.server_address)
            except Exception:
                time.sleep(.25)
            else:
                break
    def tearDown(self):
        self.server_process.terminate()
    def clientWithKwargs(self, **kwargs):
        # short timeout keeps the timeout-related tests fast
        return Client(["%s:%d" % self.server_address], timeout=1, **kwargs)
class RetryOnTimeoutServerTest(TestingHttpServerTestCase):
    """Verify a read timeout is not retried (exactly one request is sent)."""
    request_handler = TimeoutRequestHandler
    def setUp(self):
        super().setUp()
        self.client = self.clientWithKwargs()
    def tearDown(self):
        super().tearDown()
        self.client.close()
    def test_no_retry_on_read_timeout(self):
        try:
            self.client.sql("select * from fake")
        except ConnectionError as e:
            self.assertIn('Read timed out', e.message,
                          msg='Error message must contain: Read timed out')
        # the server-side counter proves only one request arrived
        self.assertEqual(TestingHTTPServer.SHARED['count'], 1)
class TestDefaultSchemaHeader(TestingHttpServerTestCase):
    """Verify the schema kwarg is sent as the Default-Schema header."""
    request_handler = SharedStateRequestHandler
    def setUp(self):
        super().setUp()
        self.client = self.clientWithKwargs(schema='my_custom_schema')
    def tearDown(self):
        self.client.close()
        super().tearDown()
    def test_default_schema(self):
        self.client.sql('SELECT 1')
        # the handler records the received Default-Schema header
        self.assertEqual(TestingHTTPServer.SHARED['schema'], 'my_custom_schema')
class TestUsernameSentAsHeader(TestingHttpServerTestCase):
    """Verify username/password reach the server via X-User and Basic auth."""
    request_handler = SharedStateRequestHandler
    def setUp(self):
        super().setUp()
        self.clientWithoutUsername = self.clientWithKwargs()
        self.clientWithUsername = self.clientWithKwargs(username='testDBUser')
        # password deliberately contains ':' to exercise the split(':', 1)
        # handling in SharedStateRequestHandler
        self.clientWithUsernameAndPassword = self.clientWithKwargs(username='testDBUser',
                                                                   password='test:password')
    def tearDown(self):
        self.clientWithoutUsername.close()
        self.clientWithUsername.close()
        self.clientWithUsernameAndPassword.close()
        super().tearDown()
    def test_username(self):
        # no credentials -> no auth headers at all
        self.clientWithoutUsername.sql("select * from fake")
        self.assertEqual(TestingHTTPServer.SHARED['usernameFromXUser'], None)
        self.assertEqual(TestingHTTPServer.SHARED['username'], None)
        self.assertEqual(TestingHTTPServer.SHARED['password'], None)
        # username only -> sent both as X-User and Basic auth user
        self.clientWithUsername.sql("select * from fake")
        self.assertEqual(TestingHTTPServer.SHARED['usernameFromXUser'], 'testDBUser')
        self.assertEqual(TestingHTTPServer.SHARED['username'], 'testDBUser')
        self.assertEqual(TestingHTTPServer.SHARED['password'], None)
        # username + password -> password (with colon) preserved intact
        self.clientWithUsernameAndPassword.sql("select * from fake")
        self.assertEqual(TestingHTTPServer.SHARED['usernameFromXUser'], 'testDBUser')
        self.assertEqual(TestingHTTPServer.SHARED['username'], 'testDBUser')
        self.assertEqual(TestingHTTPServer.SHARED['password'], 'test:password')
| {
"content_hash": "11faabf794363fd3bcfbed0286a4182a",
"timestamp": "",
"source": "github",
"line_count": 607,
"max_line_length": 92,
"avg_line_length": 34.200988467874794,
"alnum_prop": 0.6081888246628131,
"repo_name": "crate/crate-python",
"id": "ee32778b6379097a643b9e826aae561103bba5a0",
"size": "21783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/crate/client/test_http.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "228149"
},
{
"name": "Shell",
"bytes": "5367"
}
],
"symlink_target": ""
} |
"""The ViCare integration."""
from __future__ import annotations
from collections.abc import Callable
from dataclasses import dataclass
import logging
from PyViCare.PyViCare import PyViCare
from PyViCare.PyViCareDevice import Device
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_NAME,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.storage import STORAGE_DIR
from .const import (
CONF_CIRCUIT,
CONF_HEATING_TYPE,
DEFAULT_HEATING_TYPE,
DOMAIN,
HEATING_TYPE_TO_CREATOR_METHOD,
PLATFORMS,
VICARE_API,
VICARE_CIRCUITS,
VICARE_DEVICE_CONFIG,
HeatingType,
)
_LOGGER = logging.getLogger(__name__)
@dataclass()
class ViCareRequiredKeysMixin:
    """Mixin for required keys."""
    # Callable that extracts the entity's current value from a PyViCare Device.
    value_getter: Callable[[Device], bool]
# Legacy yaml schema; the data is imported into a config entry by
# async_setup below.
CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: vol.All(
            cv.deprecated(CONF_CIRCUIT),
            vol.Schema(
                {
                    vol.Required(CONF_USERNAME): cv.string,
                    vol.Required(CONF_PASSWORD): cv.string,
                    vol.Required(CONF_CLIENT_ID): cv.string,
                    # stored as total seconds, not as a timedelta
                    vol.Optional(CONF_SCAN_INTERVAL, default=60): vol.All(
                        cv.time_period, lambda value: value.total_seconds()
                    ),
                    vol.Optional(
                        CONF_CIRCUIT
                    ): int,  # Ignored: All circuits are now supported. Will be removed when switching to Setup via UI.
                    vol.Optional(CONF_NAME, default="ViCare"): cv.string,
                    vol.Optional(
                        CONF_HEATING_TYPE, default=DEFAULT_HEATING_TYPE.value
                    ): vol.In([e.value for e in HeatingType]),
                }
            ),
        )
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config) -> bool:
    """Set up the ViCare component from yaml."""
    if DOMAIN not in config:
        # Setup via UI. No need to continue yaml-based setup
        return True
    # Forward the yaml configuration into the config-entry import flow.
    hass.async_create_task(
        hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": SOURCE_IMPORT},
            data=config[DOMAIN],
        )
    )
    return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up from config entry."""
    _LOGGER.debug("Setting up ViCare component")
    hass.data[DOMAIN] = {}
    hass.data[DOMAIN][entry.entry_id] = {}
    # PyViCare login performs blocking I/O, so run it in the executor.
    await hass.async_add_executor_job(setup_vicare_api, hass, entry)
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    return True
def vicare_login(hass, entry_data):
    """Login via PyVicare API."""
    vicare_api = PyViCare()
    # cache API responses for the configured scan interval
    vicare_api.setCacheDuration(entry_data[CONF_SCAN_INTERVAL])
    vicare_api.initWithCredentials(
        entry_data[CONF_USERNAME],
        entry_data[CONF_PASSWORD],
        entry_data[CONF_CLIENT_ID],
        # OAuth token is persisted in HA's storage directory
        hass.config.path(STORAGE_DIR, "vicare_token.save"),
    )
    return vicare_api
def setup_vicare_api(hass, entry):
    """Set up PyVicare API."""
    vicare_api = vicare_login(hass, entry.data)
    for device in vicare_api.devices:
        _LOGGER.info(
            "Found device: %s (online: %s)", device.getModel(), str(device.isOnline())
        )
    # Currently we only support a single device
    device = vicare_api.devices[0]
    hass.data[DOMAIN][entry.entry_id][VICARE_DEVICE_CONFIG] = device
    # Resolve the heating-type specific API object (e.g. asGazBoiler())
    # from the configured heating type.
    hass.data[DOMAIN][entry.entry_id][VICARE_API] = getattr(
        device,
        HEATING_TYPE_TO_CREATOR_METHOD[HeatingType(entry.data[CONF_HEATING_TYPE])],
    )()
    hass.data[DOMAIN][entry.entry_id][VICARE_CIRCUITS] = hass.data[DOMAIN][
        entry.entry_id
    ][VICARE_API].circuits
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload ViCare config entry, dropping its stored data on success."""
    if not await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        return False
    hass.data[DOMAIN].pop(entry.entry_id)
    return True
| {
"content_hash": "e6123231b1dcaa6218b4ea6ec73347be",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 119,
"avg_line_length": 29.310344827586206,
"alnum_prop": 0.6294117647058823,
"repo_name": "home-assistant/home-assistant",
"id": "a79dbf0657d97483b317f27fa9fa32a6fa4dff2e",
"size": "4250",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/vicare/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "20557383"
},
{
"name": "Shell",
"bytes": "6671"
}
],
"symlink_target": ""
} |
import os
DEBUG = False
TEMPLATE_DEBUG = DEBUG
APPEND_SLASH=True
BRUTE_PROTECT = False
BRUTE_COOLOFF = 3
BRUTE_LIMIT = 3
PROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
AUTH_PROFILE_MODULE = "sal.UserProfile"
DISPLAY_NAME = 'Sal'
MANAGERS = ADMINS
# The order plugins (if they're able to be shown on that particular page) will be displayed in. If not listed here, will be listed alphabetically after.
PLUGIN_ORDER = ['Activity','Status','OperatingSystem', 'MunkiVersion', 'Uptime', 'Memory', 'DiskSpace', 'PendingAppleUpdates', 'Pending3rdPartyUpdates', 'PuppetStatus']
# Only show these plugins on the front page - some things only the admins should see.
LIMIT_PLUGIN_TO_FRONT_PAGE = []
# Hide these plugins from the front page
HIDE_PLUGIN_FROM_FRONT_PAGE = []
# Hide these plugins from the specified business units
HIDE_PLUGIN_FROM_BUSINESS_UNIT = {
# 'Encryption':['1']
}
# Hide these plugins from the specified machine groups
HIDE_PLUGIN_FROM_MACHINE_GROUP = {
# 'DiskSpace':['1']
}
PLUGIN_DIR = os.path.join(PROJECT_DIR, 'plugins')
# If you want to have a default machine group, define this to the key of
# that group.
#DEFAULT_MACHINE_GROUP_KEY = ''
# Facts which will have historical data kept in addition to the most
# recent instanct of that fact.
HISTORICAL_FACTS = [
# 'memoryfree_mb',
]
# How long to keep historical facts around before pruning them.
HISTORICAL_DAYS = 180
EXCLUDED_FACTS = {
'sshrsakey',
'sshfp_rsa',
'sshfp_dsa',
'sshdsakey',
}
EXCLUDED_CONDITIONS = {
# 'some_condition',
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['*']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = os.path.join(PROJECT_DIR, 'static')
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'site_static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'ppf%ls0f)mzkf#2dl-nbf^8f&=84py=y^u8^z-f559*d36y_@v'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"sal.context_processors.display_name",
"sal.context_processors.config_installed",
"django.core.context_processors.request",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
LOGIN_URL='/login'
LOGIN_REDIRECT_URL='/'
ROOT_URLCONF = 'sal.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'sal.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_DIR, 'templates'),
os.path.join(PROJECT_DIR, 'server', 'plugins'),
PLUGIN_DIR,
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'sal',
'server',
'api',
'catalog',
'inventory',
'licenses',
'bootstrap3',
'watson',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
BOOTSTRAP3 = {
'set_placeholder': False,
}
# Heroku sets $DYNO inside every dyno; apply Heroku-specific overrides.
if 'DYNO' in os.environ:
    # Parse database configuration from $DATABASE_URL
    import dj_database_url
    # NOTE(review): DATABASES is not defined anywhere in this module, so this
    # assignment relies on the importing settings module defining it first —
    # otherwise it raises NameError. Confirm against settings.py.
    DATABASES['default'] = dj_database_url.config()
    # Honor the 'X-Forwarded-Proto' header for request.is_secure()
    SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
    # Allow all host headers
    ALLOWED_HOSTS = ['*']
| {
"content_hash": "712d8e0aef9ac27699294ebc2abcbaad",
"timestamp": "",
"source": "github",
"line_count": 236,
"max_line_length": 168,
"avg_line_length": 31.970338983050848,
"alnum_prop": 0.7033797216699801,
"repo_name": "chasetb/sal",
"id": "880818d59981c42936a0db526bd5f97eabc33a9a",
"size": "7545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sal/system_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "192288"
},
{
"name": "HTML",
"bytes": "119776"
},
{
"name": "JavaScript",
"bytes": "683793"
},
{
"name": "Makefile",
"bytes": "2284"
},
{
"name": "Nginx",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "346909"
},
{
"name": "Shell",
"bytes": "1964"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.