text stringlengths 4 1.02M | meta dict |
|---|---|
from rstem import led_matrix, accel
import RPi.GPIO as GPIO
import time
import os
import sys
import random


def _report(token):
    """Send a progress/status token to the launching menu over stdout."""
    print(token)
    sys.stdout.flush()


# Tell the menu how far along start-up is.
_report("P50")

# Bring up the four LED matrices at their grid offsets.
led_matrix.init_matrices([(0,8),(8,8),(8,0),(0,0)])

# Bring up the accelerometer (bus 1).
accel.init(1)

# Buttons are addressed by Broadcom pin number.
GPIO.setmode(GPIO.BCM)

_report("P60")

# Load one sprite per die face (1.spr .. 6.spr).
_sprite_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                           "dice_sprites")
dice = [led_matrix.LEDSprite(os.path.join(_sprite_dir, str(face) + ".spr"))
        for face in range(1, 7)]

_report("P90")

# GPIO pin assignments for the gamepad buttons.
UP = 25
DOWN = 24
LEFT = 23
RIGHT = 18
A = 4
B = 17
START = 27
SELECT = 22

# START/SELECT exit the game, A rolls the dice.
GPIO.setup(START, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(SELECT, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(A, GPIO.IN, pull_up_down=GPIO.PUD_UP)

_report("P90")

# Show an initial set of dice on the first pass through the loop.
just_started = True

# Reference elevation; a big enough tilt away from it triggers a roll.
base_elevation = accel.angles()[2]
THRESHOLD = 20

# Tell the menu we are ready to take over the LED matrix.
_report("READY")

while True:
    # Exit when START or SELECT is pressed (buttons are active-low).
    if GPIO.input(START) == 0 or GPIO.input(SELECT) == 0:
        led_matrix.cleanup()
        GPIO.cleanup()
        sys.exit(0)
    # Roll on first pass, on an A press, or on a steep enough tilt.
    if (just_started or GPIO.input(A) == 0
            or abs(accel.angles()[2] - base_elevation) > THRESHOLD):
        led_matrix.erase()  # clear the previous faces
        # Draw one random die face on each 8x8 matrix.
        for y in range(0, led_matrix.height(), 8):
            for x in range(0, led_matrix.width(), 8):
                led_matrix.sprite(random.choice(dice), (x + 1, y + 1))
        just_started = False
        led_matrix.show()
| {
"content_hash": "865f636dcd360ee8b1ab6d367c3d8df0",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 106,
"avg_line_length": 23.183908045977013,
"alnum_prop": 0.6693108577094695,
"repo_name": "scottsilverlabs/raspberrystem",
"id": "9664fbbbc9f339763925ec4e1645c89883787550",
"size": "2640",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rstem/projects/led_matrix_games/dice.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "22961"
},
{
"name": "HTML",
"bytes": "231804"
},
{
"name": "Makefile",
"bytes": "10187"
},
{
"name": "Python",
"bytes": "327178"
},
{
"name": "Shell",
"bytes": "8375"
}
],
"symlink_target": ""
} |
"""
Defines base data types and models required specifically
for VRF Flow Specification support.
"""
import abc
import logging
import six
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_ORIGIN
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_AS_PATH
from ryu.lib.packet.bgp import BGP_ATTR_TYPE_EXTENDED_COMMUNITIES
from ryu.lib.packet.bgp import BGPPathAttributeOrigin
from ryu.lib.packet.bgp import BGPPathAttributeAsPath
from ryu.lib.packet.bgp import BGPPathAttributeExtendedCommunities
from ryu.services.protocols.bgp.base import OrderedDict
from ryu.services.protocols.bgp.info_base.vrf import VrfTable
from ryu.services.protocols.bgp.info_base.vrf import VrfDest
from ryu.services.protocols.bgp.info_base.vrf import VrfPath
from ryu.services.protocols.bgp.utils.bgp import create_rt_extended_community
LOG = logging.getLogger('bgpspeaker.info_base.vrffs')
@six.add_metaclass(abc.ABCMeta)
class VRFFlowSpecTable(VrfTable):
    """Virtual Routing and Forwarding information base for Flow Spec.

    Holds destinations imported into a given VRF Flow Specification
    table.
    """

    def insert_vrffs_path(self, nlri, communities, is_withdraw=False):
        """Insert (or withdraw) a Flow Specification path into this table.

        :param nlri: Flow Specification NLRI for the path.
        :param communities: list of extended communities; extended in
            place with this VRF's export RTs and SOO values.
        :param is_withdraw: True to withdraw the path instead of adding.
        """
        assert nlri
        assert isinstance(communities, list)
        conf = self.vrf_conf

        # NOTE(review): imported locally, presumably to break an import
        # cycle with the core module — confirm before hoisting.
        from ryu.services.protocols.bgp.core import EXPECTED_ORIGIN

        attrs = OrderedDict()
        attrs[BGP_ATTR_TYPE_ORIGIN] = BGPPathAttributeOrigin(
            EXPECTED_ORIGIN)
        attrs[BGP_ATTR_TYPE_AS_PATH] = BGPPathAttributeAsPath([])
        # Tag the path with export route-targets (sub-type 2) and sites
        # of origin (sub-type 3) configured on this VRF.
        communities.extend(create_rt_extended_community(rt, 2)
                           for rt in conf.export_rts)
        communities.extend(create_rt_extended_community(soo, 3)
                           for soo in conf.soo_list)
        attrs[BGP_ATTR_TYPE_EXTENDED_COMMUNITIES] = (
            BGPPathAttributeExtendedCommunities(communities=communities))

        puid = self.VRF_PATH_CLASS.create_puid(
            conf.route_dist, nlri.prefix)
        path = self.VRF_PATH_CLASS(puid, None, nlri, 0,
                                   pattrs=attrs, is_withdraw=is_withdraw)

        # Insert into the VRF table, then signal the affected destination
        # so it gets processed further.
        eff_dest = self.insert(path)
        self._signal_bus.dest_changed(eff_dest)
@six.add_metaclass(abc.ABCMeta)
class VRFFlowSpecDest(VrfDest):
    """Abstract destination entry for a VRF Flow Specification table."""
@six.add_metaclass(abc.ABCMeta)
class VRFFlowSpecPath(VrfPath):
    """A way of reaching an IP destination with a VPN Flow
    Specification.
    """
| {
"content_hash": "e78e10f99630e9fa90ae62c5bdfdcc21",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 77,
"avg_line_length": 33.87012987012987,
"alnum_prop": 0.7162576687116564,
"repo_name": "osrg/ryu",
"id": "8f0fe3eb40dd4b45cb2293b4e78b19ed046f4047",
"size": "3221",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ryu/services/protocols/bgp/info_base/vrffs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "28540"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "874721"
},
{
"name": "Gnuplot",
"bytes": "1094"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "6137808"
},
{
"name": "Shell",
"bytes": "17573"
}
],
"symlink_target": ""
} |
import glob
import itertools
import os
import shutil
import tempfile
import time
import unittest
import mock
import pytest
from chainer import testing
from chainer import training
from chainer.training import extensions
from chainer.training.extensions._snapshot import _find_latest_snapshot
from chainer.training.extensions._snapshot import _find_snapshot_files
from chainer.training.extensions._snapshot import _find_stale_snapshots
class TestSnapshot(unittest.TestCase):

    def test_call(self):
        # The writer must only be invoked when the condition passes.
        target = mock.MagicMock()
        condition = mock.MagicMock(side_effect=[True, False])
        writer = mock.MagicMock()
        ext = extensions.snapshot(target=target, condition=condition,
                                  writer=writer)
        trainer = mock.MagicMock()
        ext(trainer)
        ext(trainer)
        assert condition.call_count == 2
        assert writer.call_count == 1

    def test_savefun_and_writer_exclusive(self):
        # savefun and writer arguments cannot be specified together.
        def savefun(*args, **kwargs):
            assert False

        writer = extensions.snapshot_writers.SimpleWriter()
        with pytest.raises(TypeError):
            extensions.snapshot(savefun=savefun, writer=writer)

        trainer = mock.MagicMock()
        with pytest.raises(TypeError):
            extensions.snapshot_object(trainer, savefun=savefun,
                                       writer=writer)
class TestSnapshotSaveFile(unittest.TestCase):

    def setUp(self):
        self.trainer = testing.get_trainer_with_mock_updater()
        self.trainer.out = '.'
        self.trainer._done = True

    def tearDown(self):
        if os.path.exists('myfile.dat'):
            os.remove('myfile.dat')

    def test_save_file(self):
        writer = extensions.snapshot_writers.SimpleWriter()
        ext = extensions.snapshot_object(self.trainer, 'myfile.dat',
                                         writer=writer)
        ext(self.trainer)
        self.assertTrue(os.path.exists('myfile.dat'))

    def test_clean_up_tempdir(self):
        # Staging files named 'tmpmyfile.dat*' must not survive the save.
        ext = extensions.snapshot_object(self.trainer, 'myfile.dat')
        ext(self.trainer)
        leftovers = [name for name in os.listdir('.')
                     if name.startswith('tmpmyfile.dat')]
        self.assertEqual(len(leftovers), 0)
class TestSnapshotOnError(unittest.TestCase):

    def setUp(self):
        self.trainer = testing.get_trainer_with_mock_updater()
        self.trainer.out = '.'
        self.filename = 'myfile-deadbeef.dat'

    def tearDown(self):
        if os.path.exists(self.filename):
            os.remove(self.filename)

    def test_on_error(self):
        # snapshot_on_error=True must write a snapshot when training
        # aborts with an exception, and not before.

        class TheOnlyError(Exception):
            pass

        @training.make_extension(trigger=(1, 'iteration'), priority=100)
        def exception_raiser(trainer):
            raise TheOnlyError()

        self.trainer.extend(exception_raiser)
        ext = extensions.snapshot_object(self.trainer, self.filename,
                                         snapshot_on_error=True)
        self.trainer.extend(ext)

        self.assertFalse(os.path.exists(self.filename))
        with self.assertRaises(TheOnlyError):
            self.trainer.run()
        self.assertTrue(os.path.exists(self.filename))
@testing.parameterize(*testing.product({'fmt':
                                        ['snapshot_iter_{}',
                                         'snapshot_iter_{}.npz',
                                         '{}_snapshot_man_suffix.npz']}))
class TestFindSnapshot(unittest.TestCase):

    def setUp(self):
        self.path = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.path)

    def test_find_snapshot_files(self):
        # Real snapshots mixed with noise that must be ignored.
        real = (self.fmt.format(i) for i in range(1, 100))
        junk = ('dummy-foobar-iter{}'.format(i) for i in range(10, 304))
        junk2 = ('tmpsnapshot_iter_{}'.format(i) for i in range(10, 304))
        for name in itertools.chain(junk, real, junk2):
            with open(os.path.join(self.path, name), 'w'):
                pass
        found = _find_snapshot_files(self.fmt, self.path)
        assert len(found) == 99
        _timestamps, names = zip(*found)
        expected = sorted(self.fmt.format(i) for i in range(1, 100))
        assert sorted(names) == expected

    def test_find_latest_snapshot(self):
        names = [self.fmt.format(i) for i in range(1, 100)]
        base = time.time()
        for offset, name in enumerate(names):
            full = os.path.join(self.path, name)
            with open(full, 'w'):
                pass
            # mtime resolution of some filesystems, e.g. ext3 or HFS+, is
            # one second, so snapshots such as ``snapshot_iter_9`` and
            # ``snapshot_iter_99`` can share a timestamp when created
            # back-to-back.  Freshness detection does not use the integer
            # in the name, so spread the timestamps out by hand.  The
            # same applies to the other freshness tests in this file.
            stamp = base + offset
            os.utime(full, (stamp, stamp))
        assert _find_latest_snapshot(self.fmt, self.path) == \
            self.fmt.format(99)
@testing.parameterize(*testing.product({'fmt':
                                        ['snapshot_iter_{}_{}',
                                         'snapshot_iter_{}_{}.npz',
                                         '{}_snapshot_man_{}-suffix.npz',
                                         'snapshot_iter_{}.{}']}))
class TestFindSnapshot2(unittest.TestCase):
    # Same as TestFindSnapshot but with two-placeholder formats.

    def setUp(self):
        self.path = tempfile.mkdtemp()
        self.files = (self.fmt.format(i * 10, j * 10) for i, j
                      in itertools.product(range(0, 10), range(0, 10)))

    def tearDown(self):
        shutil.rmtree(self.path)

    def test_find_snapshot_files(self):
        junk = ('tmpsnapshot_iter_{}.{}'.format(i, j)
                for i, j in zip(range(10, 304), range(10, 200)))
        for name in itertools.chain(junk, self.files):
            with open(os.path.join(self.path, name), 'w'):
                pass
        found = _find_snapshot_files(self.fmt, self.path)
        _timestamps, names = zip(*found)
        expected = sorted(self.fmt.format(i * 10, j * 10)
                          for i, j in itertools.product(range(0, 10),
                                                        range(0, 10)))
        assert sorted(names) == expected
@testing.parameterize(*testing.product({'length_retain':
                                        [(100, 30), (10, 30), (1, 1000),
                                         (1000, 1), (1, 1), (1, 3), (2, 3)]}))
class TestFindStaleSnapshot(unittest.TestCase):

    def setUp(self):
        self.path = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.path)

    def test_find_stale_snapshot(self):
        length, retain = self.length_retain
        fmt = 'snapshot_iter_{}'
        base = time.time() - length * 2
        for idx in range(0, length):
            full = os.path.join(self.path, fmt.format(idx))
            with open(full, 'w'):
                pass
            # Spread mtimes apart; see the comment in ``TestFindSnapshot``.
            stamp = base + idx
            os.utime(full, (stamp, stamp))
        stale = list(_find_stale_snapshots(fmt, self.path, retain))
        n_stale = max(length - retain, 0)
        assert len(stale) == n_stale
        assert stale == [fmt.format(i) for i in range(0, n_stale)]
class TestRemoveStaleSnapshots(unittest.TestCase):

    def setUp(self):
        self.path = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.path)

    def test_remove_stale_snapshots(self):
        fmt = 'snapshot_iter_{.updater.iteration}'
        retain = 3
        snapshot = extensions.snapshot(filename=fmt, n_retains=retain,
                                       autoload=False)

        trainer = testing.get_trainer_with_mock_updater()
        trainer.out = self.path
        trainer.extend(snapshot, trigger=(1, 'iteration'), priority=2)

        class TimeStampUpdater():
            t = time.time() - 100
            name = 'ts_updater'
            priority = 1  # This must be called after snapshot taken

            def __call__(self, _trainer):
                filename = os.path.join(_trainer.out, fmt.format(_trainer))
                self.t += 1
                # For filesystems that does low timestamp precision
                os.utime(filename, (self.t, self.t))

        trainer.extend(TimeStampUpdater(), trigger=(1, 'iteration'))
        trainer.run()
        assert trainer.updater.iteration == 10
        assert trainer._done

        pattern = os.path.join(trainer.out, "snapshot_iter_*")
        kept = sorted(os.path.basename(p) for p in glob.glob(pattern))
        assert len(kept) == retain
        # Only the newest snapshots (iterations 8, 9 and 10) survive.
        expected = sorted('snapshot_iter_{}'.format(i) for i in range(8, 11))
        assert kept == expected

        # A fresh trainer pointed at the same directory must autoload the
        # latest snapshot without error.
        trainer2 = testing.get_trainer_with_mock_updater()
        trainer2.out = self.path
        assert not trainer2._done
        snapshot2 = extensions.snapshot(filename=fmt, autoload=True)
        snapshot2.initialize(trainer2)


testing.run_module(__name__, __file__)
| {
"content_hash": "d24aa5383f48c69f4a663adb07edc071",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 79,
"avg_line_length": 35.298892988929886,
"alnum_prop": 0.5831068367133598,
"repo_name": "hvy/chainer",
"id": "b5d469ac4d757a6b81d2b974976172eb31895ff0",
"size": "9566",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/training_tests/extensions_tests/test_snapshot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3796"
},
{
"name": "C",
"bytes": "1099"
},
{
"name": "C++",
"bytes": "1688016"
},
{
"name": "CMake",
"bytes": "51351"
},
{
"name": "Cuda",
"bytes": "191633"
},
{
"name": "Dockerfile",
"bytes": "6423"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "6425409"
},
{
"name": "Shell",
"bytes": "50581"
}
],
"symlink_target": ""
} |
import sge
from sge.gfx import Sprite, Color
from . import config
class Background(sge.gfx.Background):
    """Scrolling background built from the sprite configured for a state."""

    def __init__(self, state_name):
        # NOTE(review): sge.gfx.Background.__init__ is deliberately not
        # called; layers and color are assigned directly — confirm this
        # matches the sge API's expectations.
        self.state_name = state_name
        cfg = config.BACKGROUND_SPRITES[self.state_name]
        sprite = sge.gfx.Sprite(cfg['name'], cfg['directory'])
        self.layers = [sge.gfx.BackgroundLayer(sprite, 0, 0)]
        self.color = sge.gfx.Color("black")
| {
"content_hash": "837ecdde9f511b729574211d58c3595f",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 86,
"avg_line_length": 35.13333333333333,
"alnum_prop": 0.6736242884250474,
"repo_name": "jrrickerson/pyweek24",
"id": "38ab484a6fd4d42adb4819529d02ce73400903c1",
"size": "527",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "behind/background.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13320"
}
],
"symlink_target": ""
} |
from . import exc
class Transaction(object):
    """A database transaction in progress.

    Instances are obtained from the ``begin()`` method of SAConnection::

        with (yield from engine) as conn:
            trans = yield from conn.begin()
            try:
                yield from conn.execute("insert into x (a, b) values (1, 2)")
            except Exception:
                yield from trans.rollback()
            else:
                yield from trans.commit()

    The ``.rollback()`` and ``.commit()`` methods control the
    transaction boundaries.

    See also: SAConnection.begin(), SAConnection.begin_twophase(),
    SAConnection.begin_nested().
    """

    def __init__(self, connection, parent):
        self._connection = connection
        # A root transaction is its own parent.
        self._parent = parent if parent is not None else self
        self._is_active = True

    @property
    def is_active(self):
        """Return ``True`` if a transaction is active."""
        return self._is_active

    @property
    def connection(self):
        """Return transaction's connection (SAConnection instance)."""
        return self._connection

    async def close(self):
        """Close this transaction.

        The base transaction of a begin/commit nesting is rolled back;
        any other level is merely deactivated, so an enclosing
        transaction's scope is unaffected.
        """
        if not self._parent._is_active:
            return
        if self._parent is not self:
            self._is_active = False
        else:
            await self.rollback()

    async def rollback(self):
        """Roll back this transaction."""
        if self._parent._is_active:
            await self._do_rollback()
            self._is_active = False

    async def _do_rollback(self):
        await self._parent.rollback()

    async def commit(self):
        """Commit this transaction."""
        if not self._parent._is_active:
            raise exc.InvalidRequestError("This transaction is inactive")
        await self._do_commit()
        self._is_active = False

    async def _do_commit(self):
        pass

    async def __aenter__(self):
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        # Roll back on error; otherwise commit whatever is still active.
        if exc_type is not None:
            await self.rollback()
        elif self._is_active:
            await self.commit()
class RootTransaction(Transaction):
    """Outermost transaction; commits/rolls back via the connection."""

    def __init__(self, connection):
        super().__init__(connection, None)

    async def _do_commit(self):
        await self._connection._commit_impl()

    async def _do_rollback(self):
        await self._connection._rollback_impl()
class NestedTransaction(Transaction):
    """A 'nested', or SAVEPOINT, transaction.

    Obtained from the SAConnection.begin_nested() method; the interface
    is identical to that of Transaction.
    """

    # Savepoint identifier; populated externally before commit/rollback.
    _savepoint = None

    def __init__(self, connection, parent):
        super().__init__(connection, parent)

    async def _do_rollback(self):
        assert self._savepoint is not None, "Broken transaction logic"
        if self._is_active:
            await self._connection._rollback_to_savepoint_impl(
                self._savepoint, self._parent)

    async def _do_commit(self):
        assert self._savepoint is not None, "Broken transaction logic"
        if self._is_active:
            await self._connection._release_savepoint_impl(
                self._savepoint, self._parent)
class TwoPhaseTransaction(Transaction):
    """A two-phase transaction.

    Obtained from the SAConnection.begin_twophase() method.  The
    interface matches Transaction, with the addition of .prepare().
    """

    def __init__(self, connection, xid):
        super().__init__(connection, None)
        self._is_prepared = False
        self._xid = xid

    @property
    def xid(self):
        """Returns twophase transaction id."""
        return self._xid

    async def prepare(self):
        """Issue PREPARE for this transaction.

        After a PREPARE, the transaction can be committed.
        """
        if not self._parent.is_active:
            raise exc.InvalidRequestError("This transaction is inactive")
        await self._connection._prepare_twophase_impl(self._xid)
        self._is_prepared = True

    async def _do_rollback(self):
        await self._connection.rollback_prepared(
            self._xid, is_prepared=self._is_prepared)

    async def _do_commit(self):
        await self._connection.commit_prepared(
            self._xid, is_prepared=self._is_prepared)
| {
"content_hash": "e6d52bab47b0a2163447817d16fc9a57",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 77,
"avg_line_length": 29.077844311377245,
"alnum_prop": 0.6132619439868204,
"repo_name": "aio-libs/aiomysql",
"id": "ff15ac080c02b634c92f9bd437c7a88b7edba169",
"size": "4943",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiomysql/sa/transaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1739"
},
{
"name": "Python",
"bytes": "297984"
}
],
"symlink_target": ""
} |
import bisect
import itertools
import math
from array import array
from collections import defaultdict
from random import randint, shuffle
from time import time
from swift.common import exceptions
from swift.common.ring import RingData
from swift.common.ring.utils import tiers_for_dev, build_tier_tree
class RingBuilder(object):
"""
Used to build swift.common.ring.RingData instances to be written to disk
and used with swift.common.ring.Ring instances. See bin/swift-ring-builder
for example usage.
The instance variable devs_changed indicates if the device information has
changed since the last balancing. This can be used by tools to know whether
a rebalance request is an isolated request or due to added, changed, or
removed devices.
:param part_power: number of partitions = 2**part_power
:param replicas: number of replicas for each partition
:param min_part_hours: minimum number of hours between partition changes
"""
def __init__(self, part_power, replicas, min_part_hours):
self.part_power = part_power
self.replicas = replicas
self.min_part_hours = min_part_hours
self.parts = 2 ** self.part_power
self.devs = []
self.devs_changed = False
self.version = 0
# _replica2part2dev maps from replica number to partition number to
# device id. So, for a three replica, 2**23 ring, it's an array of
# three 2**23 arrays of device ids (unsigned shorts). This can work a
# bit faster than the 2**23 array of triplet arrays of device ids in
# many circumstances. Making one big 2**23 * 3 array didn't seem to
# have any speed change; though you're welcome to try it again (it was
# a while ago, code-wise, when I last tried it).
self._replica2part2dev = None
# _last_part_moves is a 2**23 array of unsigned bytes representing the
# number of hours since a given partition was last moved. This is used
# to guarantee we don't move a partition twice within a given number of
# hours (24 is my usual test). Removing a device or setting its weight
# to 0 overrides this behavior as it's assumed those actions are done
# because of device failure.
# _last_part_moves_epoch indicates the time the offsets in
# _last_part_moves is based on.
self._last_part_moves_epoch = None
self._last_part_moves = None
self._last_part_gather_start = 0
self._remove_devs = []
self._ring = None
def weight_of_one_part(self):
"""
Returns the weight of each partition as calculated from the
total weight of all the devices.
"""
try:
return self.parts * self.replicas / \
sum(d['weight'] for d in self._iter_devs())
except ZeroDivisionError:
raise exceptions.EmptyRingError('There are no devices in this '
'ring, or all devices have been '
'deleted')
def copy_from(self, builder):
"""
Reinitializes this RingBuilder instance from data obtained from the
builder dict given. Code example::
b = RingBuilder(1, 1, 1) # Dummy values
b.copy_from(builder)
This is to restore a RingBuilder that has had its b.to_dict()
previously saved.
"""
if hasattr(builder, 'devs'):
self.part_power = builder.part_power
self.replicas = builder.replicas
self.min_part_hours = builder.min_part_hours
self.parts = builder.parts
self.devs = builder.devs
self.devs_changed = builder.devs_changed
self.version = builder.version
self._replica2part2dev = builder._replica2part2dev
self._last_part_moves_epoch = builder._last_part_moves_epoch
self._last_part_moves = builder._last_part_moves
self._last_part_gather_start = builder._last_part_gather_start
self._remove_devs = builder._remove_devs
else:
self.part_power = builder['part_power']
self.replicas = builder['replicas']
self.min_part_hours = builder['min_part_hours']
self.parts = builder['parts']
self.devs = builder['devs']
self.devs_changed = builder['devs_changed']
self.version = builder['version']
self._replica2part2dev = builder['_replica2part2dev']
self._last_part_moves_epoch = builder['_last_part_moves_epoch']
self._last_part_moves = builder['_last_part_moves']
self._last_part_gather_start = builder['_last_part_gather_start']
self._remove_devs = builder['_remove_devs']
self._ring = None
def to_dict(self):
"""
Returns a dict that can be used later with copy_from to
restore a RingBuilder. swift-ring-builder uses this to
pickle.dump the dict to a file and later load that dict into
copy_from.
"""
return {'part_power': self.part_power,
'replicas': self.replicas,
'min_part_hours': self.min_part_hours,
'parts': self.parts,
'devs': self.devs,
'devs_changed': self.devs_changed,
'version': self.version,
'_replica2part2dev': self._replica2part2dev,
'_last_part_moves_epoch': self._last_part_moves_epoch,
'_last_part_moves': self._last_part_moves,
'_last_part_gather_start': self._last_part_gather_start,
'_remove_devs': self._remove_devs}
def change_min_part_hours(self, min_part_hours):
"""
Changes the value used to decide if a given partition can be moved
again. This restriction is to give the overall system enough time to
settle a partition to its new location before moving it to yet another
location. While no data would be lost if a partition is moved several
times quickly, it could make that data unreachable for a short period
of time.
This should be set to at least the average full partition replication
time. Starting it at 24 hours and then lowering it to what the
replicator reports as the longest partition cycle is best.
:param min_part_hours: new value for min_part_hours
"""
self.min_part_hours = min_part_hours
def get_ring(self):
"""
Get the ring, or more specifically, the swift.common.ring.RingData.
This ring data is the minimum required for use of the ring. The ring
builder itself keeps additional data such as when partitions were last
moved.
"""
# We cache the self._ring value so multiple requests for it don't build
# it multiple times. Be sure to set self._ring = None whenever the ring
# will need to be rebuilt.
if not self._ring:
# Make devs list (with holes for deleted devices) and not including
# builder-specific extra attributes.
devs = [None] * len(self.devs)
for dev in self._iter_devs():
devs[dev['id']] = dict((k, v) for k, v in dev.items()
if k not in ('parts', 'parts_wanted'))
# Copy over the replica+partition->device assignments, the device
# information, and the part_shift value (the number of bits to
# shift an unsigned int >I right to obtain the partition for the
# int).
if not self._replica2part2dev:
self._ring = RingData([], devs, 32 - self.part_power)
else:
self._ring = \
RingData([array('H', p2d) for p2d in self._replica2part2dev],
devs, 32 - self.part_power)
return self._ring
def add_dev(self, dev):
"""
Add a device to the ring. This device dict should have a minimum of the
following keys:
====== ===============================================================
id unique integer identifier amongst devices
weight a float of the relative weight of this device as compared to
others; this indicates how many partitions the builder will try
to assign to this device
zone integer indicating which zone the device is in; a given
partition will not be assigned to multiple devices within the
same zone
ip the ip address of the device
port the tcp port of the device
device the device's name on disk (sdb1, for example)
meta general use 'extra' field; for example: the online date, the
hardware description
====== ===============================================================
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev: device dict
"""
if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None:
raise exceptions.DuplicateDeviceError(
'Duplicate device id: %d' % dev['id'])
# Add holes to self.devs to ensure self.devs[dev['id']] will be the dev
while dev['id'] >= len(self.devs):
self.devs.append(None)
dev['weight'] = float(dev['weight'])
dev['parts'] = 0
self.devs[dev['id']] = dev
self._set_parts_wanted()
self.devs_changed = True
self.version += 1
def set_dev_weight(self, dev_id, weight):
"""
Set the weight of a device. This should be called rather than just
altering the weight key in the device dict directly, as the builder
will need to rebuild some internal state to reflect the change.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
:param weight: new weight for device
"""
self.devs[dev_id]['weight'] = weight
self._set_parts_wanted()
self.devs_changed = True
self.version += 1
def remove_dev(self, dev_id):
"""
Remove a device from the ring.
.. note::
This will not rebalance the ring immediately as you may want to
make multiple changes for a single rebalance.
:param dev_id: device id
"""
dev = self.devs[dev_id]
dev['weight'] = 0
self._remove_devs.append(dev)
self._set_parts_wanted()
self.devs_changed = True
self.version += 1
def rebalance(self):
"""
Rebalance the ring.
This is the main work function of the builder, as it will assign and
reassign partitions to devices in the ring based on weights, distinct
zones, recent reassignments, etc.
The process doesn't always perfectly assign partitions (that'd take a
lot more analysis and therefore a lot more time -- I had code that did
that before). Because of this, it keeps rebalancing until the device
skew (number of partitions a device wants compared to what it has) gets
below 1% or doesn't change by more than 1% (only happens with ring that
can't be balanced no matter what -- like with 3 zones of differing
weights with replicas set to 3).
:returns: (number_of_partitions_altered, resulting_balance)
"""
self._ring = None
if self._last_part_moves_epoch is None:
self._initial_balance()
self.devs_changed = False
return self.parts, self.get_balance()
retval = 0
self._update_last_part_moves()
last_balance = 0
while True:
reassign_parts = self._gather_reassign_parts()
self._reassign_parts(reassign_parts)
retval += len(reassign_parts)
while self._remove_devs:
self.devs[self._remove_devs.pop()['id']] = None
balance = self.get_balance()
if balance < 1 or abs(last_balance - balance) < 1 or \
retval == self.parts:
break
last_balance = balance
self.devs_changed = False
self.version += 1
return retval, balance
def validate(self, stats=False):
"""
Validate the ring.
This is a safety function to try to catch any bugs in the building
process. It ensures partitions have been assigned to real devices,
aren't doubly assigned, etc. It can also optionally check the even
distribution of partitions across devices.
:param stats: if True, check distribution of partitions across devices
:returns: if stats is True, a tuple of (device_usage, worst_stat), else
(None, None). device_usage[dev_id] will equal the number of
partitions assigned to that device. worst_stat will equal the
number of partitions the worst device is skewed from the
number it should have.
:raises RingValidationError: problem was found with the ring.
"""
if sum(d['parts'] for d in self._iter_devs()) != \
self.parts * self.replicas:
raise exceptions.RingValidationError(
'All partitions are not double accounted for: %d != %d' %
(sum(d['parts'] for d in self._iter_devs()),
self.parts * self.replicas))
if stats:
# dev_usage[dev_id] will equal the number of partitions assigned to
# that device.
dev_usage = array('I', (0 for _junk in xrange(len(self.devs))))
for part2dev in self._replica2part2dev:
for dev_id in part2dev:
dev_usage[dev_id] += 1
for part in xrange(self.parts):
for replica in xrange(self.replicas):
dev_id = self._replica2part2dev[replica][part]
if dev_id >= len(self.devs) or not self.devs[dev_id]:
raise exceptions.RingValidationError(
"Partition %d, replica %d was not allocated "
"to a device." %
(part, replica))
if stats:
weight_of_one_part = self.weight_of_one_part()
worst = 0
for dev in self._iter_devs():
if not dev['weight']:
if dev_usage[dev['id']]:
# If a device has no weight, but has partitions, then
# its overage is considered "infinity" and therefore
# always the worst possible. We show 999.99 for
# convenience.
worst = 999.99
break
continue
skew = abs(100.0 * dev_usage[dev['id']] /
(dev['weight'] * weight_of_one_part) - 100.0)
if skew > worst:
worst = skew
return dev_usage, worst
return None, None
def get_balance(self):
    """
    Get the balance of the ring. The balance value is the highest
    percentage a device is off from the number of partitions it wants.
    For instance, if the "worst" device wants 123 partitions (based on
    its relative weight) but holds 124, the balance is
    1 extra / 123 wanted * 100 = 0.83.

    :returns: balance of the ring
    """
    weight_of_one_part = self.weight_of_one_part()
    worst = 0
    for dev in self._iter_devs():
        if not dev['weight']:
            if not dev['parts']:
                continue
            # Zero weight yet still holding partitions: treated as
            # infinitely overloaded; 999.99 is shown for convenience.
            worst = 999.99
            break
        wanted = dev['weight'] * weight_of_one_part
        deviation = abs(100.0 * dev['parts'] / wanted - 100.0)
        worst = max(worst, deviation)
    return worst
def pretend_min_part_hours_passed(self):
    """
    Override min_part_hours by marking every partition as having been
    moved 255 hours ago, which forces a full rebalance on the next call
    to rebalance.
    """
    # 0xff is the saturation value the move-age byte array uses.
    for part_index in xrange(self.parts):
        self._last_part_moves[part_index] = 0xff
def get_part_devices(self, part):
    """
    Get the devices that are responsible for the partition.

    :param part: partition to get devices for
    :returns: list of device dicts, one per replica
    """
    devices = []
    for part2dev in self._replica2part2dev:
        devices.append(self.devs[part2dev[part]])
    return devices
def _iter_devs(self):
    """
    Yield every non-None device in the ring.

    Note that list(b._iter_devs())[some_id] may not equal
    b.devs[some_id]; check the 'id' key of each yielded device to
    obtain its dev_id.
    """
    for dev in self.devs:
        if dev is None:
            # Removed devices leave None placeholders behind.
            continue
        yield dev
def _set_parts_wanted(self):
    """
    Set each device's parts_wanted key to the number of partitions it
    wants based on its relative weight. Rebalancing sorts devices by
    this key to distribute partitions to the "most wanted" first; a
    negative parts_wanted marks an overweight device that would rather
    give partitions away.
    """
    weight_of_one_part = self.weight_of_one_part()
    for dev in self._iter_devs():
        if dev['weight']:
            target = int(weight_of_one_part * dev['weight'])
            dev['parts_wanted'] = target - dev['parts']
        else:
            # Zero weight means the device is being drained, so give it
            # a hugely negative want: it should shed everything.
            dev['parts_wanted'] = -self.parts * self.replicas
def _initial_balance(self):
    """
    Perform the first-ever partition assignment. This is the same as
    rebalancing an existing ring, preceded by some initial setup of the
    bookkeeping arrays.
    """
    self._replica2part2dev = [
        array('H', (0 for _junk in xrange(self.parts)))
        for _junk in xrange(self.replicas)]
    self._last_part_moves = array('B', (0 for _junk in xrange(self.parts)))
    self._last_part_moves_epoch = int(time())
    # Every partition needs every replica placed, so hand the complete
    # (partition, all-replicas) set to the reassignment machinery.
    all_replicas = range(self.replicas)
    self._reassign_parts((part, all_replicas)
                         for part in xrange(self.parts))
def _update_last_part_moves(self):
    """
    Age each partition's "hours since last moved" counter based on the
    current time. The builder won't move a partition that has been
    moved more recently than min_part_hours.
    """
    elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600
    for part in xrange(self.parts):
        aged = self._last_part_moves[part] + elapsed_hours
        # Saturate at 0xff; the array stores unsigned bytes.
        self._last_part_moves[part] = aged if aged < 0xff else 0xff
    self._last_part_moves_epoch = int(time())
def _gather_reassign_parts(self):
    """
    Returns a list of (partition, replicas) pairs to be reassigned by
    gathering from removed devices, insufficiently-far-apart replicas, and
    overweight drives.
    """
    # First we gather partitions from removed devices. Since removed
    # devices usually indicate device failures, we have no choice but to
    # reassign these partitions. However, we mark them as moved so later
    # choices will skip other replicas of the same partition if possible.
    removed_dev_parts = defaultdict(list)
    if self._remove_devs:
        dev_ids = [d['id'] for d in self._remove_devs if d['parts']]
        if dev_ids:
            for replica in xrange(self.replicas):
                part2dev = self._replica2part2dev[replica]
                for part in xrange(self.parts):
                    if part2dev[part] in dev_ids:
                        # Reset the move age so the later passes leave
                        # this partition's other replicas alone.
                        self._last_part_moves[part] = 0
                        removed_dev_parts[part].append(replica)

    # Now we gather partitions that are "at risk" because they aren't
    # currently sufficient spread out across the cluster.
    spread_out_parts = defaultdict(list)
    max_allowed_replicas = self._build_max_replicas_by_tier()
    for part in xrange(self.parts):
        # Only move one replica at a time if possible.
        if part in removed_dev_parts:
            continue

        # First, add up the count of replicas at each tier for each
        # partition.
        replicas_at_tier = defaultdict(lambda: 0)
        for replica in xrange(self.replicas):
            dev = self.devs[self._replica2part2dev[replica][part]]
            for tier in tiers_for_dev(dev):
                replicas_at_tier[tier] += 1

        # Now, look for partitions not yet spread out enough and not
        # recently moved.
        for replica in xrange(self.replicas):
            dev = self.devs[self._replica2part2dev[replica][part]]
            removed_replica = False
            for tier in tiers_for_dev(dev):
                if (replicas_at_tier[tier] > max_allowed_replicas[tier] and
                        self._last_part_moves[part] >= self.min_part_hours):
                    self._last_part_moves[part] = 0
                    spread_out_parts[part].append(replica)
                    # The device is giving this replica up, so it now
                    # wants one more partition than before.
                    dev['parts_wanted'] += 1
                    dev['parts'] -= 1
                    removed_replica = True
                    break
            if removed_replica:
                # Keep the per-tier counts in sync for the remaining
                # replicas of this partition.
                for tier in tiers_for_dev(dev):
                    replicas_at_tier[tier] -= 1

    # Last, we gather partitions from devices that are "overweight" because
    # they have more partitions than their parts_wanted.
    reassign_parts = defaultdict(list)

    # We randomly pick a new starting point in the "circular" ring of
    # partitions to try to get a better rebalance when called multiple
    # times.
    start = self._last_part_gather_start / 4 + randint(0, self.parts / 2)
    self._last_part_gather_start = start
    for replica in xrange(self.replicas):
        part2dev = self._replica2part2dev[replica]
        # Walk all partitions once, wrapping around from the random
        # starting point.
        for part in itertools.chain(xrange(start, self.parts),
                                    xrange(0, start)):
            if self._last_part_moves[part] < self.min_part_hours:
                continue
            if part in removed_dev_parts or part in spread_out_parts:
                continue
            dev = self.devs[part2dev[part]]
            if dev['parts_wanted'] < 0:
                self._last_part_moves[part] = 0
                dev['parts_wanted'] += 1
                dev['parts'] -= 1
                reassign_parts[part].append(replica)

    # Later updates win on key collisions, so removed-device gatherings
    # take precedence over the other two categories.
    reassign_parts.update(spread_out_parts)
    reassign_parts.update(removed_dev_parts)

    reassign_parts_list = list(reassign_parts.iteritems())
    # We shuffle the partitions to reassign so we get a more even
    # distribution later. There has been discussion of trying to distribute
    # partitions more "regularly" because that would actually reduce risk
    # but 1) it is really difficult to do this with uneven clusters and 2)
    # it would concentrate load during failure recovery scenarios
    # (increasing risk). The "right" answer has yet to be debated to
    # conclusion, but working code wins for now.
    shuffle(reassign_parts_list)
    return reassign_parts_list
def _reassign_parts(self, reassign_parts):
    """
    For an existing ring data set, partitions are reassigned similarly to
    the initial assignment. The devices are ordered by how many partitions
    they still want and kept in that order throughout the process. The
    gathered partitions are iterated through, assigning them to devices
    according to the "most wanted" while keeping the replicas as "far
    apart" as possible. Two different zones are considered the
    farthest-apart things, followed by different ip/port pairs within a
    zone; the least-far-apart things are different devices with the same
    ip/port pair in the same zone.

    If you want more replicas than devices, you won't get all your
    replicas.

    :param reassign_parts: An iterable of (part, replicas_to_replace)
                           pairs. replicas_to_replace is an iterable of the
                           replica (an int) to replace for that partition.
                           replicas_to_replace may be shared for multiple
                           partitions, so be sure you do not modify it.
    """
    for dev in self._iter_devs():
        dev['sort_key'] = self._sort_key_for(dev)
    # Zero-weight devices are never valid assignment targets.
    available_devs = \
        sorted((d for d in self._iter_devs() if d['weight']),
               key=lambda x: x['sort_key'])

    tier2children = build_tier_tree(available_devs)

    # Parallel per-tier lists: tier2devs and tier2sort_key are kept in
    # lock-step, sorted ascending by sort key, so the last element of
    # each is always the tier's hungriest device.
    tier2devs = defaultdict(list)
    tier2sort_key = defaultdict(list)
    tiers_by_depth = defaultdict(set)
    for dev in available_devs:
        for tier in tiers_for_dev(dev):
            tier2devs[tier].append(dev)  # <-- starts out sorted!
            tier2sort_key[tier].append(dev['sort_key'])
            tiers_by_depth[len(tier)].add(tier)

    for part, replace_replicas in reassign_parts:
        # Gather up what other tiers (zones, ip_ports, and devices) the
        # replicas not-to-be-moved are in for this part.
        other_replicas = defaultdict(lambda: 0)
        for replica in xrange(self.replicas):
            if replica not in replace_replicas:
                dev = self.devs[self._replica2part2dev[replica][part]]
                for tier in tiers_for_dev(dev):
                    other_replicas[tier] += 1

        def find_home_for_replica(tier=(), depth=1):
            # Order the tiers by how many replicas of this
            # partition they already have. Then, of the ones
            # with the smallest number of replicas, pick the
            # tier with the hungriest drive and then continue
            # searching in that subtree.
            #
            # There are other strategies we could use here,
            # such as hungriest-tier (i.e. biggest
            # sum-of-parts-wanted) or picking one at random.
            # However, hungriest-drive is what was used here
            # before, and it worked pretty well in practice.
            #
            # Note that this allocator will balance things as
            # evenly as possible at each level of the device
            # layout. If your layout is extremely unbalanced,
            # this may produce poor results.
            candidate_tiers = tier2children[tier]
            min_count = min(other_replicas[t] for t in candidate_tiers)
            candidate_tiers = [t for t in candidate_tiers
                               if other_replicas[t] == min_count]
            # The last sort key in each tier belongs to its hungriest
            # device, so sorting by it ranks whole tiers by hunger.
            candidate_tiers.sort(
                key=lambda t: tier2sort_key[t][-1])
            if depth == max(tiers_by_depth.keys()):
                return tier2devs[candidate_tiers[-1]][-1]
            return find_home_for_replica(tier=candidate_tiers[-1],
                                         depth=depth + 1)

        for replica in replace_replicas:
            dev = find_home_for_replica()
            dev['parts_wanted'] -= 1
            dev['parts'] += 1
            old_sort_key = dev['sort_key']
            new_sort_key = dev['sort_key'] = self._sort_key_for(dev)
            for tier in tiers_for_dev(dev):
                other_replicas[tier] += 1
                # Re-position the device in each tier's parallel sorted
                # lists so the ascending-by-hunger invariant holds.
                index = bisect.bisect_left(tier2sort_key[tier],
                                           old_sort_key)
                tier2devs[tier].pop(index)
                tier2sort_key[tier].pop(index)

                new_index = bisect.bisect_left(tier2sort_key[tier],
                                               new_sort_key)
                tier2devs[tier].insert(new_index, dev)
                tier2sort_key[tier].insert(new_index, new_sort_key)

            self._replica2part2dev[replica][part] = dev['id']

    # Just to save memory and keep from accidental reuse.
    for dev in self._iter_devs():
        del dev['sort_key']
def _sort_key_for(self, dev):
    """
    Return a sortable string key ranking a device by how many more
    partitions it wants.

    The first field is (parts * replicas) + parts_wanted; the bias keeps
    negative parts_wanted values sorting below positive ones under a
    plain ascii compare. The maximum value of self.parts is 2^32, which
    is 9 hex digits wide (0x100000000), so a 16-digit field gives plenty
    of breathing room: you'd need more than 2^28 replicas to overflow
    it. The random middle field breaks ties between equally hungry
    devices and the device id makes the key unique.
    """
    biased_want = (self.parts * self.replicas) + dev['parts_wanted']
    return '%016x.%04x.%04x' % (biased_want,
                                randint(0, 0xffff),
                                dev['id'])
def _build_max_replicas_by_tier(self):
    """
    Returns a dict of (tier: replica_count) for all tiers in the ring.

    There will always be a () entry as the root of the structure, whose
    replica_count will equal the ring's replica_count.

    Then there will be (dev_id,) entries for each device, indicating the
    maximum number of replicas the device might have for any given
    partition. Anything greater than 1 indicates a partition at serious
    risk, as the data on that partition will not be stored distinctly at
    the ring's replica_count.

    Next there will be (dev_id, ip_port) entries for each device,
    indicating the maximum number of replicas the device shares with other
    devices on the same ip_port for any given partition. Anything greater
    than 1 indicates a partition at elevated risk, as if that ip_port were
    to fail multiple replicas of that partition would be unreachable.

    Last there will be (dev_id, ip_port, zone) entries for each device,
    indicating the maximum number of replicas the device shares with other
    devices within the same zone for any given partition. Anything greater
    than 1 indicates a partition at slightly elevated risk, as if that zone
    were to fail multiple replicas of that partition would be unreachable.

    Example return dict for the common SAIO setup::

        {(): 3,
         (1,): 1.0,
         (1, '127.0.0.1:6010'): 1.0,
         (1, '127.0.0.1:6010', 0): 1.0,
         (2,): 1.0,
         (2, '127.0.0.1:6020'): 1.0,
         (2, '127.0.0.1:6020', 1): 1.0,
         (3,): 1.0,
         (3, '127.0.0.1:6030'): 1.0,
         (3, '127.0.0.1:6030', 2): 1.0,
         (4,): 1.0,
         (4, '127.0.0.1:6040'): 1.0,
         (4, '127.0.0.1:6040', 3): 1.0}
    """
    # Used by walk_tree to know what entries to create for each recursive
    # call.
    tier2children = build_tier_tree(self._iter_devs())

    def walk_tree(tier, replica_count):
        # Each subtier may hold at most an even share (rounded up via
        # math.ceil, hence the float entries) of the replicas allowed at
        # its parent tier.
        mr = {tier: replica_count}
        if tier in tier2children:
            subtiers = tier2children[tier]
            for subtier in subtiers:
                submax = math.ceil(float(replica_count) / len(subtiers))
                mr.update(walk_tree(subtier, submax))
        return mr
    return walk_tree((), self.replicas)
| {
"content_hash": "1fcaf0f023c9962a05ba5792a4dceb3c",
"timestamp": "",
"source": "github",
"line_count": 724,
"max_line_length": 79,
"avg_line_length": 45.011049723756905,
"alnum_prop": 0.5728182152939733,
"repo_name": "NewpTone/StackLab-swift",
"id": "89ae9d2ec1b800289e644b38b6820891cd49f3d6",
"size": "33178",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "swift/common/ring/builder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2253824"
},
{
"name": "Shell",
"bytes": "364"
}
],
"symlink_target": ""
} |
from client import exceptions as ex
from client.sources.common import models
from client.sources.ok_test import concept
import mock
import unittest
class ConceptSuiteTest(unittest.TestCase):
TEST_NAME = 'A'
SUITE_NUMBER = 0
def makeTest(self, cases):
test = mock.Mock(spec=models.Test)
return concept.ConceptSuite(test, False, False, type='concept', cases=cases)
def testConstructor_noCases(self):
try:
self.makeTest([])
except TypeError:
self.fail()
def testConstructor_validTestCase(self):
try:
self.makeTest([
{
'question': 'Question 1',
'answer': 'Answer',
},
{
'question': 'Question 1',
'answer': 'Answer',
},
])
except TypeError:
self.fail()
def testConstructor_missingQuestion(self):
self.assertRaises(ex.SerializeException, self.makeTest, [
{
'answer': 'Answer',
},
{
'question': 'Question 1',
'answer': 'Answer',
},
])
def testConstructor_missingAnswer(self):
self.assertRaises(ex.SerializeException, self.makeTest, [
{
'question': 'Question 1',
'answer': 'Answer',
},
{
'question': 'Question 1',
},
])
def testRun_noCases(self):
test = self.makeTest([])
self.assertEqual({
'passed': 0,
'failed': 0,
'locked': 0,
}, test.run(self.TEST_NAME, self.SUITE_NUMBER))
def testRun_lockedCases(self):
test = self.makeTest([
{
'question': 'Question 1',
'answer': 'Answer',
'locked': True,
},
{
'question': 'Question 1',
'answer': 'Answer',
},
])
self.assertEqual({
'passed': 0,
'failed': 0,
'locked': 2, # Can't continue if preceding test is locked.
}, test.run(self.TEST_NAME, self.SUITE_NUMBER))
def testRun_noLockedCases(self):
test = self.makeTest([
{
'question': 'Question 1',
'answer': 'Answer',
},
{
'question': 'Question 1',
'answer': 'Answer',
},
])
self.assertEqual({
'passed': 2,
'failed': 0,
'locked': 0,
}, test.run(self.TEST_NAME, self.SUITE_NUMBER))
| {
"content_hash": "90fa12e2d761b65bc4bbc826e1a46ae9",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 84,
"avg_line_length": 28.2020202020202,
"alnum_prop": 0.44197707736389685,
"repo_name": "Cal-CS-61A-Staff/ok-client",
"id": "4739b470841f5bdd908da37495db3a147aad8b3f",
"size": "2792",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sources/ok_test/concept_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "368980"
}
],
"symlink_target": ""
} |
from random import randint
import packet
import time
import os
from zeroos.core0 import client
import configparser
import sys
import requests
def create_new_device(manager, hostname, branch='master'):
    """
    Provision a new packet.net baremetal_0 machine that iPXE-boots
    zero-os from the given branch.

    Exits the process with status 1 when no facility has capacity.
    Returns the created device object.
    """
    project = manager.list_projects()[0]
    ipxe_script_url = 'http://unsecure.bootstrap.gig.tech/ipxe/{}/0/development'.format(branch)

    # Pick the first facility that can fit one baremetal_0 machine.
    chosen_facility = None
    for facility_code in (f.code for f in manager.list_facilities()):
        if manager.validate_capacity([(facility_code, 'baremetal_0', 1)]):
            chosen_facility = facility_code
            break
    if not chosen_facility:
        print('No enough resources on packet.net to create nodes')
        sys.exit(1)
    print("Available facility: %s" % chosen_facility)

    print('creating new machine .. ')
    return manager.create_device(project_id=project.id,
                                 hostname=hostname,
                                 plan='baremetal_0',
                                 operating_system='custom_ipxe',
                                 ipxe_script_url=ipxe_script_url,
                                 facility=chosen_facility)
def delete_device(manager):
    """
    Delete the test machine recorded in config.ini, retrying the API
    call up to 5 times.

    Reads the machine hostname from config.ini's [main] section; does
    nothing when it is empty. Prints progress and failure messages
    instead of raising.
    """
    config = configparser.ConfigParser()
    config.read('config.ini')
    hostname = config['main']['machine_hostname']
    if not hostname:
        return
    project = manager.list_projects()[0]
    for dev in manager.list_devices(project.id):
        if dev.hostname != hostname:
            continue
        print('%s is about to be deleted' % hostname)
        for _attempt in range(5):
            try:
                manager.call_api('devices/%s' % dev.id, type='DELETE')
                print("machine has been deleted successfully")
                break
            except Exception as e:
                print(e.args)
                # Only packet API errors carry a ``cause`` attribute; a
                # plain exception here used to raise AttributeError and
                # abort the retry loop, so read it defensively.
                print(getattr(e, 'cause', None))
                continue
        else:
            # All 5 attempts raised.
            print("%s hasn't been deleted" % hostname)
def check_status(found, branch):
    """
    Poll the build-status page for the 0-core build of ``branch``.

    With ``found`` True: return the build's 'started' timestamp, or the
    string 'No_build_triggered' when no status entry appears within
    roughly 10 seconds.
    With ``found`` False: block until the branch's entry disappears from
    the status page (i.e. the running build finished), then return None.
    """
    session = requests.Session()
    url = 'https://build.gig.tech/build/status'
    t1 = time.time()
    while True:
        try:
            if found:
                t2 = time.time()
                # Give up once ~10 seconds have elapsed without a build
                # showing up. (The original comparison was inverted --
                # ``t1 + 10 > t2`` -- which returned immediately on the
                # very first pass and made the polling dead code.)
                if t2 > t1 + 10:
                    return 'No_build_triggered'
            res_st = session.get(url)
            t = res_st.json()['zero-os/0-core/{}'.format(branch)]['started']
            if found:
                return t
        except Exception:
            # The branch key disappears from the status page when no
            # build is running; request/JSON errors land here too. A
            # bare ``except:`` previously swallowed KeyboardInterrupt
            # and SystemExit as well.
            if found:
                continue
            break
        time.sleep(1)
def create_pkt_machine(manager, branch):
    """
    Create a packet.net machine booting 0-core ``branch`` and record its
    IP address and hostname in config.ini for the test run.

    Blocks until the device reports 'active', then waits a further 150
    seconds for the OS to finish booting.
    """
    hostname = '0core{}-travis'.format(randint(100, 300))
    try:
        device = create_new_device(manager, hostname, branch=branch)
    except Exception:
        # Was a bare ``except:``, which also intercepted
        # KeyboardInterrupt/SystemExit before re-raising.
        print('device hasn\'t been created')
        raise
    print('provisioning the new machine ..')
    while True:
        dev = manager.get_device(device.id)
        time.sleep(5)
        if dev.state == 'active':
            break
    print('Giving the machine time till it finish booting')
    time.sleep(150)
    print('preparing machine for tests')
    config = configparser.ConfigParser()
    config.read('config.ini')
    config['main']['target_ip'] = dev.ip_addresses[0]['address']
    config['main']['machine_hostname'] = hostname
    with open('config.ini', 'w') as configfile:
        config.write(configfile)
if __name__ == '__main__':
    # Usage: packet_script.py <action> <packet-auth-token> [branch [branch]]
    action = sys.argv[1]
    token = sys.argv[2]
    manager = packet.Manager(auth_token=token)
    print(os.system('echo $TRAVIS_EVENT_TYPE'))
    if action == 'delete':
        print('deleting the g8os machine ..')
        delete_device(manager)
    else:
        branch = sys.argv[3]
        # A fifth argument overrides the branch when present.
        if len(sys.argv) == 5:
            branch = sys.argv[4]
        print('branch: {}'.format(branch))
        t = check_status(True, branch)
        if t != 'No_build_triggered':
            print('build has been started at {}'.format(t))
            print('waiting for g8os build to pass ..')
            # Blocks until the build entry leaves the status page.
            check_status(False, branch)
            time.sleep(2)
            url2 = 'https://build.gig.tech/build/history'
            session = requests.Session()
            res_hs = session.get(url2)
            # The newest history entry should be the build we watched;
            # only create the test machine when that build succeeded.
            if res_hs.json()[0]['started'] == t:
                if res_hs.json()[0]['status'] == 'success':
                    create_pkt_machine(manager, branch)
                else:
                    print('build has failed')
            else:
                print('build wasn\'t found in the history page')
        else:
            # No build was triggered, so boot from the branch as-is.
            create_pkt_machine(manager, branch)
| {
"content_hash": "97b7ee232f2fb905ddf111b0b035e6fe",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 95,
"avg_line_length": 34.634328358208954,
"alnum_prop": 0.5432018961430726,
"repo_name": "g8os/core0",
"id": "fe7de9ce26cca5764ac045251ab2e8723a1e1d7d",
"size": "4660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/packet_script.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "295426"
},
{
"name": "Lua",
"bytes": "3514"
},
{
"name": "Makefile",
"bytes": "833"
},
{
"name": "Python",
"bytes": "62060"
},
{
"name": "Shell",
"bytes": "271"
}
],
"symlink_target": ""
} |
import os
import sys

# Prefer setuptools; fall back to distutils on minimal installs.
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

# Imported so the version can be single-sourced from the package itself.
import optcoretech

version = optcoretech.__version__

setup(
    name='optcoretech-web',
    version=version,
    author='',
    author_email='sheeshmohsin@gmail.com',
    packages = [
        'optcoretech',
    ],
    include_package_data=True,
    install_requires=[
        'Django>=1.6.1',
    ],
    zip_safe=False,
    scripts=['optcoretech/manage.py'],
)
| {
"content_hash": "10f948948bfd012434f3300ab473d897",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 42,
"avg_line_length": 17.296296296296298,
"alnum_prop": 0.6616702355460385,
"repo_name": "nishantsingla/optcoretech",
"id": "aebbd5cb3389671222a1f6f4b0818c06bf59b67e",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
} |
"""Classes for different types of export output."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.util.tf_export import estimator_export
@estimator_export('estimator.export.ExportOutput')
class ExportOutput(object):
  """Represents an output of a model that can be served.

  These typically correspond to model heads.
  """

  __metaclass__ = abc.ABCMeta

  _SEPARATOR_CHAR = '/'

  @abc.abstractmethod
  def as_signature_def(self, receiver_tensors):
    """Generate a SignatureDef proto for inclusion in a MetaGraphDef.

    The SignatureDef will specify outputs as described in this ExportOutput,
    and will use the provided receiver_tensors as inputs.

    Args:
      receiver_tensors: a `Tensor`, or a dict of string to `Tensor`,
        specifying input nodes that will be fed.
    """
    pass

  def _check_output_key(self, key, error_label):
    """Flatten tuple keys (multi-head models) and require a string key."""
    if isinstance(key, tuple):
      key = self._SEPARATOR_CHAR.join(key)

    if not isinstance(key, six.string_types):
      raise ValueError(
          '{} output key must be a string; got {}.'.format(error_label, key))
    return key

  def _wrap_and_check_outputs(
      self, outputs, single_output_default_name, error_label=None):
    """Normalize `outputs` into a fresh dict of string keys to Tensors.

    A bare Tensor is wrapped under `single_output_default_name`; every
    key is validated (and tuple keys flattened) and every value must be
    a `Tensor`.

    Args:
      outputs: A `Tensor` or a dict of string to `Tensor`.
      single_output_default_name: key to use when `outputs` is a raw tensor.
      error_label: descriptive string for error messages; defaults to
        `single_output_default_name`.

    Returns:
      A dict of tensors.

    Raises:
      ValueError: if a key is not a string (or tuple of strings) or a
        value is not a Tensor.
    """
    if not isinstance(outputs, dict):
      outputs = {single_output_default_name: outputs}

    label = error_label or single_output_default_name
    checked = {}
    for raw_key, tensor in outputs.items():
      checked_key = self._check_output_key(raw_key, label)
      if not isinstance(tensor, ops.Tensor):
        raise ValueError(
            '{} output value must be a Tensor; got {}.'.format(
                label, tensor))
      checked[checked_key] = tensor
    return checked
@estimator_export('estimator.export.ClassificationOutput')
class ClassificationOutput(ExportOutput):
  """Represents the output of a classification head.

  At least one of `classes` and `scores` must be set. The classes
  `Tensor` must carry string labels, not integer class IDs.

  If only `classes` is set it is interpreted as top-k results in
  descending order. If only `scores` is set it is interpreted as one
  score per class in class-ID order. If both are set they are zipped:
  each score belongs to the class at the same index, and clients should
  not depend on the order of entries.
  """

  def __init__(self, scores=None, classes=None):
    """Constructor for `ClassificationOutput`.

    Args:
      scores: A float `Tensor` of per-class scores (sometimes but not
        always interpretable as probabilities), or `None` if `classes`
        is set. Interpretation varies -- see class doc.
      classes: A string `Tensor` of predicted class labels, or `None`
        if `scores` is set. Interpretation varies -- see class doc.

    Raises:
      ValueError: if neither argument is set, or either has the wrong
        type/dtype.
    """
    scores_ok = (scores is None or
                 (isinstance(scores, ops.Tensor) and
                  scores.dtype.is_floating))
    if not scores_ok:
      raise ValueError('Classification scores must be a float32 Tensor; '
                       'got {}'.format(scores))
    classes_ok = (classes is None or
                  (isinstance(classes, ops.Tensor) and
                   dtypes.as_dtype(classes.dtype) == dtypes.string))
    if not classes_ok:
      raise ValueError('Classification classes must be a string Tensor; '
                       'got {}'.format(classes))
    if scores is None and classes is None:
      raise ValueError('At least one of scores and classes must be set.')

    self._scores = scores
    self._classes = classes

  @property
  def scores(self):
    return self._scores

  @property
  def classes(self):
    return self._classes

  def as_signature_def(self, receiver_tensors):
    # Classification signatures accept exactly one serialized-Example
    # string tensor as input.
    error = ('Classification input must be a single string Tensor; '
             'got {}'.format(receiver_tensors))
    if len(receiver_tensors) != 1:
      raise ValueError(error)
    (_, examples), = receiver_tensors.items()
    if dtypes.as_dtype(examples.dtype) != dtypes.string:
      raise ValueError(error)
    return signature_def_utils.classification_signature_def(
        examples, self.classes, self.scores)
@estimator_export('estimator.export.RegressionOutput')
class RegressionOutput(ExportOutput):
  """Represents the output of a regression head."""

  def __init__(self, value):
    """Constructor for `RegressionOutput`.

    Args:
      value: a float `Tensor` of predicted values. Required.

    Raises:
      ValueError: if `value` is not a float `Tensor`.
    """
    is_float_tensor = (isinstance(value, ops.Tensor) and
                       value.dtype.is_floating)
    if not is_float_tensor:
      raise ValueError('Regression output value must be a float32 Tensor; '
                       'got {}'.format(value))
    self._value = value

  @property
  def value(self):
    return self._value

  def as_signature_def(self, receiver_tensors):
    # Regression signatures accept exactly one serialized-Example
    # string tensor as input.
    error = ('Regression input must be a single string Tensor; '
             'got {}'.format(receiver_tensors))
    if len(receiver_tensors) != 1:
      raise ValueError(error)
    (_, examples), = receiver_tensors.items()
    if dtypes.as_dtype(examples.dtype) != dtypes.string:
      raise ValueError(error)
    return signature_def_utils.regression_signature_def(examples, self.value)
@estimator_export('estimator.export.PredictOutput')
class PredictOutput(ExportOutput):
  """Represents the output of a generic prediction head.

  A generic prediction need not be either a classification or a
  regression. Named outputs must be provided as a dict from string to
  `Tensor`.
  """

  _SINGLE_OUTPUT_DEFAULT_NAME = 'output'

  def __init__(self, outputs):
    """Constructor for PredictOutput.

    Args:
      outputs: A `Tensor` or a dict of string to `Tensor` representing
        the predictions.

    Raises:
      ValueError: if any key is not a string or any value is not a
        `Tensor`.
    """
    # Reuse the base-class normalization: a bare tensor is keyed under
    # 'output'.
    self._outputs = self._wrap_and_check_outputs(
        outputs, self._SINGLE_OUTPUT_DEFAULT_NAME, error_label='Prediction')

  @property
  def outputs(self):
    return self._outputs

  def as_signature_def(self, receiver_tensors):
    return signature_def_utils.predict_signature_def(
        receiver_tensors, self.outputs)
class _SupervisedOutput(ExportOutput):
  """Represents the output of a supervised training or eval process."""
  __metaclass__ = abc.ABCMeta

  # Key prefixes used for the three output categories.
  LOSS_NAME = 'loss'
  PREDICTIONS_NAME = 'predictions'
  METRICS_NAME = 'metrics'

  # Suffixes appended to each metric key for its value/update tensors.
  METRIC_VALUE_SUFFIX = 'value'
  METRIC_UPDATE_SUFFIX = 'update_op'

  # Class-level defaults keep the properties below safe to read even
  # when the corresponding constructor argument was omitted.
  _loss = None
  _predictions = None
  _metrics = None

  def __init__(self, loss=None, predictions=None, metrics=None):
    """Constructor for SupervisedOutput (ie, Train or Eval output).

    Args:
      loss: dict of Tensors or single Tensor representing calculated loss.
      predictions: dict of Tensors or single Tensor representing model
        predictions.
      metrics: dict of (metric_value, update_op) tuples, or a single tuple.
        metric_value must be a Tensor, and update_op must be a Tensor or Op.

    Raises:
      ValueError: if any of the outputs' dict keys are not strings or tuples
        of strings or the values are not Tensors (or Operations in the case
        of update_op).
    """
    if loss is not None:
      # Wrap/validate, then namespace keys under 'loss/'.
      loss_dict = self._wrap_and_check_outputs(loss, self.LOSS_NAME)
      self._loss = self._prefix_output_keys(loss_dict, self.LOSS_NAME)
    if predictions is not None:
      # Wrap/validate, then namespace keys under 'predictions/'.
      pred_dict = self._wrap_and_check_outputs(
          predictions, self.PREDICTIONS_NAME)
      self._predictions = self._prefix_output_keys(
          pred_dict, self.PREDICTIONS_NAME)
    if metrics is not None:
      self._metrics = self._wrap_and_check_metrics(metrics)

  def _prefix_output_keys(self, output_dict, output_name):
    """Prepend output_name to the output_dict keys if it doesn't exist.

    This produces predictable prefixes for the pre-determined outputs
    of SupervisedOutput.

    Args:
      output_dict: dict of string to Tensor, assumed valid.
      output_name: prefix string to prepend to existing keys.

    Returns:
      dict with updated keys and existing values.
    """
    new_outputs = {}
    for key, val in output_dict.items():
      key = self._prefix_key(key, output_name)
      new_outputs[key] = val
    return new_outputs

  def _prefix_key(self, key, output_name):
    # Only prepend when the key doesn't already start with the prefix.
    if key.find(output_name) != 0:
      key = output_name + self._SEPARATOR_CHAR + key
    return key

  def _wrap_and_check_metrics(self, metrics):
    """Handle the saving of metrics.

    Metrics is either a tuple of (value, update_op), or a dict of such
    tuples. Here, we separate out the tuples and create a dict with names
    to tensors: each metric key expands to '<key>/value' and
    '<key>/update_op' entries.

    Args:
      metrics: dict of (metric_value, update_op) tuples, or a single tuple.

    Returns:
      dict of output_names to tensors

    Raises:
      ValueError: if the dict key is not a string, or the metric values or
        ops are not tensors.
    """
    if not isinstance(metrics, dict):
      metrics = {self.METRICS_NAME: metrics}

    outputs = {}
    for key, (metric_val, metric_op) in metrics.items():
      key = self._check_output_key(key, self.METRICS_NAME)
      key = self._prefix_key(key, self.METRICS_NAME)

      val_name = key + self._SEPARATOR_CHAR + self.METRIC_VALUE_SUFFIX
      op_name = key + self._SEPARATOR_CHAR + self.METRIC_UPDATE_SUFFIX
      if not isinstance(metric_val, ops.Tensor):
        raise ValueError(
            '{} output value must be a Tensor; got {}.'.format(
                key, metric_val))
      # The update half may legitimately be either a Tensor or an Op.
      if (not isinstance(metric_op, ops.Tensor) and
          not isinstance(metric_op, ops.Operation)):
        raise ValueError(
            '{} update_op must be a Tensor or Operation; got {}.'.format(
                key, metric_op))
      outputs[val_name] = metric_val
      outputs[op_name] = metric_op

    return outputs

  @property
  def loss(self):
    return self._loss

  @property
  def predictions(self):
    return self._predictions

  @property
  def metrics(self):
    return self._metrics

  @abc.abstractmethod
  def _get_signature_def_fn(self):
    """Returns a function that produces a SignatureDef given desired outputs."""
    pass

  def as_signature_def(self, receiver_tensors):
    signature_def_fn = self._get_signature_def_fn()
    return signature_def_fn(
        receiver_tensors, self.loss, self.predictions, self.metrics)
class TrainOutput(_SupervisedOutput):
  """Represents the output of a supervised training process.

  Type-checks and wraps the loss, predictions, and metrics values and
  emits the corresponding training SignatureDef on export.
  """

  def _get_signature_def_fn(self):
    # Delegate SignatureDef construction to the shared training helper.
    return signature_def_utils.supervised_train_signature_def
class EvalOutput(_SupervisedOutput):
  """Represents the output of a supervised eval process.

  Type-checks and wraps the loss, predictions, and metrics values and
  emits the corresponding eval SignatureDef on export.
  """

  def _get_signature_def_fn(self):
    # Delegate SignatureDef construction to the shared eval helper.
    return signature_def_utils.supervised_eval_signature_def
| {
"content_hash": "24570338246d5a0d6bf5d601e6496aad",
"timestamp": "",
"source": "github",
"line_count": 380,
"max_line_length": 80,
"avg_line_length": 32.81315789473684,
"alnum_prop": 0.6719865265859332,
"repo_name": "gojira/tensorflow",
"id": "6c26d299851eaea74f1e564d0fac217f238d76a2",
"size": "13158",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tensorflow/python/estimator/export/export_output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "337045"
},
{
"name": "C++",
"bytes": "41535344"
},
{
"name": "CMake",
"bytes": "201232"
},
{
"name": "Go",
"bytes": "1147256"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "737815"
},
{
"name": "Jupyter Notebook",
"bytes": "2155207"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48293"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "35216559"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "428390"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
"""Module for testing the del network device command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelVirtualSwitch(TestBrokerCommand):
    """Tests deleting virtual switches and their port group bindings.

    Tests are numbered so they run in order: unbind port groups first,
    then delete the switches, then verify deletion and idempotency errors.
    """

    def test_100_unregister_pg_tag(self):
        # Remove the tag-based port group binding from utvswitch.
        self.noouttest(["unbind_port_group", "--virtual_switch", "utvswitch",
                        "--tag", "710"])

    def test_102_unbind_pg_custom_type(self):
        # Remove the network-based port group binding from utvswitch.
        net = self.net["autopg3"]
        command = ["unbind_port_group", "--virtual_switch", "utvswitch",
                   "--networkip", net.ip]
        self.noouttest(command)

    def test_105_verify_pg_gone(self):
        # After both unbinds, the switch should show no port groups.
        command = ["show_virtual_switch", "--virtual_switch", "utvswitch"]
        out = self.commandtest(command)
        self.matchclean(out, "Port Group", command)

    def test_110_del_utvswitch(self):
        command = ["del_virtual_switch", "--virtual_switch", "utvswitch"]
        self.noouttest(command)

    def test_115_verify_utvswitch(self):
        # Showing a deleted switch must report not-found.
        command = ["show_virtual_switch", "--virtual_switch", "utvswitch"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "Virtual Switch utvswitch not found.", command)

    def test_120_del_utvswitch2(self):
        command = ["del_virtual_switch", "--virtual_switch", "utvswitch2"]
        self.noouttest(command)

    def test_130_del_camelcase(self):
        # Deletion should be case-insensitive and remove the plenary file.
        self.check_plenary_exists("virtualswitchdata", "camelcase")
        self.noouttest(["del_virtual_switch", "--virtual_switch", "CaMeLcAsE"])
        self.check_plenary_gone("virtualswitchdata", "camelcase")

    def test_200_del_again(self):
        # Deleting an already-deleted switch must fail cleanly.
        command = ["del_virtual_switch", "--virtual_switch", "utvswitch"]
        out = self.notfoundtest(command)
        self.matchoutput(out, "Virtual Switch utvswitch not found.", command)
if __name__ == '__main__':
    # Run only this module's test case when executed directly.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestDelVirtualSwitch)
    unittest.TextTestRunner(verbosity=2).run(suite)
| {
"content_hash": "45cd88816f5d3511816bf6d43506b0e5",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 79,
"avg_line_length": 37.735849056603776,
"alnum_prop": 0.6445,
"repo_name": "quattor/aquilon",
"id": "c6d7db09fb05399d6f71f06ced95273d207bd83b",
"size": "2740",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "tests/broker/test_del_virtual_switch.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
} |
import atexit
import logging
import math
import os
import threading
import types
import numpy
import large_image.tilesource.base
from large_image import config
from large_image.cache_util import LruCacheMetaclass, methodcache
from large_image.constants import TILE_FORMAT_NUMPY, SourcePriority
from large_image.exceptions import TileSourceError, TileSourceFileNotFoundError
from large_image.tilesource import FileTileSource, nearPowerOfTwo
try:
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version as _importlib_version
except ImportError:
from importlib_metadata import PackageNotFoundError
from importlib_metadata import version as _importlib_version
try:
__version__ = _importlib_version(__name__)
except PackageNotFoundError:
# package is not installed
pass
# Imported lazily in _startJavabridge(); both modules are slow to import.
bioformats = None
# import javabridge
javabridge = None
# Tri-state: None = JVM not yet started, True = running, False = failed.
_javabridgeStarted = None
# Sources whose Java-side images must be closed before the VM shuts down.
_openImages = []

# Default to ignoring files with no extension and some specific extensions.
config.ConfigValues['source_bioformats_ignored_names'] = \
    r'(^[^.]*|\.(jpg|jpeg|jpe|png|tif|tiff|ndpi|nd2))$'
def _monitor_thread():
    """Wait for the main thread to finish, then close any still-open
    bioformats images and stop the Java VM.

    Runs as a daemon thread started by _startJavabridge.
    """
    main_thread = threading.main_thread()
    main_thread.join()
    if len(_openImages):
        try:
            # Attach this thread to the JVM before touching Java objects.
            javabridge.attach()
            while len(_openImages):
                source = _openImages.pop()
                try:
                    source._bioimage.close()
                except Exception:
                    # Best-effort cleanup at interpreter exit.
                    pass
                source._bioimage = None
        except AssertionError:
            # presumably raised when the JVM is already gone -- TODO confirm
            pass
        finally:
            if javabridge.get_env():
                javabridge.detach()
    _stopJavabridge()
def _reduceLogging():
    """Quiet the Java-side (slf4j/logback) and Python-side bioformats loggers.

    Best-effort: any failure to reach the Java logging classes is ignored.
    """
    # As of bioformat 4.0.0, org.apache.log4j isn't in the bundled
    # jar file, so setting log levels just produces needless warnings.
    # bioformats.log4j.basic_config()
    # javabridge.JClassWrapper('loci.common.Log4jTools').setRootLevel(
    #     logging.getLevelName(logger.level))
    #
    # This is taken from
    # https://github.com/pskeshu/microscoper/blob/master/microscoper/io.py
    try:
        rootLoggerName = javabridge.get_static_field(
            'org/slf4j/Logger', 'ROOT_LOGGER_NAME', 'Ljava/lang/String;')
        rootLogger = javabridge.static_call(
            'org/slf4j/LoggerFactory', 'getLogger',
            '(Ljava/lang/String;)Lorg/slf4j/Logger;', rootLoggerName)
        logLevel = javabridge.get_static_field(
            'ch/qos/logback/classic/Level', 'WARN', 'Lch/qos/logback/classic/Level;')
        javabridge.call(rootLogger, 'setLevel', '(Lch/qos/logback/classic/Level;)V', logLevel)
    except Exception:
        pass
    bioformats.formatreader.logger.setLevel(logging.ERROR)
def _startJavabridge(logger):
    """Start the Java VM used by bioformats, importing lazily on first use.

    :param logger: logger used to report success or failure.
    :returns: True if the JVM is running, False if it failed to start.
    """
    global _javabridgeStarted

    if _javabridgeStarted is None:
        # Only import these when first asked.  They are slow to import.
        global bioformats
        global javabridge
        if bioformats is None:
            import bioformats
        if javabridge is None:
            import javabridge

        # We need something to wake up at exit and shut things down
        monitor = threading.Thread(target=_monitor_thread)
        monitor.daemon = True
        monitor.start()
        try:
            javabridge.start_vm(class_path=bioformats.JARS, run_headless=True)
            _reduceLogging()
            atexit.register(_stopJavabridge)
            logger.info('Started JVM for Bioformats tile source.')
            _javabridgeStarted = True
        except RuntimeError:
            # logger.exception records the traceback itself; the original
            # passed the exception as an extra %-format argument with no
            # placeholder, which broke the log record formatting.
            logger.exception('Cannot start JVM for Bioformats tile source.')
            _javabridgeStarted = False
    return _javabridgeStarted
def _stopJavabridge(*args, **kwargs):
    """Kill the Java VM if one was started and reset the start flag.

    Accepts and ignores arguments so it can be used as an atexit handler.
    """
    global _javabridgeStarted
    if javabridge is not None:
        javabridge.kill_vm()
    _javabridgeStarted = None
class BioformatsFileTileSource(FileTileSource, metaclass=LruCacheMetaclass):
    """
    Provides tile access via Bioformats.
    """

    cacheName = 'tilesource'
    name = 'bioformats'
    extensions = {
        None: SourcePriority.FALLBACK,
        'czi': SourcePriority.PREFERRED,
        'lif': SourcePriority.MEDIUM,
        'vsi': SourcePriority.PREFERRED,
    }
    mimeTypes = {
        None: SourcePriority.FALLBACK,
        'image/czi': SourcePriority.PREFERRED,
        'image/vsi': SourcePriority.PREFERRED,
    }

    # If frames are smaller than this they are served as single tiles, which
    # can be more efficient than handling multiple tiles.
    _singleTileThreshold = 2048
    _tileSize = 512
    _associatedImageMaxSize = 8192

    def __init__(self, path, **kwargs):  # noqa
        """
        Initialize the tile class.  See the base class for other available
        parameters.

        :param path: the associated file path.
        """
        super().__init__(path, **kwargs)

        largeImagePath = str(self._getLargeImagePath())
        self._ignoreSourceNames('bioformats', largeImagePath, r'\.png$')
        if not _startJavabridge(self.logger):
            raise TileSourceError(
                'File cannot be opened by bioformats reader because javabridge failed to start')
        # All Java-side reads are serialized through this lock.
        self._tileLock = threading.RLock()

        try:
            javabridge.attach()
            try:
                self._bioimage = bioformats.ImageReader(largeImagePath)
            except (AttributeError, OSError) as exc:
                if not os.path.isfile(largeImagePath):
                    raise TileSourceFileNotFoundError(largeImagePath) from None
                self.logger.debug('File cannot be opened via Bioformats. (%r)' % exc)
                raise TileSourceError('File cannot be opened via Bioformats (%r)' % exc)
            # Register so the exit monitor can close the Java image.
            _openImages.append(self)

            rdr = self._bioimage.rdr
            # Bind additional functions not done by bioformats module.
            # Functions are listed at https://downloads.openmicroscopy.org
            # //bio-formats/5.1.5/api/loci/formats/IFormatReader.html
            for (name, params, desc) in [
                ('getBitsPerPixel', '()I', 'Get the number of bits per pixel'),
                ('getEffectiveSizeC', '()I', 'effectiveC * Z * T = imageCount'),
                ('isNormalized', '()Z', 'Is float data normalized'),
                ('isMetadataComplete', '()Z', 'True if metadata is completely parsed'),
                ('getDomains', '()[Ljava/lang/String;', 'Get a list of domains'),
                ('getZCTCoords', '(I)[I', 'Gets the Z, C and T coordinates '
                 '(real sizes) corresponding to the given rasterized index value.'),
                ('getOptimalTileWidth', '()I', 'the optimal sub-image width '
                 'for use with openBytes'),
                ('getOptimalTileHeight', '()I', 'the optimal sub-image height '
                 'for use with openBytes'),
                ('getResolutionCount', '()I', 'The number of resolutions for '
                 'the current series'),
                ('setResolution', '(I)V', 'Set the resolution level'),
                ('getResolution', '()I', 'The current resolution level'),
                ('hasFlattenedResolutions', '()Z', 'True if resolutions have been flattened'),
                ('setFlattenedResolutions', '(Z)V', 'Set if resolution should be flattened'),
            ]:
                setattr(rdr, name, types.MethodType(
                    javabridge.jutil.make_method(name, params, desc), rdr))
            # rdr.setFlattenedResolutions(False)
            self._metadata = {
                'dimensionOrder': rdr.getDimensionOrder(),
                'metadata': javabridge.jdictionary_to_string_dictionary(
                    rdr.getMetadata()),
                'seriesMetadata': javabridge.jdictionary_to_string_dictionary(
                    rdr.getSeriesMetadata()),
                'seriesCount': rdr.getSeriesCount(),
                'imageCount': rdr.getImageCount(),
                'rgbChannelCount': rdr.getRGBChannelCount(),
                'sizeColorPlanes': rdr.getSizeC(),
                'sizeT': rdr.getSizeT(),
                'sizeZ': rdr.getSizeZ(),
                'sizeX': rdr.getSizeX(),
                'sizeY': rdr.getSizeY(),
                'pixelType': rdr.getPixelType(),
                'isLittleEndian': rdr.isLittleEndian(),
                'isRGB': rdr.isRGB(),
                'isInterleaved': rdr.isInterleaved(),
                'isIndexed': rdr.isIndexed(),
                'bitsPerPixel': rdr.getBitsPerPixel(),
                'sizeC': rdr.getEffectiveSizeC(),
                'normalized': rdr.isNormalized(),
                'metadataComplete': rdr.isMetadataComplete(),
                # 'domains': rdr.getDomains(),
                'optimalTileWidth': rdr.getOptimalTileWidth(),
                'optimalTileHeight': rdr.getOptimalTileHeight(),
                'resolutionCount': rdr.getResolutionCount(),
                'flattenedResolutions': rdr.hasFlattenedResolutions(),
            }
            self._checkSeries(rdr)
            bmd = bioformats.metadatatools.MetadataRetrieve(self._bioimage.metadata)
            try:
                self._metadata['channelNames'] = [
                    bmd.getChannelName(0, c) or bmd.getChannelID(0, c)
                    for c in range(self._metadata['sizeColorPlanes'])]
            except Exception:
                self._metadata['channelNames'] = []
            # Guard against missing or non-positive dimension counts.
            for key in ['sizeXY', 'sizeC', 'sizeZ', 'sizeT']:
                if not isinstance(self._metadata[key], int) or self._metadata[key] < 1:
                    self._metadata[key] = 1
            self.sizeX = self._metadata['sizeX']
            self.sizeY = self._metadata['sizeY']
            self._computeTiles()
            self._computeLevels()
            self._computeMagnification()
        except javabridge.JavaException as exc:
            es = javabridge.to_string(exc.throwable)
            self.logger.debug('File cannot be opened via Bioformats. (%s)' % es)
            raise TileSourceError('File cannot be opened via Bioformats. (%s)' % es)
        except (AttributeError, UnicodeDecodeError):
            self.logger.exception('The bioformats reader threw an unhandled exception.')
            raise TileSourceError('The bioformats reader threw an unhandled exception.')
        finally:
            if javabridge.get_env():
                javabridge.detach()

        if self.levels < 1:
            raise TileSourceError(
                'Bioformats image must have at least one level.')
        if self.sizeX <= 0 or self.sizeY <= 0:
            raise TileSourceError('Bioformats tile size is invalid.')
        # Prove we can actually read a tile before declaring success.
        try:
            self.getTile(0, 0, self.levels - 1)
        except Exception as exc:
            raise TileSourceError('Bioformats cannot read a tile: %r' % exc)

    def __del__(self):
        # Close the Java-side image, if any, and deregister from the
        # module-level open-image list so the exit monitor skips it.
        if getattr(self, '_bioimage', None) is not None:
            try:
                javabridge.attach()
                self._bioimage.close()
                _openImages.remove(self)
            finally:
                if javabridge.get_env():
                    javabridge.detach()

    def _getSeriesStarts(self, rdr):
        """Group series into frames and populate frameSeries metadata.

        Each entry of ``self._metadata['frameSeries']`` gets a list of
        series indices (full resolution first, reduced resolutions after).

        :param rdr: the bioformats reader with bound helper methods.
        :returns: the first series number that may be an associated image.
        """
        self._metadata['frameSeries'] = [{
            'series': [0],
            'sizeX': self._metadata['sizeX'],
            'sizeY': self._metadata['sizeY'],
        }]
        if self._metadata['seriesCount'] <= 1:
            return 1
        seriesMetadata = {}
        for idx in range(self._metadata['seriesCount']):
            rdr.setSeries(idx)
            seriesMetadata.update(
                javabridge.jdictionary_to_string_dictionary(rdr.getSeriesMetadata()))
        frameList = []
        nextSeriesNum = 0
        try:
            # Keys look like 'Series <n>|...' with the frame number as the
            # value; collect which series make up each frame.
            for key, value in seriesMetadata.items():
                frameNum = int(value)
                seriesNum = int(key.split('Series ')[1].split('|')[0]) - 1
                if seriesNum >= 0 and seriesNum < self._metadata['seriesCount']:
                    while len(frameList) <= frameNum:
                        frameList.append([])
                    if seriesNum not in frameList[frameNum]:
                        frameList[frameNum].append(seriesNum)
                        frameList[frameNum].sort()
                    nextSeriesNum = max(nextSeriesNum, seriesNum + 1)
        except Exception as exc:
            self.logger.debug('Failed to parse series information: %s', exc)
            rdr.setSeries(0)
            return 1
        if not len(seriesMetadata):
            frameList = [[0]]
            nextSeriesNum = 1
            for idx in range(1, self._metadata['seriesCount']):
                rdr.setSeries(idx)
                # Bug fix: the original compared the bound method
                # ``rdr.getSizeY`` (no parentheses) to an int, which was
                # always False, so same-sized series were never collected.
                if rdr.getSizeX() == self.sizeX and rdr.getSizeY() == self.sizeY:
                    frameList.append([idx])
                    if nextSeriesNum == idx:
                        nextSeriesNum = idx + 1
        frameList = [fl for fl in frameList if len(fl)]
        self._metadata['frameSeries'] = [{
            'series': fl,
        } for fl in frameList]
        rdr.setSeries(0)
        return nextSeriesNum

    def _checkSeries(self, rdr):
        """Classify series as frames or associated images and validate the
        reduced-resolution pyramid of each frame.

        :param rdr: the bioformats reader with bound helper methods.
        """
        firstPossibleAssoc = self._getSeriesStarts(rdr)
        self._metadata['seriesAssociatedImages'] = {}
        for seriesNum in range(firstPossibleAssoc, self._metadata['seriesCount']):
            if any((seriesNum in series['series']) for series in self._metadata['frameSeries']):
                continue
            rdr.setSeries(seriesNum)
            info = {
                'sizeX': rdr.getSizeX(),
                'sizeY': rdr.getSizeY(),
            }
            if (info['sizeX'] < self._associatedImageMaxSize and
                    info['sizeY'] < self._associatedImageMaxSize):
                # TODO: Figure out better names for associated images.  Can
                # we tell if any of them are the macro or label image?
                info['seriesNum'] = seriesNum
                self._metadata['seriesAssociatedImages'][
                    'image%d' % seriesNum] = info
        validate = None
        for frame in self._metadata['frameSeries']:
            for level in range(len(frame['series'])):
                rdr.setSeries(frame['series'][level])
                info = {
                    'sizeX': rdr.getSizeX(),
                    'sizeY': rdr.getSizeY(),
                }
                if not level:
                    frame.update(info)
                    self._metadata['sizeX'] = max(self._metadata['sizeX'], frame['sizeX'])
                    self._metadata['sizeY'] = max(self._metadata['sizeY'], frame['sizeY'])
                elif validate is not False:
                    # Each sub-resolution should be close to a power of two
                    # smaller than the frame; truncate at the first level
                    # that is not.
                    if (not nearPowerOfTwo(frame['sizeX'], info['sizeX']) or
                            not nearPowerOfTwo(frame['sizeY'], info['sizeY'])):
                        frame['series'] = frame['series'][:level]
                        validate = True
                        # Bug fix: without breaking here the loop would
                        # index past the truncated series list.
                        break
            if validate is None:
                validate = False
        rdr.setSeries(0)
        self._metadata['sizeXY'] = len(self._metadata['frameSeries'])

    def _computeTiles(self):
        """Pick a tile size: whole-frame for small images, the reader's
        optimal tile size when reasonable, else the default size."""
        if (self._metadata['resolutionCount'] <= 1 and
                self.sizeX <= self._singleTileThreshold and
                self.sizeY <= self._singleTileThreshold):
            self.tileWidth = self.sizeX
            self.tileHeight = self.sizeY
        elif (128 <= self._metadata['optimalTileWidth'] <= self._singleTileThreshold and
                128 <= self._metadata['optimalTileHeight'] <= self._singleTileThreshold):
            self.tileWidth = self._metadata['optimalTileWidth']
            self.tileHeight = self._metadata['optimalTileHeight']
        else:
            self.tileWidth = self.tileHeight = self._tileSize

    def _computeLevels(self):
        """Set the pyramid level count so the top level fits one tile."""
        self.levels = int(math.ceil(max(
            math.log(float(self.sizeX) / self.tileWidth),
            math.log(float(self.sizeY) / self.tileHeight)) / math.log(2))) + 1

    def _computeMagnification(self):
        """Extract pixel scale and magnification from the file metadata.

        Reads Zeiss-style 'Scaling|Distance' and 'NominalMagnification'
        keys; the 1e3 factor suggests the stored values are in metres,
        converted here to mm -- TODO confirm for other formats.
        """
        self._magnification = {}
        metadata = self._metadata['metadata']
        valuekeys = {
            'x': ['Scaling|Distance|Value #1'],
            'y': ['Scaling|Distance|Value #2'],
        }
        magkeys = ['Information|Instrument|Objective|NominalMagnification #1']
        units = 1e3
        for axis in {'x', 'y'}:
            for key in valuekeys[axis]:
                if metadata.get(key):
                    self._magnification['mm_' + axis] = float(metadata[key]) * units
        for key in magkeys:
            if metadata.get(key):
                self._magnification['magnification'] = float(metadata[key])
                break

    def getNativeMagnification(self):
        """
        Get the magnification at a particular level.

        :return: magnification, width of a pixel in mm, height of a pixel in mm.
        """
        mm_x = self._magnification.get('mm_x')
        mm_y = self._magnification.get('mm_y', mm_x)
        # Estimate the magnification if we don't have a direct value.  The
        # parentheses are required: the conditional expression binds looser
        # than ``or``, so the original returned None whenever mm_x was
        # unset, even when a stored magnification existed.
        mag = self._magnification.get('magnification') or (
            0.01 / mm_x if mm_x else None)
        return {
            'magnification': mag,
            'mm_x': mm_x,
            'mm_y': mm_y,
        }

    def getMetadata(self):
        """
        Return a dictionary of metadata containing levels, sizeX, sizeY,
        tileWidth, tileHeight, magnification, mm_x, mm_y, and frames.

        :returns: metadata dictionary.
        """
        result = super().getMetadata()
        # sizeC, sizeZ, sizeT, sizeXY.  Frames are ordered with C varying
        # fastest, then Z, T, XY -- matching the index decomposition in
        # getTile.
        frames = []
        for xy in range(self._metadata['sizeXY']):
            for t in range(self._metadata['sizeT']):
                for z in range(self._metadata['sizeZ']):
                    for c in range(self._metadata['sizeC']):
                        frames.append({
                            'IndexC': c,
                            'IndexZ': z,
                            'IndexT': t,
                            'IndexXY': xy,
                        })
        if len(self._metadata['frameSeries']) == len(frames):
            for idx, frame in enumerate(frames):
                frame['sizeX'] = self._metadata['frameSeries'][idx]['sizeX']
                frame['sizeY'] = self._metadata['frameSeries'][idx]['sizeY']
                frame['levels'] = len(self._metadata['frameSeries'][idx]['series'])
        if len(frames) > 1:
            result['frames'] = frames
            self._addMetadataFrameInformation(result, self._metadata['channelNames'])
        return result

    def getInternalMetadata(self, **kwargs):
        """
        Return additional known metadata about the tile source.  Data returned
        from this method is not guaranteed to be in any particular format or
        have specific values.

        :returns: a dictionary of data or None.
        """
        return self._metadata

    @methodcache()
    def getTile(self, x, y, z, pilImageAllowed=False, numpyAllowed=False, **kwargs):
        """Read one tile, downsampling from a coarser stored level if needed.

        :param x: tile column.
        :param y: tile row.
        :param z: tile level; ``self.levels - 1`` is full resolution.
        :param pilImageAllowed: True if a PIL image may be returned.
        :param numpyAllowed: True if a numpy array may be returned.
        :param kwargs: may include ``frame`` to select a C/Z/T/XY frame.
        :returns: the tile as produced by ``_outputTile``.
        """
        self._xyzInRange(x, y, z)
        ft = fc = fz = 0
        fseries = self._metadata['frameSeries'][0]
        if kwargs.get('frame') is not None:
            frame = self._getFrame(**kwargs)
            # Decompose the linear frame index: C fastest, then Z, T, XY.
            fc = frame % self._metadata['sizeC']
            fz = (frame // self._metadata['sizeC']) % self._metadata['sizeZ']
            ft = (frame // self._metadata['sizeC'] //
                  self._metadata['sizeZ']) % self._metadata['sizeT']
            fxy = (frame // self._metadata['sizeC'] //
                   self._metadata['sizeZ'] // self._metadata['sizeT'])
            # Bug fix: valid XY indices are 0 .. sizeXY - 1; the original
            # ``>`` test let fxy == sizeXY through to an IndexError below.
            if frame < 0 or fxy >= self._metadata['sizeXY']:
                raise TileSourceError('Frame does not exist')
            fseries = self._metadata['frameSeries'][fxy]
        seriesLevel = self.levels - 1 - z
        scale = 1
        # If this level has no stored series, read the nearest coarser one
        # and downsample by ``scale`` afterwards.
        while seriesLevel >= len(fseries['series']):
            seriesLevel -= 1
            scale *= 2
        offsetx = x * self.tileWidth * scale
        offsety = y * self.tileHeight * scale
        width = min(self.tileWidth * scale, self.sizeX // 2 ** seriesLevel - offsetx)
        height = min(self.tileHeight * scale, self.sizeY // 2 ** seriesLevel - offsety)
        sizeXAtScale = fseries['sizeX'] // (2 ** seriesLevel)
        sizeYAtScale = fseries['sizeY'] // (2 ** seriesLevel)
        finalWidth = width // scale
        finalHeight = height // scale
        width = min(width, sizeXAtScale - offsetx)
        height = min(height, sizeYAtScale - offsety)

        with self._tileLock:
            try:
                javabridge.attach()
                if width > 0 and height > 0:
                    tile = self._bioimage.read(
                        c=fc, z=fz, t=ft, series=fseries['series'][seriesLevel],
                        rescale=False,  # return internal data types
                        XYWH=(offsetx, offsety, width, height))
                else:
                    # We need the same dtype, so read 1x1 at 0x0
                    tile = self._bioimage.read(
                        c=fc, z=fz, t=ft, series=fseries['series'][seriesLevel],
                        rescale=False,  # return internal data types
                        XYWH=(0, 0, 1, 1))
                    tile = numpy.zeros(tuple([0, 0] + list(tile.shape[2:])), dtype=tile.dtype)
                # Renamed from ``format`` to avoid shadowing the builtin.
                tileFormat = TILE_FORMAT_NUMPY
            except javabridge.JavaException as exc:
                es = javabridge.to_string(exc.throwable)
                raise TileSourceError('Failed to get Bioformat region (%s, %r).' % (es, (
                    fc, fz, ft, fseries, self.sizeX, self.sizeY, offsetx, offsety, width, height)))
            finally:
                if javabridge.get_env():
                    javabridge.detach()
        if scale > 1:
            tile = tile[::scale, ::scale]
        if tile.shape[:2] != (finalHeight, finalWidth):
            # Pad partial edge tiles to full size with the maximum ("white")
            # value for the dtype.
            fillValue = 0
            if tile.dtype == numpy.uint16:
                fillValue = 65535
            elif tile.dtype == numpy.uint8:
                fillValue = 255
            elif tile.dtype.kind == 'f':
                fillValue = 1
            retile = numpy.full(
                tuple([finalHeight, finalWidth] + list(tile.shape[2:])),
                fillValue,
                dtype=tile.dtype)
            retile[0:min(tile.shape[0], finalHeight), 0:min(tile.shape[1], finalWidth)] = tile[
                0:min(tile.shape[0], finalHeight), 0:min(tile.shape[1], finalWidth)]
            tile = retile
        return self._outputTile(tile, tileFormat, x, y, z, pilImageAllowed, numpyAllowed, **kwargs)

    def getAssociatedImagesList(self):
        """
        Return a list of associated images.

        :return: the list of image keys.
        """
        return sorted(self._metadata['seriesAssociatedImages'].keys())

    def _getAssociatedImage(self, imageKey):
        """
        Get an associated image in PIL format.

        :param imageKey: the key of the associated image.
        :return: the image in PIL format or None.
        """
        info = self._metadata['seriesAssociatedImages'].get(imageKey)
        if info is None:
            return
        series = info['seriesNum']
        with self._tileLock:
            try:
                javabridge.attach()
                image = self._bioimage.read(
                    series=series,
                    rescale=False,  # return internal data types
                    XYWH=(0, 0, info['sizeX'], info['sizeY']))
            except javabridge.JavaException as exc:
                es = javabridge.to_string(exc.throwable)
                raise TileSourceError('Failed to get Bioformat series (%s, %r).' % (es, (
                    series, info['sizeX'], info['sizeY'])))
            finally:
                if javabridge.get_env():
                    javabridge.detach()
        return large_image.tilesource.base._imageToPIL(image)
def open(*args, **kwargs):
    """
    Create an instance of the module class.

    NOTE: intentionally shadows the builtin ``open`` at module scope; this
    is the source-factory entry point used by large_image.
    """
    return BioformatsFileTileSource(*args, **kwargs)
def canRead(*args, **kwargs):
    """
    Check if an input can be read by the module class.
    """
    return BioformatsFileTileSource.canRead(*args, **kwargs)
| {
"content_hash": "8631bcdf520bc0bcc0273acffae22e07",
"timestamp": "",
"source": "github",
"line_count": 579,
"max_line_length": 99,
"avg_line_length": 41.75302245250432,
"alnum_prop": 0.5605377456049638,
"repo_name": "girder/large_image",
"id": "e673609a10d58259017f9a2de3f880450a32f8c2",
"size": "25381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sources/bioformats/large_image_source_bioformats/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7114"
},
{
"name": "JavaScript",
"bytes": "307859"
},
{
"name": "Pug",
"bytes": "21406"
},
{
"name": "Python",
"bytes": "1371949"
},
{
"name": "Shell",
"bytes": "5500"
},
{
"name": "Stylus",
"bytes": "4261"
}
],
"symlink_target": ""
} |
"""
WSGI config for musicwings project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os
# Point Django at the project settings before the WSGI app is created.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "musicwings.settings")
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
application = Cling(get_wsgi_application()) | {
"content_hash": "a770c2a6187ab781310dc9c815754507",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 26.875,
"alnum_prop": 0.7790697674418605,
"repo_name": "miccio-dk/musicwings",
"id": "22432ec6b25b34e83f34327aca7acebb49935272",
"size": "430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "musicwings/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "204"
},
{
"name": "HTML",
"bytes": "7045"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "5736"
}
],
"symlink_target": ""
} |
"""
Abstract: Operations on earthquake data as examples for how to use the core concept 'event'
Use Cases:
- get all locations of earthquakes with a magnitude of 4 or higher
- get all earthquakes which origin is 100 m or deeper
- get all earthquakes from 12/01/2014 00:00:00 - 12/6/2014 23:59:59
- get all earthquakes in Alaska
Provided data:
CSV file with all global earthquake events for December 2014.
The fields for each earthquake are:
time,latitude,longitude,depth,mag,magType,nst,gap,dmin,rms,net,id,updated,place,type
"""
__author__ = "Marc Tim Thiemann"
__copyright__ = "Copyright 2014"
__credits__ = ["Marc Tim Thiemann"]
__license__ = ""
__version__ = "0.1"
__maintainer__ = ""
__email__ = ""
__date__ = "January 2015"
__status__ = "Development"
import sys
sys.path = [ '.', '../..' ] + sys.path
from utils import _init_log
from events import *
import dateutil.parser
from datetime import *
import csv
log = _init_log("example-1")
f = open('../data/events/earthquake_data.csv')
csv_f = csv.reader(f)
events = []
for row in csv_f:
properties = { 'latitude': row[1],
'longitude': row[2],
'depth': row[3],
'mag': row[4],
'magType': row[5],
'nst': row[6],
'gap': row[7],
'dmin': row[8],
'rms': row[9],
'net': row[10],
'id': row[11],
'updated': row[12],
'place': row[13],
'type': row[14]}
dt = dateutil.parser.parse(row[0], fuzzy = True, ignoretz = True)
events.append(PyEvent((dt, dt), properties))
print 'Get all locations of earthquakes with a magnitude of 4 or higher during December 2014'
locations = []
for e in events:
if(e.get('mag') >= 4):
locations.append((e.get('latitude'), e.get('longitude')))
print 'Get all earthquakes from 12/01/2014 00:00:00 - 12/6/2014 23:59:59'
earthquakesFirstSevenDays = []
for e in events:
if(e.during((datetime(2014, 12, 01, 0, 0, 0), datetime(2014, 12, 6, 23, 59, 59)))):
earthquakesFirstSevenDays.append(e)
print 'Get all earthquakes in Alaska during December 2014'
earthquakesInAlaska = []
for e in events:
if "Alaska" in e.get('place'):
earthquakesInAlaska.append(e)
print 'Get all earthquakes which origin is 100 m or deeper'
deepEarthQuakes = []
for e in events:
depth = 0.0
try:
depth = float(e.get('depth'))
except:
print 'Not a Number!'
if(depth >= 100):
deepEarthQuakes.append(e)
| {
"content_hash": "ac73987699eb55ed2dacccfe1d38b3ec",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 93,
"avg_line_length": 26.11,
"alnum_prop": 0.5913443125239372,
"repo_name": "liangcun/ConceptsOfSpatialInformation",
"id": "a8d75f2d6350a9cd2602715fb75b29fb806b7e06",
"size": "2658",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "CoreConceptsPy/GdalPy/examples/events/example_1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "772"
},
{
"name": "HTML",
"bytes": "62282"
},
{
"name": "Haskell",
"bytes": "42082"
},
{
"name": "JavaScript",
"bytes": "531005"
},
{
"name": "Makefile",
"bytes": "3953"
},
{
"name": "Python",
"bytes": "257815"
}
],
"symlink_target": ""
} |
"""The SHA-256 Hasher implementation"""
import hashlib
from plaso.hashers import interface
from plaso.hashers import manager
class SHA256Hasher(interface.BaseHasher):
  """Hasher that produces SHA-256 digests of the data fed to it."""

  NAME = u'sha256'
  DESCRIPTION = u'Calculates a SHA-256 digest hash over input data.'

  def __init__(self):
    """Initializes the SHA-256 hasher."""
    super(SHA256Hasher, self).__init__()
    self._context = hashlib.sha256()

  def GetBinaryDigest(self):
    """Retrieves the digest as a binary string.

    Returns:
      The raw byte digest computed over all data blocks passed to Update().
    """
    return self._context.digest()

  def GetStringDigest(self):
    """Retrieves the digest as a Unicode hexadecimal string.

    Returns:
      A Unicode string of printable characters containing the hexadecimal
      digest of all data blocks passed to Update().
    """
    return unicode(self._context.hexdigest())

  def Update(self, data):
    """Feeds a block of data into the hasher.

    Repeated calls are equivalent to a single call with the concatenation
    of all the arguments.

    Args:
      data: a string of data with which to update the context of the hasher.
    """
    self._context.update(data)
# Make the hasher available through the global hashers manager registry.
manager.HashersManager.RegisterHasher(SHA256Hasher)
| {
"content_hash": "759a6458d44695590bcf0e8618e4f67f",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 28.86,
"alnum_prop": 0.7089397089397089,
"repo_name": "jorik041/plaso",
"id": "178df0ce01faea8c797c6070862faba1a0c158f9",
"size": "1467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plaso/hashers/sha256.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13905"
},
{
"name": "Python",
"bytes": "3032632"
},
{
"name": "Shell",
"bytes": "45900"
}
],
"symlink_target": ""
} |
import ujson
from gnocchiclient.v1 import base
class ArchivePolicyRuleManager(base.Manager):
    """CRUD operations for Gnocchi archive policy rules."""

    url = "v1/archive_policy_rule/"

    def list(self):
        """List archive policy rules."""
        return self._get(self.url).json()

    def get(self, name):
        """Get an archive policy rule.

        :param name: Name of the archive policy rule
        :type name: str
        """
        return self._get(self.url + name).json()

    def create(self, archive_policy_rule):
        """Create an archive policy rule.

        :param archive_policy_rule: the rule definition to create.
        :type archive_policy_rule: dict
        """
        return self._post(
            self.url, headers={'Content-Type': "application/json"},
            data=ujson.dumps(archive_policy_rule)).json()

    def update(self, name, new_name):
        """Update an archive policy rule.

        :param name: the name of archive policy rule
        :type name: str
        :param new_name: the new name of archive policy rule
        :type new_name: str
        """
        # Bug fix: ``self.url`` already ends with '/'; the previous
        # ``self.url + '/' + name`` produced a double slash in the request
        # path, unlike get() and delete().
        return self._patch(
            self.url + name,
            headers={'Content-Type': "application/json"},
            data=ujson.dumps({'name': new_name})).json()

    def delete(self, name):
        """Delete an archive policy rule.

        :param name: Name of the archive policy rule
        :type name: str
        """
        self._delete(self.url + name)
| {
"content_hash": "20c66044cccc09f4cd33a83af57e725b",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 67,
"avg_line_length": 27.541666666666668,
"alnum_prop": 0.575642965204236,
"repo_name": "gnocchixyz/python-gnocchiclient",
"id": "9f0abd8a19ff12d298dde59ebedbab3f64209024",
"size": "1897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnocchiclient/v1/archive_policy_rule.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "196841"
},
{
"name": "Shell",
"bytes": "645"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from typing import Dict, Type
from .base import BatchJobServiceTransport
from .grpc import BatchJobServiceGrpcTransport
# Compile a registry of transports.
# Maps transport name -> transport class; only gRPC is provided here.
_transport_registry = (
    OrderedDict()
) # type: Dict[str, Type[BatchJobServiceTransport]]
_transport_registry["grpc"] = BatchJobServiceGrpcTransport

# Public names re-exported by this package.
__all__ = (
    "BatchJobServiceTransport",
    "BatchJobServiceGrpcTransport",
)
| {
"content_hash": "9cd91bf2561f1162f054810d073bc56d",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 26.9375,
"alnum_prop": 0.777262180974478,
"repo_name": "googleads/google-ads-python",
"id": "91c165c36bf103f9f825b437eae6a418287a4f1b",
"size": "1031",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "google/ads/googleads/v11/services/services/batch_job_service/transports/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23399881"
}
],
"symlink_target": ""
} |
import pytest
from telegram import Bot, Update, Message, User, Chat, CallbackQuery, InlineQuery, \
ChosenInlineResult, ShippingQuery, PreCheckoutQuery
from telegram.ext import StringCommandHandler
# Minimal Message instance reused by every fake-update payload below.
message = Message(1, User(1, ''), None, Chat(1, ''), text='Text')

# One kwargs-dict per Update flavour; each becomes one parametrized fixture case.
params = [
    {'message': message},
    {'edited_message': message},
    {'callback_query': CallbackQuery(1, User(1, ''), 'chat', message=message)},
    {'channel_post': message},
    {'edited_channel_post': message},
    {'inline_query': InlineQuery(1, User(1, ''), '', '')},
    {'chosen_inline_result': ChosenInlineResult('id', User(1, ''), '')},
    {'shipping_query': ShippingQuery('id', User(1, ''), '', None)},
    {'pre_checkout_query': PreCheckoutQuery('id', User(1, ''), '', 0, '')},
    {'callback_query': CallbackQuery(1, User(1, ''), 'chat')}
]

# Human-readable ids matching `params` one-to-one, shown in pytest output.
ids = ('message', 'edited_message', 'callback_query', 'channel_post',
       'edited_channel_post', 'inline_query', 'chosen_inline_result',
       'shipping_query', 'pre_checkout_query', 'callback_query_without_message')
@pytest.fixture(params=params, ids=ids)
def false_update(request):
    """Parametrized Update object that a StringCommandHandler must never match."""
    return Update(update_id=1, **request.param)
class TestStringCommandHandler:
    """Tests for StringCommandHandler: matching, argument splitting, queue injection."""

    @pytest.fixture(autouse=True)
    def reset(self):
        # Reset the per-test success flag before every test runs.
        self.test_flag = False

    def sch_basic_handler(self, bot, update):
        # Callback: succeeds when given a real Bot and a plain-string update.
        test_bot = isinstance(bot, Bot)
        test_update = isinstance(update, str)
        self.test_flag = test_bot and test_update

    def sch_queue_handler_1(self, bot, update, job_queue=None, update_queue=None):
        # Callback: succeeds when at least one of the two queues was injected.
        self.test_flag = (job_queue is not None) or (update_queue is not None)

    def sch_queue_handler_2(self, bot, update, job_queue=None, update_queue=None):
        # Callback: succeeds only when both queues were injected.
        self.test_flag = (job_queue is not None) and (update_queue is not None)

    def sch_pass_args_handler(self, bot, update, args):
        # Callback: checks the args list split out of the command string.
        if update == '/test':
            self.test_flag = len(args) == 0
        else:
            self.test_flag = args == ['one', 'two']

    def test_basic(self, dp):
        handler = StringCommandHandler('test', self.sch_basic_handler)
        dp.add_handler(handler)
        assert handler.check_update('/test')
        dp.process_update('/test')
        assert self.test_flag
        # Wrong command or command not at the start must not match;
        # trailing text after the command still matches.
        assert not handler.check_update('/nottest')
        assert not handler.check_update('not /test in front')
        assert handler.check_update('/test followed by text')

    def test_pass_args(self, dp):
        handler = StringCommandHandler('test', self.sch_pass_args_handler, pass_args=True)
        dp.add_handler(handler)
        dp.process_update('/test')
        assert self.test_flag
        self.test_flag = False
        dp.process_update('/test one two')
        assert self.test_flag

    def test_pass_job_or_update_queue(self, dp):
        # job_queue alone, update_queue alone, then both together.
        handler = StringCommandHandler('test', self.sch_queue_handler_1, pass_job_queue=True)
        dp.add_handler(handler)
        dp.process_update('/test')
        assert self.test_flag
        dp.remove_handler(handler)
        handler = StringCommandHandler('test', self.sch_queue_handler_1, pass_update_queue=True)
        dp.add_handler(handler)
        self.test_flag = False
        dp.process_update('/test')
        assert self.test_flag
        dp.remove_handler(handler)
        handler = StringCommandHandler('test', self.sch_queue_handler_2, pass_job_queue=True,
                                       pass_update_queue=True)
        dp.add_handler(handler)
        self.test_flag = False
        dp.process_update('/test')
        assert self.test_flag

    def test_other_update_types(self, false_update):
        # Real telegram Update objects are not strings and must never match.
        handler = StringCommandHandler('test', self.sch_basic_handler)
        assert not handler.check_update(false_update)
| {
"content_hash": "4caec1fcc2da00536473efb292de83ff",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 96,
"avg_line_length": 37.41747572815534,
"alnum_prop": 0.6139076284379865,
"repo_name": "rogerscristo/BotFWD",
"id": "fb3dced4d670b24be0482075a2b448d1426a9f31",
"size": "4684",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python3.6/site-packages/pytests/test_stringcommandhandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13999"
}
],
"symlink_target": ""
} |
from azure.identity import DefaultAzureCredential
from azure.mgmt.sql import SqlManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-sql
# USAGE
python list_the_server_management_operations.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
    """List the management operations of a SQL server and print each one."""
    sql_client = SqlManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="00000000-1111-2222-3333-444444444444",
    )

    operations = sql_client.server_operations.list_by_server(
        resource_group_name="sqlcrudtest-7398",
        server_name="sqlcrudtest-4645",
    )
    for operation in operations:
        print(operation)
# x-ms-original-file: specification/sql/resource-manager/Microsoft.Sql/preview/2020-11-01-preview/examples/ListServerOperations.json
# Script entry point.
if __name__ == "__main__":
    main()
| {
"content_hash": "3bdea70778ecdc9ae3697d78b05d8154",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 132,
"avg_line_length": 33.5,
"alnum_prop": 0.7278314310798947,
"repo_name": "Azure/azure-sdk-for-python",
"id": "130275dcc6fc928fc1131b473bd01c507eec6d82",
"size": "1607",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/sql/azure-mgmt-sql/generated_samples/list_the_server_management_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
Provides values which would be available from /proc which
are not fulfilled by other modules.
"""
import functools
import sys
from types import ModuleType
import gdb
import pwndbg.memoize
class module(ModuleType):
    """Module subclass exposing process information as dynamic properties."""

    @property
    def pid(self):
        # PID of gdb's selected inferior, or 0 when nothing is being debugged.
        i = gdb.selected_inferior()
        if i is not None:
            return i.pid
        return 0

    @property
    def alive(self):
        # True while gdb has a selected thread, i.e. the target is live.
        return gdb.selected_thread() is not None

    @property
    def exe(self):
        # NOTE(review): body appears truncated — auxv is fetched but never
        # used and nothing is returned; confirm against upstream pwndbg.
        auxv = pwndbg.auxv.get()

    def OnlyWhenRunning(self, func):
        # Decorator: call func only when a target is alive, otherwise
        # implicitly return None.
        @functools.wraps(func)
        def wrapper(*a, **kw):
            if self.alive:
                return func(*a, **kw)
        return wrapper
# To prevent garbage collection
tether = sys.modules[__name__]

# Replace this module object with the property-bearing subclass so that
# attribute access (e.g. `.pid`, `.alive`) is evaluated dynamically.
sys.modules[__name__] = module(__name__, '')
| {
"content_hash": "8b94c4749c1505ba83a4f910f1d49c9e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 57,
"avg_line_length": 20.71794871794872,
"alnum_prop": 0.6101485148514851,
"repo_name": "sigma-random/pwndbg",
"id": "ff737521a8fe481d1c1eb51cb3445e63fd1af850",
"size": "854",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pwndbg/proc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1071439"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from django import forms
from blog.models import Category, Entry
class CategoryAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the title while typing in the admin form.
    prepopulated_fields = { 'slug': ['title'] }

admin.site.register(Category, CategoryAdmin)
class EntryAdminForm(forms.ModelForm):
    class Meta:
        model = Entry
        # NOTE(review): no 'fields'/'exclude' declared, so all model fields are
        # exposed; newer Django versions reject a ModelForm Meta without an
        # explicit fields declaration — confirm the targeted Django version.
class EntryAdmin(admin.ModelAdmin):
    # Auto-fill the slug from the title; use the custom ModelForm for editing.
    prepopulated_fields = { 'slug': ['title'] }
    form = EntryAdminForm

admin.site.register(Entry, EntryAdmin)
| {
"content_hash": "eed45d5e996d6d722d7c614f06d8324a",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 44,
"avg_line_length": 21.61904761904762,
"alnum_prop": 0.7466960352422908,
"repo_name": "davogler/davsite",
"id": "b3a74654a5786003ea6a9411d04c7c00cf3a7fe0",
"size": "454",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blog/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ActionScript",
"bytes": "321037"
},
{
"name": "CSS",
"bytes": "363320"
},
{
"name": "JavaScript",
"bytes": "561426"
},
{
"name": "PHP",
"bytes": "3388"
},
{
"name": "Python",
"bytes": "68314"
}
],
"symlink_target": ""
} |
from django_graph_api.graphql.utils import GraphQLError
from django_graph_api.graphql.request import Request
from test_app.schema import schema
def test_non_existent_episode(starwars_data):
    """An unknown episode number resolves to null and surfaces a resolver error."""
    document = '''
    {
        episode (number: 12) {
            name
        }
    }
    '''
    result, errors = Request(document, schema).execute()

    assert result == {
        "episode": None
    }
    assert errors == [
        GraphQLError('Error resolving episode: Episode matching query does not exist.'),
    ]
def test_non_existent_field(starwars_data):
    """Selecting a field the type lacks yields null for it plus a field error."""
    document = '''
    {
        episode (number: 4) {
            name
            other_field
        }
    }
    '''
    result, errors = Request(document, schema).execute()

    expected = {
        "episode": {
            "name": "A New Hope",
            "other_field": None
        }
    }
    assert result == expected
    assert errors == [
        GraphQLError('Episode does not have field other_field'),
    ]
| {
"content_hash": "f1b10e5501bbbc38e493d589084408d9",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 88,
"avg_line_length": 23.84090909090909,
"alnum_prop": 0.5328884652049571,
"repo_name": "melinath/django-graph-api",
"id": "a2216bb9e256c64c5300d996d23fbac320d0c301",
"size": "1049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_graph_api/tests/test_errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3754"
},
{
"name": "Python",
"bytes": "84291"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.core.management.base import NoArgsCommand
from optparse import make_option
from project.actions import create_project_db, create_project_local
from docutil.commands_util import recocommand
from docutil.str_util import smart_decode
class Command(NoArgsCommand):
    """Management command: register a project in the DB and optionally
    create its local directory structure."""
    option_list = NoArgsCommand.option_list + (
        make_option('--pname', action='store', dest='pname',
            default='default', help='Project unix name to initialize'),
        make_option('--pfullname', action='store', dest='pfullname',
            default='Default Project', help='Project name'),
        make_option('--url', action='store', dest='url',
            default='http://example.com', help='Project URL'),
        make_option('--local', action='store_true', dest='local',
            default=False, help='Set to create local directory'),
    )
    help = "Initialize local directory structure"

    @recocommand
    def handle_noargs(self, **options):
        # Create the DB record; smart_decode normalizes CLI byte input to text.
        create_project_db(smart_decode(options.get('pfullname')),
                smart_decode(options.get('url')),
                smart_decode(options.get('pname')))
        # Optionally scaffold the on-disk layout for the new project.
        if (options.get('local', False)):
            create_project_local(smart_decode(options.get('pname')))
| {
"content_hash": "6c7de4d380f911913e5e7fe66a374b88",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 71,
"avg_line_length": 46.107142857142854,
"alnum_prop": 0.6475600309837335,
"repo_name": "bartdag/recodoc2",
"id": "6d5401473541141004c18dca73fb43d6ac4a5c03",
"size": "1291",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "recodoc2/apps/project/management/commands/createproject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5581"
},
{
"name": "HTML",
"bytes": "32211467"
},
{
"name": "Java",
"bytes": "13646"
},
{
"name": "Perl",
"bytes": "503"
},
{
"name": "Python",
"bytes": "717834"
}
],
"symlink_target": ""
} |
from consts.award_type import AwardType
from models.notifications.notification import Notification
class AwardsNotification(Notification):
    """Notification for awards posted at an event.

    When `team` is given, payloads are personalized to that team's awards;
    otherwise they describe the event's awards as a whole.
    """

    def __init__(self, event, team=None):
        self.event = event
        self.team = team
        # Awards won by `team` at this event; empty when no team was given.
        self.team_awards = event.team_awards().get(team.key, []) if team else []

    @classmethod
    def _type(cls):
        # Deferred import; presumably avoids a circular import — confirm.
        from consts.notification_type import NotificationType
        return NotificationType.AWARDS

    @property
    def fcm_notification(self):
        from firebase_admin import messaging
        # Construct Team-specific payload
        if self.team:
            if len(self.team_awards) == 1:
                award = self.team_awards[0]
                # For WINNER/FINALIST, change our verbage
                if award.award_type_enum in [AwardType.WINNER, AwardType.FINALIST]:
                    body = 'is the'
                else:
                    body = 'won the'
                body = '{} {}'.format(body, award.name_str)
            else:
                # Zero or multiple awards: summarize by count.
                body = 'won {} awards'.format(len(self.team_awards))
            return messaging.Notification(
                title='Team {} Awards'.format(self.team.team_number),
                body='Team {} {} at the {} {}.'.format(self.team.team_number, body, self.event.year, self.event.normalized_name)
            )
        # Construct Event payload
        return messaging.Notification(
            title='{} Awards'.format(self.event.event_short.upper()),
            body='{} {} awards have been posted.'.format(self.event.year, self.event.normalized_name)
        )

    @property
    def data_payload(self):
        # Minimal machine-readable payload: keys only, no display strings.
        payload = {
            'event_key': self.event.key_name
        }
        if self.team:
            payload['team_key'] = self.team.key_name
        return payload

    @property
    def webhook_message_data(self):
        # Webhook payload: data payload plus event name and serialized awards.
        payload = self.data_payload
        payload['event_name'] = self.event.name

        from helpers.award_helper import AwardHelper
        from helpers.model_to_dict import ModelToDict
        if self.team:
            payload['awards'] = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.team_awards)]
        else:
            payload['awards'] = [ModelToDict.awardConverter(award) for award in AwardHelper.organizeAwards(self.event.awards)]
        return payload
| {
"content_hash": "7a98eda885188c263ff2db54f25d34b1",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 128,
"avg_line_length": 35.37313432835821,
"alnum_prop": 0.5970464135021097,
"repo_name": "phil-lopreiato/the-blue-alliance",
"id": "75706a233565262f49d9bb479c117e575cfc8c6e",
"size": "2370",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "models/notifications/awards.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "342115"
},
{
"name": "Dockerfile",
"bytes": "1806"
},
{
"name": "HTML",
"bytes": "923112"
},
{
"name": "JavaScript",
"bytes": "519596"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2829552"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "15899"
}
],
"symlink_target": ""
} |
import os
import tornado
from application.server import Application
if __name__ == "__main__":
    # Bind to $PORT when the environment provides one, otherwise 8080.
    listen_port = int(os.environ.get("PORT", 8080))
    server = Application()
    server.listen(listen_port)
    tornado.ioloop.IOLoop.instance().start()
| {
"content_hash": "27b44c4d80427e614631ea4c1487a9ab",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 26.875,
"alnum_prop": 0.6837209302325581,
"repo_name": "icoxfog417/enigma_abroad",
"id": "9006da419690193c780a7d979967c7613de54e47",
"size": "215",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "9967"
},
{
"name": "JavaScript",
"bytes": "5832"
},
{
"name": "Python",
"bytes": "63885"
}
],
"symlink_target": ""
} |
import datetime
import os
from django.conf import settings
from django.db.models.fields import Field
from django.core.files.base import File, ContentFile
from django.core.files.storage import default_storage
from django.core.files.images import ImageFile, get_image_dimensions
from django.core.files.uploadedfile import UploadedFile
from django.utils.functional import curry
from django.db.models import signals
from django.utils.encoding import force_unicode, smart_str
from django.utils.translation import ugettext_lazy, ugettext as _
from django import forms
from django.db.models.loading import cache
class FieldFile(File):
    """File object bound to a model instance and its FileField.

    Storage operations (open/path/url/size/save/delete) are delegated to the
    field's storage backend; save()/delete() also keep the owning model
    instance's attribute in sync.
    """

    def __init__(self, instance, field, name):
        self.instance = instance
        self.field = field
        self.storage = field.storage
        self._name = name or u''
        self._closed = False

    def __eq__(self, other):
        # Older code may be expecting FileField values to be simple strings.
        # By overriding the == operator, it can remain backwards compatibility.
        if hasattr(other, 'name'):
            return self.name == other.name
        return self.name == other

    def __ne__(self, other):
        return not self.__eq__(other)

    # The standard File contains most of the necessary properties, but
    # FieldFiles can be instantiated without a name, so that needs to
    # be checked for here.
    def _require_file(self):
        # Guard used by the properties below: a FieldFile with no name has
        # no underlying file to operate on.
        if not self:
            raise ValueError("The '%s' attribute has no file associated with it." % self.field.name)

    def _get_file(self):
        # Lazily open (and cache) the underlying file from storage.
        self._require_file()
        if not hasattr(self, '_file'):
            self._file = self.storage.open(self.name, 'rb')
        return self._file
    file = property(_get_file)

    def _get_path(self):
        self._require_file()
        return self.storage.path(self.name)
    path = property(_get_path)

    def _get_url(self):
        self._require_file()
        return self.storage.url(self.name)
    url = property(_get_url)

    def _get_size(self):
        self._require_file()
        return self.storage.size(self.name)
    size = property(_get_size)

    def open(self, mode='rb'):
        self._require_file()
        return super(FieldFile, self).open(mode)
    # open() doesn't alter the file's contents, but it does reset the pointer
    open.alters_data = True

    # In addition to the standard File API, FieldFiles have extra methods
    # to further manipulate the underlying file, as well as update the
    # associated model instance.
    def save(self, name, content, save=True):
        # Store `content` under a storage-generated name and record that
        # name on the model instance.
        name = self.field.generate_filename(self.instance, name)
        self._name = self.storage.save(name, content)
        setattr(self.instance, self.field.name, self.name)

        # Update the filesize cache
        self._size = len(content)

        # Save the object because it has changed, unless save is False
        if save:
            self.instance.save()
    save.alters_data = True

    def delete(self, save=True):
        # Only close the file if it's already open, which we know by the
        # presence of self._file
        if hasattr(self, '_file'):
            self.close()
            del self._file
        self.storage.delete(self.name)

        self._name = None
        setattr(self.instance, self.field.name, self.name)

        # Delete the filesize cache
        if hasattr(self, '_size'):
            del self._size

        if save:
            self.instance.save()
    delete.alters_data = True

    def __getstate__(self):
        # FieldFile needs access to its associated model field and an instance
        # it's attached to in order to work properly, but the only necessary
        # data to be pickled is the file's name itself. Everything else will
        # be restored later, by FileDescriptor below.
        return {'_name': self.name, '_closed': False}
class FileDescriptor(object):
    """Descriptor installed on model classes for each FileField.

    On instance access it wraps the raw stored name in the field's
    attr_class (a FieldFile); for unpickled FieldFiles it restores the
    field/instance/storage attributes that were not serialized.
    """
    def __init__(self, field):
        self.field = field

    def __get__(self, instance=None, owner=None):
        if instance is None:
            # Bug fix: the original did
            #   "%s ... %s" % (self.field.name(self.owner.__name__))
            # which *calls* the name string and references a nonexistent
            # self.owner; supply the two values the format string expects.
            # (Also switched the Python-2-only raise-statement form to the
            # call form, valid on both Python 2 and 3.)
            raise AttributeError(
                "%s can only be accessed from %s instances." % (self.field.name, owner.__name__))
        file = instance.__dict__[self.field.name]
        if not isinstance(file, FieldFile):
            # Create a new instance of FieldFile, based on a given file name
            instance.__dict__[self.field.name] = self.field.attr_class(instance, self.field, file)
        elif not hasattr(file, 'field'):
            # The FieldFile was pickled, so some attributes need to be reset.
            file.instance = instance
            file.field = self.field
            file.storage = self.field.storage
        return instance.__dict__[self.field.name]

    def __set__(self, instance, value):
        instance.__dict__[self.field.name] = value
class FileField(Field):
    """Model field that stores a file via a storage backend.

    The database column holds only the file's name; attribute access on
    instances goes through a FileDescriptor that wraps the name in
    attr_class (FieldFile) bound to this field's storage.
    """
    attr_class = FieldFile

    def __init__(self, verbose_name=None, name=None, upload_to='', storage=None, **kwargs):
        # Files are identified by name only, so uniqueness/PK are rejected.
        for arg in ('primary_key', 'unique'):
            if arg in kwargs:
                raise TypeError("'%s' is not a valid argument for %s." % (arg, self.__class__))

        self.storage = storage or default_storage
        self.upload_to = upload_to
        # upload_to may be a callable that computes the final filename itself.
        if callable(upload_to):
            self.generate_filename = upload_to

        kwargs['max_length'] = kwargs.get('max_length', 100)
        super(FileField, self).__init__(verbose_name, name, **kwargs)

    def get_internal_type(self):
        return "FileField"

    def get_db_prep_lookup(self, lookup_type, value, connection=None):
        # Lookups compare against the stored file name, not the file object.
        if hasattr(value, 'name'):
            value = value.name
        return super(FileField, self).get_db_prep_lookup(lookup_type, value, connection)

    def get_db_prep_value(self, value, connection=None):
        "Returns field's value prepared for saving into a database."
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return unicode(value)

    def contribute_to_class(self, cls, name):
        super(FileField, self).contribute_to_class(cls, name)
        # Install the descriptor and clean up files when instances are deleted.
        setattr(cls, self.name, FileDescriptor(self))
        signals.post_delete.connect(self.delete_file, sender=cls)

    def delete_file(self, instance, sender, **kwargs):
        file = getattr(instance, self.attname)
        # If no other object of this type references the file,
        # and it's not the default value for future objects,
        # delete it from the backend.
        if file and file.name != self.default and \
            not sender._default_manager.filter(**{self.name: file.name}):
            file.delete(save=False)
        elif file:
            # Otherwise, just close the file, so it doesn't tie up resources.
            file.close()

    def get_directory_name(self):
        # Expand strftime-style placeholders in upload_to into a directory path.
        return os.path.normpath(force_unicode(datetime.datetime.now().strftime(smart_str(self.upload_to))))

    def get_filename(self, filename):
        # Sanitize the basename through the storage backend.
        return os.path.normpath(self.storage.get_valid_name(os.path.basename(filename)))

    def generate_filename(self, instance, filename):
        return os.path.join(self.get_directory_name(), self.get_filename(filename))

    def save_form_data(self, instance, data):
        # Persist a freshly uploaded form file onto the instance attribute.
        if data and isinstance(data, UploadedFile):
            getattr(instance, self.name).save(data.name, data, save=False)

    def formfield(self, **kwargs):
        defaults = {'form_class': forms.FileField}
        # If a file has been provided previously, then the form doesn't require
        # that a new file is provided this time.
        # The code to mark the form field as not required is used by
        # form_for_instance, but can probably be removed once form_for_instance
        # is gone. ModelForm uses a different method to check for an existing file.
        if 'initial' in kwargs:
            defaults['required'] = False
        defaults.update(kwargs)
        return super(FileField, self).formfield(**defaults)
class ImageFieldFile(ImageFile, FieldFile):
    """FieldFile variant that tracks image dimensions.

    On save, the new dimensions are cached and mirrored into the model's
    optional width/height fields; delete clears the cache.
    """

    def save(self, name, content, save=True):
        # Repopulate the image dimension cache.
        self._dimensions_cache = get_image_dimensions(content)

        # Update width/height fields, if needed
        if self.field.width_field:
            setattr(self.instance, self.field.width_field, self.width)
        if self.field.height_field:
            setattr(self.instance, self.field.height_field, self.height)

        super(ImageFieldFile, self).save(name, content, save)

    def delete(self, save=True):
        # Clear the image dimensions cache
        if hasattr(self, '_dimensions_cache'):
            del self._dimensions_cache
        super(ImageFieldFile, self).delete(save)
class ImageField(FileField):
    """FileField specialised for images, with optional dimension mirroring."""
    attr_class = ImageFieldFile

    def __init__(self, verbose_name=None, name=None, width_field=None, height_field=None, **kwargs):
        # Remember which model fields (if any) mirror the image dimensions.
        self.width_field = width_field
        self.height_field = height_field
        FileField.__init__(self, verbose_name, name, **kwargs)

    def formfield(self, **kwargs):
        # Default to an ImageField form field, letting callers override.
        merged = {'form_class': forms.ImageField}
        merged.update(kwargs)
        return super(ImageField, self).formfield(**merged)
| {
"content_hash": "4ebeb32e32867e3764d97c0103505caf",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 119,
"avg_line_length": 38.73109243697479,
"alnum_prop": 0.6421132566717292,
"repo_name": "weigj/django-multidb",
"id": "17886582851d7c628ad6bbbd6ec3f9d43de7490b",
"size": "9218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/db/models/fields/files.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "77034"
},
{
"name": "Python",
"bytes": "4173202"
},
{
"name": "Shell",
"bytes": "804"
}
],
"symlink_target": ""
} |
from .AbstractMode import AbstractMode
class CommandMode(AbstractMode):
    """Mode that collects keystrokes for the command line and dispatches them."""

    def __init__(self, controller):
        super().__init__(controller)
        self.line = controller.model.commandLine
        self.window = controller.view.commandLine

    def handleKey(self, key):
        super().handleKey(key)
        # 127 = DEL (backspace), 10 = LF (enter); anything else is appended.
        if key == 127:
            return self.backspace()
        if key == 10:
            return self.submit()
        return self.append(key)

    def submit(self):
        # Reset the line, redraw its window, and hand control back.
        self.line.clear()
        self.window.draw()
        return self.controller.commandMode
| {
"content_hash": "f974ca4281f41736eb8a79b7b68f334b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 49,
"avg_line_length": 26.523809523809526,
"alnum_prop": 0.63016157989228,
"repo_name": "elmar-hinz/Python.Vii",
"id": "32ed9305c3a5cee7fdbc828ff0dda01fb93147fd",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vii/CommandMode.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124980"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by `makemigrations`; left byte-stable because edits here
    # risk diverging from already-applied database state.
    # NOTE(review): the defaults below are timestamps frozen at generation
    # time (2018-02-11), not callables — presumably an artifact of a
    # non-callable model default; confirm the models use a callable default.

    dependencies = [
        ('consultorio', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='evoluciondiaria',
            name='fecha_hora',
            field=models.DateTimeField(default=datetime.datetime(2018, 2, 11, 18, 32, 6, 760875)),
        ),
        migrations.AlterField(
            model_name='formulamedica',
            name='fecha_hora',
            field=models.DateTimeField(default=datetime.datetime(2018, 2, 11, 18, 32, 6, 758542)),
        ),
        migrations.AlterField(
            model_name='historiaclinica',
            name='fecha_hora',
            field=models.DateTimeField(default=datetime.datetime(2018, 2, 11, 18, 32, 6, 762283)),
        ),
    ]
| {
"content_hash": "9dbfdc0f3c3e95e98b1b3d8ca863b039",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 98,
"avg_line_length": 30.137931034482758,
"alnum_prop": 0.5949656750572082,
"repo_name": "andresmauro17/mediapp",
"id": "fdb9f077e3f78da421671fe50c608e8a659d0e06",
"size": "947",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/apps/consultorio/migrations/0002_auto_20180211_1832.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "605428"
},
{
"name": "HTML",
"bytes": "357312"
},
{
"name": "JavaScript",
"bytes": "575186"
},
{
"name": "Python",
"bytes": "335982"
}
],
"symlink_target": ""
} |
import subprocess
import datetime
import sys
# Generate a LaTeX-readable file describing the current git revision's commit
# date, rewriting the output only when its content changes so incremental
# document builds are not invalidated.
output = []

# Check arguments.
if len(sys.argv) != 2:
    print("Usage: gen_env.py <output file>")
    sys.exit(1)
output_filename = sys.argv[1]

# Fetch details about the current repo revision.
p = subprocess.Popen(["git", "log", "-r", "HEAD", "-n", "1", "--pretty=format:%ci"],
        stdout=subprocess.PIPE)
# Fix: communicate() returns bytes on Python 3; decode so the string ops and
# strptime below receive text (also valid on Python 2).
commit_date_string = p.communicate()[0].decode("utf-8")
commit_date = datetime.datetime.strptime(commit_date_string.split()[0], "%Y-%m-%d")

# Output in a format that LaTeX can read.
output.append('\\newcommand{\\commitdate}{%s}' % (
    commit_date.strftime("%-d %B %Y")))
output.append('\\newcommand{\\commityear}{%s}' % (
    commit_date.strftime("%Y")))

# Output file, if it has changed.
new_data = "\n".join(output) + "\n"
old_data = None
try:
    # Consistency fix: use output_filename (parsed above) instead of
    # re-reading sys.argv[1].
    with open(output_filename, "r") as f:
        old_data = f.read()
except (IOError, OSError):
    # A missing output file is expected on a clean build; the previous bare
    # `except:` hid every other error as well.
    pass

if new_data != old_data:
    with open(output_filename, "w") as f:
        f.write(new_data)

# Done!
sys.exit(0)
| {
"content_hash": "a15085e0c52070313d2dfac131ed304b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 84,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.6251266464032421,
"repo_name": "cmr/seL4",
"id": "ef64876ab582620ba7b8c6acdb5ee1361e82aff0",
"size": "1235",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "manual/tools/gen_env.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "79182"
},
{
"name": "Brainfuck",
"bytes": "3357"
},
{
"name": "C",
"bytes": "1783638"
},
{
"name": "C++",
"bytes": "147133"
},
{
"name": "HyPhy",
"bytes": "75654"
},
{
"name": "Makefile",
"bytes": "76977"
},
{
"name": "Objective-C",
"bytes": "4319"
},
{
"name": "Python",
"bytes": "47292"
},
{
"name": "TeX",
"bytes": "173044"
}
],
"symlink_target": ""
} |
# Version of the original PIL whose API this Pillow release tracks.
VERSION = '1.1.7' # PIL version
# Version of Pillow itself.
PILLOW_VERSION = '2.5.3' # Pillow

# Image file-format plugin modules bundled with Pillow; presumably consumed
# by Image.init() to register each format — confirm against PIL/Image.py.
_plugins = ['BmpImagePlugin',
            'BufrStubImagePlugin',
            'CurImagePlugin',
            'DcxImagePlugin',
            'EpsImagePlugin',
            'FitsStubImagePlugin',
            'FliImagePlugin',
            'FpxImagePlugin',
            'GbrImagePlugin',
            'GifImagePlugin',
            'GribStubImagePlugin',
            'Hdf5StubImagePlugin',
            'IcnsImagePlugin',
            'IcoImagePlugin',
            'ImImagePlugin',
            'ImtImagePlugin',
            'IptcImagePlugin',
            'JpegImagePlugin',
            'Jpeg2KImagePlugin',
            'McIdasImagePlugin',
            'MicImagePlugin',
            'MpegImagePlugin',
            'MspImagePlugin',
            'PalmImagePlugin',
            'PcdImagePlugin',
            'PcxImagePlugin',
            'PdfImagePlugin',
            'PixarImagePlugin',
            'PngImagePlugin',
            'PpmImagePlugin',
            'PsdImagePlugin',
            'SgiImagePlugin',
            'SpiderImagePlugin',
            'SunImagePlugin',
            'TgaImagePlugin',
            'TiffImagePlugin',
            'WebPImagePlugin',
            'WmfImagePlugin',
            'XbmImagePlugin',
            'XpmImagePlugin',
            'XVThumbImagePlugin']
| {
"content_hash": "8d3223954c49e39e604eb2a5c069af99",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 34,
"avg_line_length": 30.431818181818183,
"alnum_prop": 0.4958924570575056,
"repo_name": "havard024/prego",
"id": "d5894c4471c807fde29b0865cf75d70df0bfff0a",
"size": "1523",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/PIL/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2978"
},
{
"name": "CSS",
"bytes": "620190"
},
{
"name": "JavaScript",
"bytes": "2456120"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "34948766"
},
{
"name": "Shell",
"bytes": "12359"
},
{
"name": "TeX",
"bytes": "113674"
}
],
"symlink_target": ""
} |
import logging

logger = logging.getLogger("pythran")
stream = logging.StreamHandler()

# Pick a formatter: colored output when colorlog is installed, plain otherwise.
try:
    from colorlog import ColoredFormatter
except ImportError:
    # colorlog missing — fall back to an uncolored default format.
    formatter = logging.Formatter("%(levelname)s: %(message)s")
    color_disabled = True
else:
    formatter = ColoredFormatter(
        "%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s",
        log_colors={
            'DEBUG': 'cyan',
            'INFO': 'green',
            'WARNING': 'yellow',
            'ERROR': 'red',
            'CRITICAL': 'red',
        }
    )
    color_disabled = False

stream.setFormatter(formatter)
logger.addHandler(stream)

if color_disabled:
    logger.info("Disabling color, you really want to install colorlog.")
| {
"content_hash": "3f7550b68f5b0de6afc57fed391137f5",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 72,
"avg_line_length": 25.90625,
"alnum_prop": 0.6212303980699638,
"repo_name": "pombredanne/pythran",
"id": "5ead1171991b0311690184945a8349f6b31647cc",
"size": "829",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pythran/log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "1366767"
},
{
"name": "Makefile",
"bytes": "1185"
},
{
"name": "Python",
"bytes": "1209572"
},
{
"name": "Shell",
"bytes": "264"
}
],
"symlink_target": ""
} |
from dateutil import parser
try:
from xml.etree import cElementTree as ETree
except ImportError:
from xml.etree import ElementTree as ETree
from .models import (
Share,
Directory,
File,
Handle,
FileProperties,
FileRange,
ShareProperties,
DirectoryProperties,
)
from azure.storage.common.models import (
_list,
)
from azure.storage.common._deserialization import (
_parse_properties,
_parse_metadata,
)
from azure.storage.common._error import _validate_content_match
from azure.storage.common._common_conversion import (
_get_content_md5,
_to_str,
)
def _parse_snapshot_share(response, name):
'''
Extracts snapshot return header.
'''
snapshot = response.headers.get('x-ms-snapshot')
return _parse_share(response, name, snapshot)
def _parse_share(response, name, snapshot=None):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, ShareProperties)
return Share(name, props, metadata, snapshot)
def _parse_directory(response, name):
if response is None:
return None
metadata = _parse_metadata(response)
props = _parse_properties(response, DirectoryProperties)
return Directory(name, props, metadata)
def _parse_permission_key(response):
'''
Extracts out file permission key
'''
if response is None or response.headers is None:
return None
return response.headers.get('x-ms-file-permission-key', None)
def _parse_permission(response):
'''
Extracts out file permission
'''
return response.body
def _parse_file(response, name, validate_content=False):
    """Build a File model (content + properties + metadata) from a response.

    For range downloads ('content-range' present) the whole-file MD5 arrives
    in 'x-ms-content-md5', not 'content-md5'; the content settings are
    adjusted accordingly.
    """
    if response is None:
        return None

    metadata = _parse_metadata(response)
    props = _parse_properties(response, FileProperties)

    # For range gets, only look at 'x-ms-content-md5' for overall MD5
    content_settings = getattr(props, 'content_settings')
    if 'content-range' in response.headers:
        if 'x-ms-content-md5' in response.headers:
            setattr(content_settings, 'content_md5', _to_str(response.headers['x-ms-content-md5']))
        else:
            # No whole-file MD5 is available for this ranged response.
            delattr(content_settings, 'content_md5')

    if validate_content:
        # Verify the downloaded payload against the transport-level MD5.
        computed_md5 = _get_content_md5(response.body)
        _validate_content_match(response.headers['content-md5'], computed_md5)

    return File(name, response.body, props, metadata)
def _convert_xml_to_shares(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults AccountName="https://myaccount.file.core.windows.net">
<Prefix>string-value</Prefix>
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Shares>
<Share>
<Name>share-name</Name>
<Snapshot>date-time-value</Snapshot>
<Properties>
<Last-Modified>date/time-value</Last-Modified>
<Etag>etag</Etag>
<Quota>max-share-size</Quota>
</Properties>
<Metadata>
<metadata-name>value</metadata-name>
</Metadata>
</Share>
</Shares>
<NextMarker>marker-value</NextMarker>
</EnumerationResults>
'''
if response is None or response.body is None:
return None
shares = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
next_marker = list_element.findtext('NextMarker') or None
setattr(shares, 'next_marker', next_marker)
shares_element = list_element.find('Shares')
for share_element in shares_element.findall('Share'):
# Name element
share = Share()
share.name = share_element.findtext('Name')
# Snapshot
share.snapshot = share_element.findtext('Snapshot')
# Metadata
metadata_root_element = share_element.find('Metadata')
if metadata_root_element is not None:
share.metadata = dict()
for metadata_element in metadata_root_element:
share.metadata[metadata_element.tag] = metadata_element.text
# Properties
properties_element = share_element.find('Properties')
share.properties.last_modified = parser.parse(properties_element.findtext('Last-Modified'))
share.properties.etag = properties_element.findtext('Etag')
share.properties.quota = int(properties_element.findtext('Quota'))
# Add share to list
shares.append(share)
return shares
def _convert_xml_to_directories_and_files(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults ServiceEndpoint="https://myaccount.file.core.windows.net/" ShareName="myshare" DirectoryPath="directory-path">
<Marker>string-value</Marker>
<MaxResults>int-value</MaxResults>
<Entries>
<File>
<Name>file-name</Name>
<Properties>
<Content-Length>size-in-bytes</Content-Length>
</Properties>
</File>
<Directory>
<Name>directory-name</Name>
</Directory>
</Entries>
<NextMarker />
</EnumerationResults>
'''
if response is None or response.body is None:
return None
entries = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
next_marker = list_element.findtext('NextMarker') or None
setattr(entries, 'next_marker', next_marker)
entries_element = list_element.find('Entries')
for file_element in entries_element.findall('File'):
# Name element
file = File()
file.name = file_element.findtext('Name')
# Properties
properties_element = file_element.find('Properties')
file.properties.content_length = int(properties_element.findtext('Content-Length'))
# Add file to list
entries.append(file)
for directory_element in entries_element.findall('Directory'):
# Name element
directory = Directory()
directory.name = directory_element.findtext('Name')
# Add directory to list
entries.append(directory)
return entries
def _convert_xml_to_handles(response):
"""
<?xml version="1.0" encoding="utf-8"?>
<EnumerationResults>
<Entries>
<Handle>
<HandleId>21123954401</HandleId>
<Path />
<FileId>0</FileId>
<ParentId>0</ParentId>
<SessionId>9385737614310506553</SessionId>
<ClientIp>167.220.2.92:27553</ClientIp>
<OpenTime>Fri, 03 May 2019 05:59:43 GMT</OpenTime>
</Handle>
...
</Entries>
<NextMarker />
</EnumerationResults>'
"""
if response is None or response.body is None:
return None
entries = _list()
list_element = ETree.fromstring(response.body)
# Set next marker
next_marker = list_element.findtext('NextMarker') or None
setattr(entries, 'next_marker', next_marker)
handles_list_element = list_element.find('Entries')
for handle_element in handles_list_element.findall('Handle'):
# Name element
handle = Handle()
handle.handle_id = handle_element.findtext('HandleId')
handle.path = handle_element.findtext('Path')
handle.file_id = handle_element.findtext('FileId')
handle.parent_id = handle_element.findtext('ParentId')
handle.session_id = handle_element.findtext('SessionId')
handle.client_ip = handle_element.findtext('ClientIp')
handle.open_time = parser.parse(handle_element.findtext('OpenTime'))
last_connect_time_string = handle_element.findtext('LastReconnectTime')
if last_connect_time_string is not None:
handle.last_reconnect_time = parser.parse(last_connect_time_string)
# Add file to list
entries.append(handle)
return entries
def _parse_close_handle_response(response):
if response is None or response.body is None:
return 0
results = _list()
results.append(int(response.headers['x-ms-number-of-handles-closed']))
next_marker = None if 'x-ms-marker' not in response.headers else response.headers['x-ms-marker']
setattr(results, 'next_marker', next_marker)
return results
def _convert_xml_to_ranges(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<Ranges>
<Range>
<Start>Start Byte</Start>
<End>End Byte</End>
</Range>
<Range>
<Start>Start Byte</Start>
<End>End Byte</End>
</Range>
</Ranges>
'''
if response is None or response.body is None:
return None
ranges = list()
ranges_element = ETree.fromstring(response.body)
for range_element in ranges_element.findall('Range'):
# Parse range
range = FileRange(int(range_element.findtext('Start')), int(range_element.findtext('End')))
# Add range to list
ranges.append(range)
return ranges
def _convert_xml_to_share_stats(response):
'''
<?xml version="1.0" encoding="utf-8"?>
<ShareStats>
<ShareUsageBytes>15</ShareUsageBytes>
</ShareStats>
'''
if response is None or response.body is None:
return None
share_stats_element = ETree.fromstring(response.body)
return int(share_stats_element.findtext('ShareUsageBytes'))
| {
"content_hash": "7ad46f665ffd95ea9a25931a4f9c6f15",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 134,
"avg_line_length": 29.21183800623053,
"alnum_prop": 0.6359176708968753,
"repo_name": "Azure/azure-storage-python",
"id": "98a4cf904f0948eb14bc43f6bed5c1e14f8a217f",
"size": "9687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-storage-file/azure/storage/file/_deserialization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "779"
},
{
"name": "Python",
"bytes": "1674801"
},
{
"name": "Shell",
"bytes": "168"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the boolean ``featured`` flag (default False) to the Article model."""
    dependencies = [
        ('article', '0002_auto_20141207_0840'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='featured',
            # existing rows get featured=False
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
| {
"content_hash": "750d3be27ff6bec44ff0e66970fc68cc",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 21.94736842105263,
"alnum_prop": 0.592326139088729,
"repo_name": "F483/trainlessmagazine.com",
"id": "bf0d9578f38b66f370a8effdde99688501bd8d9f",
"size": "441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "article/migrations/0003_article_featured.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1927635"
},
{
"name": "HTML",
"bytes": "6030086"
},
{
"name": "JavaScript",
"bytes": "232810"
},
{
"name": "Makefile",
"bytes": "3507"
},
{
"name": "Python",
"bytes": "44196"
},
{
"name": "Ruby",
"bytes": "4418"
}
],
"symlink_target": ""
} |
"""
lnrater.py contains Lightning Network node rating functionality.
"""
import asyncio
from collections import defaultdict
from pprint import pformat
from random import choices
from statistics import mean, median, stdev
from typing import TYPE_CHECKING, Dict, NamedTuple, Tuple, List, Optional
import sys
import time
from .logging import Logger
from .util import profiler, get_running_loop
from .lnrouter import fee_for_edge_msat
from .lnutil import LnFeatures, ln_compare_features, IncompatibleLightningFeatures
if TYPE_CHECKING:
from .network import Network
from .channel_db import Policy, NodeInfo
from .lnchannel import ShortChannelID
from .lnworker import LNWallet
# Blocks per month: 40 blocks/hour * 24 h * 30 days.
# NOTE(review): 40 blocks/hour implies a ~90 s block interval — confirm this
# matches the target chain's block time.
MONTH_IN_BLOCKS = 40 * 24 * 30
# the node scores are only recomputed after this time interval has elapsed
RATER_UPDATE_TIME_SEC = 10 * 60
# reference payment amount used for computing an effective relative fee
FEE_AMOUNT_MSAT = 100_000_000
# Minimal requirements for a node to be considered a "good" candidate.
# exclude nodes with fewer channels than this
# monacoin is OK?
EXCLUDE_NUM_CHANNELS = 2
# exclude nodes with a smaller mean channel capacity
EXCLUDE_MEAN_CAPACITY_MSAT = 1_000_000_000
# exclude nodes younger than this (in blocks)
EXCLUDE_NODE_AGE = 2 * MONTH_IN_BLOCKS
# exclude nodes whose mean channel age is below this (in blocks)
EXCLUDE_MEAN_CHANNEL_AGE = EXCLUDE_NODE_AGE
# exclude nodes whose effective fee rate exceeds this
EXCLUDE_EFFECTIVE_FEE_RATE = 0.001500
# exclude nodes whose most recent channel open is older than this (in blocks)
EXCLUDE_BLOCKS_LAST_CHANNEL = 3 * MONTH_IN_BLOCKS
class NodeStats(NamedTuple):
    """Per-node statistics collected from the public channel graph."""
    # number of public channels (policies) attached to the node
    number_channels: int
    # capacity related (all in millisatoshi)
    total_capacity_msat: int
    median_capacity_msat: float
    mean_capacity_msat: float
    # block height related (ages measured in blocks relative to local height)
    node_age_block_height: int
    mean_channel_age_block_height: float
    blocks_since_last_channel: int
    # effective relative fee averaged over the node's channels
    mean_fee_rate: float
def weighted_sum(numbers: List[float], weights: List[float]) -> float:
    """Return the weighted average of *numbers* with the given *weights*."""
    total = sum(n * w for n, w in zip(numbers, weights))
    return total / sum(weights)
class LNRater(Logger):
    def __init__(self, lnworker: 'LNWallet', network: 'Network'):
        """LNRater can be used to suggest nodes to open up channels with.
        The graph is analyzed and some heuristics are applied to sort out nodes
        that are deemed to be bad routers or unmaintained.
        """
        Logger.__init__(self)
        self.lnworker = lnworker
        self.network = network
        self._node_stats: Dict[bytes, NodeStats] = {}  # node_id -> NodeStats
        self._node_ratings: Dict[bytes, float] = {}  # node_id -> float
        self._policies_by_nodes: Dict[bytes, List[Tuple[ShortChannelID, Policy]]] = defaultdict(list)  # node_id -> (short_channel_id, policy)
        self._last_analyzed = 0  # timestamp of the last completed analysis
        self._last_progress_percent = 0  # gossip sync progress at last analysis
    def maybe_analyze_graph(self):
        """Re-analyze the graph if the cached scores are stale (blocking)."""
        loop = self.network.asyncio_loop
        fut = asyncio.run_coroutine_threadsafe(self._maybe_analyze_graph(), loop)
        fut.result()
    def analyze_graph(self):
        """Forces a graph analysis, e.g., due to external triggers like
        the graph info reaching 50%."""
        loop = self.network.asyncio_loop
        fut = asyncio.run_coroutine_threadsafe(self._analyze_graph(), loop)
        fut.result()
    async def _maybe_analyze_graph(self):
        """Analyzes the graph when in early sync stage (>30%) or when caching
        time expires."""
        # gather information about graph sync status
        # NOTE(review): current_channels and total are unused here.
        current_channels, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
        # gossip sync progress state could be None when not started, but channel
        # db already knows something about the graph, which is why we allow to
        # evaluate the graph early
        if progress_percent is not None or self.network.channel_db.num_nodes > 500:
            progress_percent = progress_percent or 0  # convert None to 0
            now = time.time()
            # graph should have changed significantly during the sync progress
            # or last analysis was a long time ago
            if (30 <= progress_percent and progress_percent - self._last_progress_percent >= 10 or
                    self._last_analyzed + RATER_UPDATE_TIME_SEC < now):
                await self._analyze_graph()
                self._last_progress_percent = progress_percent
                self._last_analyzed = now
    async def _analyze_graph(self):
        """Collect per-node statistics and recompute all node ratings."""
        await self.network.channel_db.data_loaded.wait()
        self._collect_policies_by_node()
        loop = get_running_loop()
        # the analysis is run in an executor because it's costly
        await loop.run_in_executor(None, self._collect_purged_stats)
        self._rate_nodes()
        now = time.time()
        self._last_analyzed = now
    def _collect_policies_by_node(self):
        """Group all known channel policies by the node that issued them."""
        policies = self.network.channel_db.get_node_policies()
        for pv, p in policies.items():
            # append tuples of ShortChannelID and Policy
            self._policies_by_nodes[pv[0]].append((pv[1], p))
    @profiler
    def _collect_purged_stats(self):
        """Traverses through the graph and sorts out nodes."""
        current_height = self.network.get_local_height()
        node_infos = self.network.channel_db.get_node_infos()
        for n, channel_policies in self._policies_by_nodes.items():
            try:
                # use policies synonymously to channels
                num_channels = len(channel_policies)
                # save some time for nodes we are not interested in:
                if num_channels < EXCLUDE_NUM_CHANNELS:
                    continue
                # analyze block heights
                block_heights = [p[0].block_height for p in channel_policies]
                node_age_bh = current_height - min(block_heights)
                if node_age_bh < EXCLUDE_NODE_AGE:
                    continue
                mean_channel_age_bh = current_height - mean(block_heights)
                if mean_channel_age_bh < EXCLUDE_MEAN_CHANNEL_AGE:
                    continue
                blocks_since_last_channel = current_height - max(block_heights)
                if blocks_since_last_channel > EXCLUDE_BLOCKS_LAST_CHANNEL:
                    continue
                # analyze capacities (htlc_maximum_msat serves as a proxy)
                capacities = [p[1].htlc_maximum_msat for p in channel_policies]
                if None in capacities:
                    continue
                total_capacity = sum(capacities)
                mean_capacity = total_capacity / num_channels if num_channels else 0
                if mean_capacity < EXCLUDE_MEAN_CAPACITY_MSAT:
                    continue
                median_capacity = median(capacities)
                # analyze fees: relative cost of routing FEE_AMOUNT_MSAT
                effective_fee_rates = [fee_for_edge_msat(
                    FEE_AMOUNT_MSAT,
                    p[1].fee_base_msat,
                    p[1].fee_proportional_millionths) / FEE_AMOUNT_MSAT for p in channel_policies]
                mean_fees_rate = mean(effective_fee_rates)
                if mean_fees_rate > EXCLUDE_EFFECTIVE_FEE_RATE:
                    continue
                self._node_stats[n] = NodeStats(
                    number_channels=num_channels,
                    total_capacity_msat=total_capacity,
                    median_capacity_msat=median_capacity,
                    mean_capacity_msat=mean_capacity,
                    node_age_block_height=node_age_bh,
                    mean_channel_age_block_height=mean_channel_age_bh,
                    blocks_since_last_channel=blocks_since_last_channel,
                    mean_fee_rate=mean_fees_rate
                )
            except Exception as e:
                self.logger.exception("Could not use channel policies for "
                                      "calculating statistics.")
                self.logger.debug(pformat(channel_policies))
                continue
        # NOTE(review): the two f-string parts concatenate without a space
        # ("statisticsfor"); the log message text is left unchanged here.
        self.logger.info(f"node statistics done, calculated statistics"
                         f"for {len(self._node_stats)} nodes")
    def _rate_nodes(self):
        """Rate nodes by collected statistics."""
        max_capacity = 0
        max_num_chan = 0
        min_fee_rate = float('inf')
        for stats in self._node_stats.values():
            max_capacity = max(max_capacity, stats.total_capacity_msat)
            max_num_chan = max(max_num_chan, stats.number_channels)
            min_fee_rate = min(min_fee_rate, stats.mean_fee_rate)
        for n, stats in self._node_stats.items():
            heuristics = []
            heuristics_weights = []
            # Construct an average score which leads to recommendation of nodes
            # with low fees, large capacity and reasonable number of channels.
            # This is somewhat akin to preferential attachment, but low fee
            # nodes are more favored. Here we make a compromise between user
            # comfort and decentralization, tending towards user comfort.
            # number of channels
            heuristics.append(stats.number_channels / max_num_chan)
            heuristics_weights.append(0.2)
            # total capacity
            heuristics.append(stats.total_capacity_msat / max_capacity)
            heuristics_weights.append(0.8)
            # inverse fees
            # NOTE(review): the numerator is capped at 1E-6 via min() — looks
            # deliberate (bounds the fee score), but confirm min() vs max().
            fees = min(1E-6, min_fee_rate) / max(1E-10, stats.mean_fee_rate)
            heuristics.append(fees)
            heuristics_weights.append(1.0)
            self._node_ratings[n] = weighted_sum(heuristics, heuristics_weights)
    def suggest_node_channel_open(self) -> Tuple[bytes, NodeStats]:
        """Pick a compatible node to open a channel with, weighted by rating."""
        node_keys = list(self._node_stats.keys())
        node_ratings = list(self._node_ratings.values())
        channel_peers = self.lnworker.channel_peers()
        node_info: Optional["NodeInfo"] = None
        while True:
            # randomly pick nodes weighted by node_rating
            pk = choices(node_keys, weights=node_ratings, k=1)[0]
            # node should have compatible features
            node_info = self.network.channel_db.get_node_infos().get(pk, None)
            peer_features = LnFeatures(node_info.features)
            try:
                ln_compare_features(self.lnworker.features, peer_features)
            except IncompatibleLightningFeatures as e:
                self.logger.info("suggested node is incompatible")
                continue
            # don't want to connect to nodes we are already connected to
            if pk in channel_peers:
                continue
            # don't want to connect to nodes we already have a channel with on another device
            if self.lnworker.has_conflicting_backup_with(pk):
                continue
            break
        alias = node_info.alias if node_info else 'unknown node alias'
        self.logger.info(
            f"node rating for {alias}:\n"
            f"{pformat(self._node_stats[pk])} (score {self._node_ratings[pk]})")
        return pk, self._node_stats[pk]
    def suggest_peer(self) -> Optional[bytes]:
        """Suggests a LN node to open a channel with.
        Returns a node ID (pubkey).
        """
        self.maybe_analyze_graph()
        if self._node_ratings:
            return self.suggest_node_channel_open()[0]
        else:
            return None
| {
"content_hash": "808f5d3854b5c314627175cf3d0958d5",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 142,
"avg_line_length": 40.98550724637681,
"alnum_prop": 0.6148338048090524,
"repo_name": "wakiyamap/electrum-mona",
"id": "957b5f6eb3fb7e1393280c0b4fc9df9a48722a2d",
"size": "11493",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum_mona/lnrater.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13043"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2162"
},
{
"name": "NSIS",
"bytes": "7779"
},
{
"name": "Python",
"bytes": "4381566"
},
{
"name": "Ruby",
"bytes": "16375"
},
{
"name": "Shell",
"bytes": "100799"
},
{
"name": "kvlang",
"bytes": "67448"
}
],
"symlink_target": ""
} |
from flask import Flask
# Serve static assets under the /static URL prefix.
app = Flask(__name__, static_url_path='/static')
# Imported at the bottom on purpose — presumably app.views imports `app`
# (the standard Flask circular-import workaround); confirm against views.py.
from app import views
"content_hash": "ad8e78d612338174bfd12f87a693b993",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 48,
"avg_line_length": 23.75,
"alnum_prop": 0.7263157894736842,
"repo_name": "euri16/dulcerefugio-py",
"id": "34206f5934468648aa3ce2112ebbe2a99cbe2d52",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DulceRefugio/app/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "189245"
},
{
"name": "HTML",
"bytes": "29950"
},
{
"name": "JavaScript",
"bytes": "254940"
},
{
"name": "Python",
"bytes": "598"
}
],
"symlink_target": ""
} |
import re
import json
from xml.etree.ElementTree import Element, SubElement, tostring, fromstring
from vumi.transports.wechat.errors import (
WeChatParserException, WeChatException)
def get_child_value(node, name):
    """Return the stripped text of *node*'s single child element *name*.

    Raises ValueError (from unpacking) if the child is absent or occurs
    more than once; an empty element yields ''.
    """
    (child,) = node.findall(name)
    if child.text is None:
        return ''
    return child.text.strip()
def append(node, tag, value):
    """Append a child element *tag* with text *value* to *node*."""
    child = SubElement(node, tag)
    child.text = value
class WeChatMessage(object):
    """Base class for WeChat messages parsed from XML documents."""

    # Child element names that must be present in the document.
    mandatory_fields = ()
    # Child element names that are appended positionally only when present.
    optional_fields = ()

    @classmethod
    def from_xml(cls, doc):
        """Construct an instance from an ElementTree document *doc*.

        Field values are gathered in declaration order and passed
        positionally to the subclass constructor.
        """
        values = []
        for name in cls.mandatory_fields:
            values.append(get_child_value(doc, name))
        for name in cls.optional_fields:
            try:
                values.append(get_child_value(doc, name))
            except ValueError:
                continue  # optional element not present
        return cls(*values)
class TextMessage(WeChatMessage):
    """A plain-text WeChat message."""

    mandatory_fields = (
        'ToUserName',
        'FromUserName',
        'CreateTime',
        'Content',
    )
    optional_fields = (
        'MsgId',
    )

    def __init__(self, to_user_name, from_user_name, create_time, content,
                 msg_id=None):
        self.to_user_name = to_user_name
        self.from_user_name = from_user_name
        self.create_time = create_time
        self.content = content
        self.msg_id = msg_id

    @classmethod
    def from_vumi_message(cls, message):
        """Build a reply TextMessage from a vumi message.

        The reply's FromUserName falls back to the original ToUserName
        stored in the transport metadata, if available.
        """
        wechat_md = message['transport_metadata'].get('wechat', {})
        reply_from = wechat_md.get('ToUserName', message['from_addr'])
        return cls(message['to_addr'], reply_from,
                   message['timestamp'].strftime('%s'),
                   message['content'])

    def to_xml(self):
        """Serialize to the WeChat XML push format."""
        root = Element('xml')
        for tag, value in (('ToUserName', self.to_user_name),
                           ('FromUserName', self.from_user_name),
                           ('CreateTime', self.create_time),
                           ('MsgType', 'text'),
                           ('Content', self.content)):
            append(root, tag, value)
        return tostring(root)

    def to_json(self):
        """Serialize to the WeChat JSON (custom service) API format."""
        payload = {
            'touser': self.to_user_name,
            'msgtype': 'text',
            'text': {
                'content': self.content,
            }
        }
        return json.dumps(payload)
class NewsMessage(WeChatMessage):
    """A WeChat 'news' (rich link) message."""

    # Matches content with something URL-ish in it, capturing the text
    # before, the URL itself, and the text after.
    URLISH = re.compile(
        r'(?P<before>.*)'
        r'(?P<url>http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+)'
        r'(?P<after>.*?)')

    def __init__(self, to_user_name, from_user_name, create_time,
                 items=None):
        self.to_user_name = to_user_name
        self.from_user_name = from_user_name
        self.create_time = create_time
        self.items = ([] if items is None else items)

    @classmethod
    def accepts(cls, vumi_message):
        """Return a match object when the message content contains a URL."""
        return cls.URLISH.match(vumi_message['content'])

    @classmethod
    def from_vumi_message(cls, match, vumi_message):
        """Build a single-item NewsMessage from a vumi message and its
        URLISH *match*."""
        md = vumi_message['transport_metadata'].get('wechat', {})
        reply_from = md.get('ToUserName', vumi_message['from_addr'])
        url_data = match.groupdict()
        item = {
            'title': '%(before)s' % url_data,
            'url': '%(url)s' % url_data,
            'description': vumi_message['content']
        }
        return cls(vumi_message['to_addr'], reply_from,
                   vumi_message['timestamp'].strftime('%s'),
                   [item])

    def to_xml(self):
        """Serialize to the WeChat XML push format."""
        root = Element('xml')
        append(root, 'ToUserName', self.to_user_name)
        append(root, 'FromUserName', self.from_user_name)
        append(root, 'CreateTime', self.create_time)
        append(root, 'MsgType', 'news')
        append(root, 'ArticleCount', str(len(self.items)))
        articles = SubElement(root, 'Articles')
        for item in self.items:
            if not any(item.values()):
                raise WeChatException(
                    'News items must have some values.')
            item_element = SubElement(articles, 'item')
            # Emit the known keys in a fixed order, skipping absent ones.
            for key, tag in (('title', 'Title'),
                             ('description', 'Description'),
                             ('picurl', 'PicUrl'),
                             ('url', 'Url')):
                if key in item:
                    append(item_element, tag, item[key])
        return tostring(root)

    def to_json(self):
        """Serialize to the WeChat JSON (custom service) API format."""
        payload = {
            'touser': self.to_user_name,
            'msgtype': 'news',
            'news': {
                'articles': self.items
            }
        }
        return json.dumps(payload)
class EventMessage(WeChatMessage):
    """A WeChat event push (subscribe, menu click, ...).

    NOTE(review): ``optional_fields`` lists MsgId before EventKey, but
    ``__init__`` has no ``msg_id`` parameter. If an event document ever
    contains a MsgId element, ``WeChatMessage.from_xml`` would pass its
    value positionally into ``event_key``. Confirm whether event pushes
    can carry MsgId before relying on ``event_key``.
    """
    mandatory_fields = (
        'ToUserName',
        'FromUserName',
        'CreateTime',
        'Event',
    )
    optional_fields = (
        'MsgId',
        'EventKey',
    )
    def __init__(self, to_user_name, from_user_name, create_time, event,
                 event_key=None):
        self.to_user_name = to_user_name
        self.from_user_name = from_user_name
        self.create_time = create_time
        self.event = event
        self.event_key = event_key
class WeChatXMLParser(object):
    """Dispatches raw WeChat XML payloads to the matching message class."""

    ENCODING = 'utf-8'

    # MsgType value -> message class
    CLASS_MAP = {
        'text': TextMessage,
        'news': NewsMessage,
        'event': EventMessage,
    }

    @classmethod
    def parse(cls, string):
        """Decode *string* and build the appropriate WeChatMessage."""
        doc = fromstring(string.decode(cls.ENCODING))
        return cls.get_class(doc).from_xml(doc)

    @classmethod
    def get_class(cls, doc):
        """Return the message class for the document's single MsgType.

        Raises WeChatParserException when MsgType is missing, duplicated,
        or not supported.
        """
        msg_types = doc.findall('MsgType')
        if len(msg_types) == 0:
            raise WeChatParserException('No MsgType found.')
        if len(msg_types) > 1:
            raise WeChatParserException('More than 1 MsgType found.')
        msg_type = msg_types[0].text.strip()
        if msg_type not in cls.CLASS_MAP:
            raise WeChatParserException(
                'Unsupported MsgType: %s' % (msg_type,))
        return cls.CLASS_MAP[msg_type]
| {
"content_hash": "d9782b8f94533f38f198ba9ab37951e2",
"timestamp": "",
"source": "github",
"line_count": 207,
"max_line_length": 98,
"avg_line_length": 29.169082125603865,
"alnum_prop": 0.5460417356740642,
"repo_name": "vishwaprakashmishra/xmatrix",
"id": "39e4d1f28c8f5d6a3320d660ca54c09c4392a055",
"size": "6113",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vumi/transports/wechat/message_types.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "29735"
},
{
"name": "JavaScript",
"bytes": "5556"
},
{
"name": "Puppet",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "2968329"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="family", parent_name="scattercarpet.textfont", **kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "calc"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
| {
"content_hash": "90e0788ec83007c07f9b225969e13884",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 35.75,
"alnum_prop": 0.5891608391608392,
"repo_name": "plotly/plotly.py",
"id": "42020c181ae9a1fb3308d750f8e48c3a47a7d259",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scattercarpet/textfont/_family.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import collections
from oslo_config import cfg
from networking_cisco.plugins.ml2.drivers.cisco.nexus import (
constants as const)
from networking_cisco.plugins.ml2.drivers.cisco.nexus import exceptions
from networking_cisco.plugins.ml2.drivers.cisco.nexus import nexus_db_v2
from networking_cisco.tests.unit.ml2.drivers.cisco.nexus import (
test_cisco_nexus_base)
# Host names used by the replay test configurations below.
RP_HOST_NAME_1 = 'UniquePort'
RP_HOST_NAME_2 = 'DuplicateVlan'
RP_HOST_NAME_3 = 'DuplicatePort'
RP_HOST_NAME_DUAL = 'testdualhost'
# NOTE(review): usage not visible in this chunk — presumably bounds the
# number of replay attempts exercised; confirm against the test methods.
MAX_REPLAY_COUNT = 4
class TestCiscoNexusReplayResults(
    test_cisco_nexus_base.TestCiscoNexusBaseResults):
    """Expected driver (config CLI) results for the Nexus replay tests.

    Keys are looked up by name from the test methods; values are lists of
    expected add/delete interface and VLAN result strings.
    """
    test_results = {
        # -- unique-port scenario: init, two adds, replay, two deletes --
        'driver_result_unique_init': (
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 'None')]),
        'driver_result_unique_add1': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 267)]),
        'driver_result_unique_add2': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(265),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 265)]),
        'driver_result_unique_del1': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                format('ethernet', '1\/10', 265),
            test_cisco_nexus_base.RESULT_DEL_VLAN.format(265)]),
        'driver_result_unique_del2': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                format('ethernet', '1\/10', 267),
            test_cisco_nexus_base.RESULT_DEL_VLAN.format(267)]),
        'driver_result_unique_2vlan_replay': (
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', '265,267'),
            test_cisco_nexus_base.RESULT_ADD_VLAN.format('265,267')]),
        # -- duplicate-VLAN scenario (same VLAN on two ports) --
        'dupl_vlan_result1_add': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 267),
            test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/20', 267)]),
        'dupl_vlan_result2_add': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 267),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/20', 267)]),
        'dupl_vlan_result2_del': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                format('ethernet', '1\/10', 267),
            test_cisco_nexus_base.RESULT_DEL_VLAN.format(267),
            test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                format('ethernet', '1\/20', 267)]),
        'dupl_vlan_result_replay': (
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 267),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/20', 267),
            test_cisco_nexus_base.RESULT_ADD_VLAN.format(267)]),
        # -- duplicate-port scenario (two instances on one port) --
        'dupl_port_result_replay': (
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 267),
            test_cisco_nexus_base.RESULT_ADD_VLAN.format(267)]),
        # -- switch up/down and restore scenarios --
        'switch_up_result_add': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(269),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/3', 269)]),
        'switch_up_result_del': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                format('ethernet', '1\/3', 269),
            test_cisco_nexus_base.RESULT_DEL_VLAN.format(269)]),
        'switch_restore_result_add': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(269),
            test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/3', 269)]),
        'switch_restore_result_replay': (
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/2', 269),
            test_cisco_nexus_base.RESULT_ADD_VLAN.format(269)]),
        'switch_restore_result_del': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                format('ethernet', '1\/3', 269),
            test_cisco_nexus_base.RESULT_DEL_VLAN.format(269),
            test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                format('ethernet', '1\/2', 269),
            test_cisco_nexus_base.RESULT_DEL_VLAN.format(269)]),
    }
class TestCiscoNexusReplay(test_cisco_nexus_base.TestCiscoNexusReplayBase):
"""Unit tests for Replay of Cisco ML2 Nexus data."""
test_configs = {
'test_replay_unique1':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
RP_HOST_NAME_1,
test_cisco_nexus_base.NEXUS_PORT_1,
test_cisco_nexus_base.INSTANCE_1,
test_cisco_nexus_base.VLAN_ID_1,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_unique2':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
RP_HOST_NAME_1,
test_cisco_nexus_base.NEXUS_PORT_1,
test_cisco_nexus_base.INSTANCE_2,
test_cisco_nexus_base.VLAN_ID_2,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_duplvlan1':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_2,
RP_HOST_NAME_2,
test_cisco_nexus_base.NEXUS_PORT_1,
test_cisco_nexus_base.INSTANCE_1,
test_cisco_nexus_base.VLAN_ID_1,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_duplvlan2':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_2,
RP_HOST_NAME_2,
test_cisco_nexus_base.NEXUS_PORT_2,
test_cisco_nexus_base.INSTANCE_2,
test_cisco_nexus_base.VLAN_ID_1,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_duplport1':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_3,
RP_HOST_NAME_3,
test_cisco_nexus_base.NEXUS_PORT_1,
test_cisco_nexus_base.INSTANCE_1,
test_cisco_nexus_base.VLAN_ID_1,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_duplport2':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_3,
RP_HOST_NAME_3,
test_cisco_nexus_base.NEXUS_PORT_1,
test_cisco_nexus_base.INSTANCE_2,
test_cisco_nexus_base.VLAN_ID_1,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_dual':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_DUAL,
RP_HOST_NAME_DUAL,
test_cisco_nexus_base.NEXUS_DUAL1,
test_cisco_nexus_base.INSTANCE_DUAL,
test_cisco_nexus_base.VLAN_ID_DUAL,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_dual2':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_DUAL2,
RP_HOST_NAME_DUAL,
test_cisco_nexus_base.NEXUS_DUAL2,
test_cisco_nexus_base.INSTANCE_DUAL,
test_cisco_nexus_base.VLAN_ID_DUAL,
test_cisco_nexus_base.NO_VXLAN_ID,
None,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
'test_replay_vxlan_unique1':
test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
RP_HOST_NAME_1,
test_cisco_nexus_base.NEXUS_PORT_1,
test_cisco_nexus_base.INSTANCE_1,
test_cisco_nexus_base.VLAN_ID_1,
test_cisco_nexus_base.VXLAN_ID,
test_cisco_nexus_base.MCAST_GROUP,
test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
{},
None,
test_cisco_nexus_base.NORMAL_VNIC),
}
test_configs = collections.OrderedDict(sorted(test_configs.items()))
    def setUp(self):
        """Sets up mock ncclient, and switch and credentials dictionaries."""
        # Select the ncclient driver and allow SSH connection caching
        # for the replay tests below.
        cfg.CONF.set_override('nexus_driver', 'ncclient', 'ml2_cisco')
        cfg.CONF.set_override('never_cache_ssh_connection', False, 'ml2_cisco')
        super(TestCiscoNexusReplay, self).setUp()
        # Expected-result lookup table shared by all replay tests below.
        self.results = TestCiscoNexusReplayResults()
def test_replay_unique_ports(self):
"""Provides replay data and result data for unique ports. """
first_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_add1'),
'nbr_db_entries': 1}
second_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_add2'),
'nbr_db_entries': 2}
first_del = {
'driver_results': self.results.get_test_results(
'driver_result_unique_del1'),
'nbr_db_entries': 1}
second_del = {
'driver_results': self.results.get_test_results(
'driver_result_unique_del2'),
'nbr_db_entries': 0}
self._process_replay(
'test_replay_unique1',
'test_replay_unique2',
self.results.get_test_results(
'driver_result_unique_init'),
first_add,
second_add,
self.results.get_test_results(
'driver_result_unique_2vlan_replay'),
first_del,
second_del)
def test_replay_duplicate_vlan(self):
"""Provides replay data and result data for duplicate vlans. """
first_add = {
'driver_results': self.results.get_test_results(
'dupl_vlan_result1_add'),
'nbr_db_entries': 2}
# TODO(caboucha)
# 'driver_result': [], until the correct fix for
# the following issue is resolved.
# https://review.openstack.org/#/c/241216/
second_add = {
'driver_results': self.results.get_test_results(
'dupl_vlan_result2_add'),
'nbr_db_entries': 4}
first_del = {'driver_results': [],
'nbr_db_entries': 2}
second_del = {
'driver_results': self.results.get_test_results(
'dupl_vlan_result2_del'),
'nbr_db_entries': 0}
self._process_replay('test_replay_duplvlan1',
'test_replay_duplvlan2',
[],
first_add, second_add,
self.results.get_test_results(
'dupl_vlan_result_replay'),
first_del, second_del)
def test_replay_duplicate_ports(self):
"""Provides replay data and result data for duplicate ports. """
first_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_add1'),
'nbr_db_entries': 1}
# TODO(caboucha)
# 'driver_result': [], until the correct fix for
# the following issue is resolved.
# https://review.openstack.org/#/c/241216/
second_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_add1'),
'nbr_db_entries': 2}
first_del = {'driver_results': [],
'nbr_db_entries': 1}
second_del = {
'driver_results': self.results.get_test_results(
'driver_result_unique_del2'),
'nbr_db_entries': 0}
self._process_replay('test_replay_duplport1',
'test_replay_duplport2',
[],
first_add, second_add,
self.results.get_test_results(
'dupl_port_result_replay'),
first_del, second_del)
def test_replay_enable_vxlan_feature_failure(self):
"""Verifies exception during enable VXLAN feature driver. """
# Set configuration variable to add/delete the VXLAN global nexus
# switch values.
cfg.CONF.set_override('vxlan_global_config', True, 'ml2_cisco')
self._create_port_failure(
'connect.return_value.edit_config.side_effect',
'feature nv overlay vn-segment-vlan-based',
'test_replay_vxlan_unique1',
__name__)
def test_replay_disable_vxlan_feature_failure(self):
"""Verifies exception during disable VXLAN feature driver. """
# Set configuration variable to add/delete the VXLAN global nexus
# switch values.
cfg.CONF.set_override('vxlan_global_config', True, 'ml2_cisco')
self._delete_port_failure(
'connect.return_value.edit_config.side_effect',
'no feature nv overlay vn-segment-vlan-based',
'test_replay_vxlan_unique1',
__name__)
def test_replay_create_nve_member_failure(self):
"""Verifies exception during create nve member driver. """
self._create_port_failure(
'connect.return_value.edit_config.side_effect',
'member vni mcast-group',
'test_replay_vxlan_unique1',
__name__)
def test_replay_delete_nve_member_failure(self):
"""Verifies exception during delete nve member driver. """
self._delete_port_failure(
'connect.return_value.edit_config.side_effect',
'no member vni',
'test_replay_vxlan_unique1',
__name__)
def test_replay_create_vlan_failure(self):
"""Verifies exception during edit vlan create driver. """
self._create_port_failure(
'connect.return_value.edit_config.side_effect',
'vlan-id-create-delete',
'test_replay_unique1',
__name__)
def test_replay_delete_vlan_failure(self):
"""Verifies exception during edit vlan delete driver. """
self._delete_port_failure(
'connect.return_value.edit_config.side_effect',
'vlan-id-create-delete no vlan 267',
'test_replay_unique1',
__name__)
def test_replay_create_trunk_failure(self):
"""Verifies exception during create trunk interface driver. """
self._create_port_failure(
'connect.return_value.edit_config.side_effect',
'switchport trunk allowed vlan_id 267',
'test_replay_unique1',
__name__)
def test_replay_delete_trunk_failure(self):
"""Verifies exception during delete trunk interface driver. """
self._delete_port_failure(
'connect.return_value.edit_config.side_effect',
'switchport trunk allowed remove vlan 267',
'test_replay_unique1',
__name__)
def test_replay_new_port_success_if_one_switch_up(self):
"""Verifies create port successful if one multi-switch up."""
# Make sure port is not rejected when there are multiple
# switches and only one is active.
port_cfg1 = self.test_configs['test_replay_dual']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg1.nexus_ip_addr, const.SWITCH_ACTIVE)
port_cfg2 = self.test_configs['test_replay_dual2']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg2.nexus_ip_addr, const.SWITCH_INACTIVE)
# Set-up successful creation of port vlan config
self._basic_create_verify_port_vlan('test_replay_dual',
self.results.get_test_results(
'switch_up_result_add'),
nbr_of_bindings=1)
# Even though 2nd entry is inactive, there should be
# a data base entry configured for it.
# 2 = One entry for port the other for reserved binding
self.assertEqual(
2, len(nexus_db_v2.get_nexusport_switch_bindings(
port_cfg2.nexus_ip_addr)))
# Clean-up the port entry
self._basic_delete_verify_port_vlan('test_replay_dual',
self.results.get_test_results(
'switch_up_result_del'),
nbr_of_bindings=0)
def test_replay_port_success_if_one_switch_restored(self):
"""Verifies port restored after one of multi-switch restored."""
# Make sure port is not rejected when there are multiple
# switches and one is active. Then proceed to bring-up
# the other switch and it gets configured successfully.
# Then remove all.
port_cfg1 = self.test_configs['test_replay_dual']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg1.nexus_ip_addr, const.SWITCH_ACTIVE)
port_cfg2 = self.test_configs['test_replay_dual2']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg2.nexus_ip_addr, const.SWITCH_INACTIVE)
# Set-up successful creation of port vlan config
self._basic_create_verify_port_vlan('test_replay_dual',
self.results.get_test_results(
'switch_restore_result_add'),
nbr_of_bindings=1)
# Even though 2nd entry is inactive, there should be
# a data base entry configured for it.
# 2 = One entry for port the other for reserved binding
self.assertEqual(
2, len(nexus_db_v2.get_nexusport_switch_bindings(
port_cfg2.nexus_ip_addr)))
# Restore port data for that switch
self._cfg_monitor.check_connections()
self._verify_results(
self.results.get_test_results(
'switch_restore_result_replay'))
# Clear mock_call history.
self.mock_ncclient.reset_mock()
# Clean-up the port entries
self._basic_delete_verify_port_vlan('test_replay_dual',
self.results.get_test_results(
'switch_restore_result_del'),
nbr_of_bindings=0)
def test_replay_create_fails_if_single_switch_down(self):
"""Verifies port create fails if switch down."""
# Make sure create ethernet config fails when the
# switch state is inactive.
port_cfg = self.test_configs['test_replay_unique1']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
port_context = self._generate_port_context(port_cfg)
self.assertRaises(
exceptions.NexusConnectFailed,
self._cisco_mech_driver.create_port_postcommit,
port_context)
def test_replay_update_fails_if_single_switch_down(self):
"""Verifies port update fails if switch down."""
# Make sure update ethernet config fails when the
# switch state is inactive.
port_cfg = self.test_configs['test_replay_unique1']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
port_context = self._generate_port_context(port_cfg)
self.assertRaises(
exceptions.NexusConnectFailed,
self._cisco_mech_driver.update_port_postcommit,
port_context)
def test_replay_delete_success_if_switch_down(self):
"""Verifies port delete success if switch down."""
# Make sure delete config successful even when the
# switch state is inactive.
port_cfg = self.test_configs['test_replay_unique1']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_ACTIVE)
self._basic_create_verify_port_vlan('test_replay_unique1',
self.results.get_test_results(
'driver_result_unique_add1'))
# Make switch inactive before delete
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
# Clean-up the port entry
self._basic_delete_verify_port_vlan('test_replay_unique1',
[], nbr_of_bindings=0)
    def test_replay_get_nexus_type_failure_two_switches(self):
        """Verifies exception during ncclient get inventory. """
        # There are two switches, one active and the other inactive.
        # Make sure 'get_nexus_type' fails so create_port_postcommit()
        # will return an exception.  'get_nexus_type' is used as a
        # ping, so even if the switch is marked active, double-check
        # that it is indeed still active.  If not, and there are no
        # other active switches, then raise an exception.
        port_cfg1 = self.test_configs['test_replay_dual']
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            port_cfg1.nexus_ip_addr, const.SWITCH_ACTIVE)
        port_cfg2 = self.test_configs['test_replay_dual2']
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            port_cfg2.nexus_ip_addr, const.SWITCH_INACTIVE)
        # Set-up so the get_nexus_type driver ('show inventory' get)
        # fails on the active switch.
        config = {'connect.return_value.get.side_effect':
                  self._config_side_effects_on_count('show inventory',
                                                     Exception(__name__))}
        self.mock_ncclient.configure_mock(**config)
        port_context = self._generate_port_context(port_cfg1)
        self.assertRaises(
            exceptions.NexusConnectFailed,
            self._cisco_mech_driver.create_port_postcommit,
            port_context)
def test_replay_get_nexus_type_failure(self):
"""Verifies exception during get nexus_type while replaying. """
# Set switch state to False so replay config will start.
# This should not affect user configuration.
port_cfg = self.test_configs['test_replay_unique1']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_ACTIVE)
# Set-up successful creation of port vlan config
self._basic_create_verify_port_vlan('test_replay_unique1',
self.results.get_test_results(
'driver_result_unique_add1'))
# Set-up so get_nexus_type driver fails
config = {'connect.return_value.get.side_effect':
self._config_side_effects_on_count('show inventory',
Exception(__name__))}
self.mock_ncclient.configure_mock(**config)
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
# Perform replay which should not send back exception
# but merely quit
self._cfg_monitor.check_connections()
# Since get of nexus_type failed, there should be
# no attempt to configure anything.
self._verify_results([])
# Clean-up the port entry
self._basic_delete_verify_port_vlan('test_replay_unique1',
[])
    def test_replay_create_vlan_failure_during_replay(self):
        """Verifies exception during create vlan while replaying. """
        vlan267 = test_cisco_nexus_base.RESULT_ADD_VLAN.format(267)
        driver_result1 = [vlan267] * 2
        # Set switch state to False so replay config will start.
        # This should not affect user configuration.
        port_cfg = self.test_configs['test_replay_unique1']
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            port_cfg.nexus_ip_addr, const.SWITCH_ACTIVE)
        # Set-up successful creation of port vlan config
        self._basic_create_verify_port_vlan(
            'test_replay_unique1',
            self.results.get_test_results(
                'driver_result_unique_add1'))
        # Set-up exception during create_vlan
        config = {'connect.return_value.edit_config.side_effect':
                  self._config_side_effects_on_count(
                      'vlan-id-create-delete',
                      Exception(__name__ + '1'))}
        self.mock_ncclient.configure_mock(**config)
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            port_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
        # Perform replay which should not send back exception
        # but merely quit
        self._cfg_monitor.check_connections()
        # Verify that switch is put back into INACTIVE state
        self.assertEqual(
            const.SWITCH_INACTIVE,
            self._cisco_mech_driver.get_switch_ip_and_active_state(
                port_cfg.nexus_ip_addr))
        # The edit of create_vlan failed, but there will
        # be 2 create vlan attempts in mock call history.
        result_replay = (
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 267)] + driver_result1)
        self._verify_results(result_replay)
        # Clear the edit driver exception for next test.
        config = {'connect.return_value.edit_config.side_effect':
                  None}
        self.mock_ncclient.configure_mock(**config)
        # Perform replay again; this time it succeeds quietly.
        self._cfg_monitor.check_connections()
        # Verify that switch is in ACTIVE state
        self.assertEqual(
            const.SWITCH_ACTIVE,
            self._cisco_mech_driver.get_switch_ip_and_active_state(
                port_cfg.nexus_ip_addr))
        # Clear mock_call history.
        self.mock_ncclient.reset_mock()
        # Clean-up the port entry
        self._basic_delete_verify_port_vlan(
            'test_replay_unique1',
            self.results.get_test_results(
                'driver_result_unique_del2'))
    def test_replay_vlan_batch_failure_during_replay(self):
        """Verifies handling of batch vlan during replay."""
        tmp_cfg = self.TestConfigObj(
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
            RP_HOST_NAME_1,
            test_cisco_nexus_base.NEXUS_PORT_1,
            test_cisco_nexus_base.INSTANCE_1,
            test_cisco_nexus_base.VLAN_ID_1,
            test_cisco_nexus_base.NO_VXLAN_ID,
            None,
            test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
            {},
            None,
            test_cisco_nexus_base.NORMAL_VNIC)
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            tmp_cfg.nexus_ip_addr, const.SWITCH_ACTIVE)
        # Create a batch of port entries with unique vlans --
        # 10 more than fit in a single replay vlan batch.
        num_vlans = const.CREATE_VLAN_BATCH + 10
        for x in range(num_vlans):
            instance_id = test_cisco_nexus_base.INSTANCE_1 + '-' + str(x)
            new_cfg = tmp_cfg._replace(
                vlan_id=test_cisco_nexus_base.VLAN_ID_1 + x,
                instance_id=instance_id)
            self._create_port(new_cfg)
        # Put it back to inactive state so replay will start.
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            tmp_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
        self._cfg_monitor.check_connections()
        # Verify the switch is in restore stage 2 state
        self.assertEqual(
            const.SWITCH_RESTORE_S2,
            self._cisco_mech_driver.get_switch_ip_and_active_state(
                tmp_cfg.nexus_ip_addr))
        # Arrange for the next vlan edit to raise.
        config = {'connect.return_value.edit_config.side_effect':
                  self._config_side_effects_on_count(
                      'vlan-id-create-delete',
                      Exception(__name__ + '1'))}
        self.mock_ncclient.configure_mock(**config)
        # Call check_connections() again to attempt to send
        # last batch of 10 which should fail
        self._cfg_monitor.check_connections()
        # Verify the switch is back in INACTIVE state
        self.assertEqual(
            const.SWITCH_INACTIVE,
            self._cisco_mech_driver.get_switch_ip_and_active_state(
                tmp_cfg.nexus_ip_addr))
        # Clear mock_call history.
        self.mock_ncclient.reset_mock()
        # Clear the edit driver exception for next test.
        config = {'connect.return_value.edit_config.side_effect':
                  None}
        self.mock_ncclient.configure_mock(**config)
        # Call check_connections() again to restart restore
        self._cfg_monitor.check_connections()
        # Verify the switch is in restore stage 2 state
        self.assertEqual(
            const.SWITCH_RESTORE_S2,
            self._cisco_mech_driver.get_switch_ip_and_active_state(
                tmp_cfg.nexus_ip_addr))
        # Call check_connections() once more to successfully send the
        # last batch of 10, completing the restore.
        self._cfg_monitor.check_connections()
        # Verify the switch has reached ACTIVE state.
        self.assertEqual(
            const.SWITCH_ACTIVE,
            self._cisco_mech_driver.get_switch_ip_and_active_state(
                tmp_cfg.nexus_ip_addr))
def test_replay_no_retry_failure_handling(self):
"""Tests to check replay 'no retry' failure handling.
1) Verify config_failure is incremented upon failure during
replay config and verify create_vlan transactions are seen.
2) Verify contact_failure is incremented upon failure during
get_nexus_type transaction.
3) Verify receipt of new transaction does not reset
failure statistics.
4) Verify config&contact_failure is reset when replay is
successful.
"""
# Due to 2 retries in driver to deal with stale ncclient
# handle, the results are doubled.
vlan267 = '<vlan-id-create-delete\>\s+\<__XML__PARAM_value\>267'
driver_result2 = ([test_cisco_nexus_base.RESULT_ADD_INTERFACE.
format('ethernet', '1\/10', 267)] + [vlan267] * 2) * 4
config_replay = MAX_REPLAY_COUNT
port_cfg = self.test_configs['test_replay_unique1']
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_ACTIVE)
# Set-up successful creation of port vlan config
self._basic_create_verify_port_vlan(
'test_replay_unique1',
self.results.get_test_results(
'driver_result_unique_add1'))
# Test 1:
# Set the edit create vlan driver exception
# Perform replay MAX_REPLAY_COUNT times
# This should not roll-up an exception but merely quit
# and increment FAIL_CONFIG statistics
config = {'connect.return_value.edit_config.side_effect':
self._config_side_effects_on_count(
'vlan-id-create-delete',
Exception(__name__ + '1'))}
self.mock_ncclient.configure_mock(**config)
self._cisco_mech_driver.set_switch_ip_and_active_state(
port_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
for i in range(config_replay):
self._cfg_monitor.check_connections()
# Verify FAIL_CONFIG reached(MAX_REPLAY_COUNT) and there
# were only MAX_REPLAY_COUNT+1 attempts to send create_vlan.
# first is from test_replay_create_vlan_failure()
# and MAX_REPLAY_COUNT from check_connections()
self.assertEqual(
config_replay,
self._cisco_mech_driver.get_switch_replay_failure(
const.FAIL_CONFIG,
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
self._verify_results(driver_result2)
# Verify there exists a single port binding
# plus 1 for reserved switch entry
self.assertEqual(
2, len(nexus_db_v2.get_nexusport_switch_bindings(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1)))
# Clear mock_call history.
self.mock_ncclient.reset_mock()
# Clear the edit driver exception for next test.
config = {'connect.return_value.edit_config.side_effect':
None}
self.mock_ncclient.configure_mock(**config)
# Test 2)
# Set it up so get nexus type returns exception.
# FAIL_CONTACT should increment.
self._set_nexus_type_failure()
# Perform replay MAX_REPLAY_COUNT times
# This should not roll-up an exception but merely quit
for i in range(config_replay):
self._cfg_monitor.check_connections()
# Verify switch FAIL_CONTACT reached (MAX_REPLAY_COUNT)
# and there were no attempts to send create_vlan.
self.assertEqual(
config_replay,
self._cisco_mech_driver.get_switch_replay_failure(
const.FAIL_CONFIG,
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
self.assertEqual(
config_replay,
self._cisco_mech_driver.get_switch_replay_failure(
const.FAIL_CONTACT,
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
self._verify_results([])
# Test 3)
# Verify delete transaction doesn't affect failure stats.
self._basic_delete_verify_port_vlan('test_replay_unique1',
[])
# Verify failure stats is not reset
self.assertEqual(
config_replay,
self._cisco_mech_driver.get_switch_replay_failure(
const.FAIL_CONFIG,
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
self.assertEqual(
config_replay,
self._cisco_mech_driver.get_switch_replay_failure(
const.FAIL_CONTACT,
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
# Clear the get nexus type driver exception.
config = {'connect.return_value.get.side_effect':
None}
self.mock_ncclient.configure_mock(**config)
# Test 4)
# Verify config&contact_failure is reset when replay is
# successful.
# Perform replay once which will be successful causing
# failure stats to be reset to 0.
# Then verify these stats are indeed 0.
self._cfg_monitor.check_connections()
self.assertEqual(
0,
self._cisco_mech_driver.get_switch_replay_failure(
const.FAIL_CONFIG,
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
self.assertEqual(
0,
self._cisco_mech_driver.get_switch_replay_failure(
const.FAIL_CONTACT,
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
# Verify switch state is now active following successful replay.
self.assertEqual(
const.SWITCH_ACTIVE,
self._cisco_mech_driver.get_switch_ip_and_active_state(
test_cisco_nexus_base.NEXUS_IP_ADDRESS_1))
class TestCiscoNexusBaremetalReplayResults(
    test_cisco_nexus_base.TestCiscoNexusBaseResults):
    """Unit tests driver results for Cisco ML2 Nexus."""

    # Expected ncclient requests, expressed as regex fragments matched
    # against the mock call history.  '[\x00-\x7f]+' glues two requests
    # that arrive in one edit.
    # NOTE(review): the '1\/10'-style literals are non-raw; today the
    # unrecognized escapes keep their backslash, but raw strings would
    # silence invalid-escape warnings -- confirm bytes before converting.
    test_results = {

        'driver_result_unique_eth_init': (
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/10', 'None')],
            [test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                format('ethernet', '1\/20', 'None')],
        ),

        'driver_result_unique_eth_add1': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('ethernet', '1\/10', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('ethernet', '1\/10', 267))]),

        'driver_result_unique_eth_add2': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(265),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('ethernet', '1\/20', 265) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('ethernet', '1\/20', 265))]),

        'driver_result_unique_eth_del1': (
            [(test_cisco_nexus_base.RESULT_DEL_NATIVE_INTERFACE.
                 format('ethernet', '1\/20') +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('ethernet', '1\/20', 265)),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(265)]),

        'driver_result_unique_eth_del2': (
            [(test_cisco_nexus_base.RESULT_DEL_NATIVE_INTERFACE.
                 format('ethernet', '1\/10') +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('ethernet', '1\/10', 267)),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(267)]),

        'driver_result_unique_2if_replay': (
            [(test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('ethernet', '1\/10', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('ethernet', '1\/10', 267)),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('ethernet', '1\/20', 265) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('ethernet', '1\/20', 265)),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format('265,267')]),

        'driver_result_unique_eth_add_vm': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(265),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('ethernet', '1\/10', 265)]),

        'driver_result_unique_eth_del_vm': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('ethernet', '1\/10', 265),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(265)]),

        'driver_result_unique_2vlan_replay': (
            [(test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('ethernet', '1\/10', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('ethernet', '1\/10', 267)),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('ethernet', '1\/10', 265),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format('265,267')]),

        # vPC results: each request appears once per switch of the pair.
        'driver_result_unique_vPC_2switch_add1': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '469', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '469', 267)),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '469', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '469', 267))]),

        'driver_result_unique_vPC_2switch_del1': (
            [(test_cisco_nexus_base.RESULT_DEL_NATIVE_INTERFACE.
                 format('port-channel', '469') +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('port-channel', '469', 267)),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_DEL_NATIVE_INTERFACE.
                 format('port-channel', '469') +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('port-channel', '469', 267)),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(267)]),

        'driver_result_unique_vPC_2if_replay': (
            [(test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '469', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '469', 267)),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '469', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '469', 267)),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format(267)]),

        'driver_result_unique_vPC470_add1': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '470', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', 267)),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '470', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', 267))]),

        'driver_result_unique_vPC470_add2': (
            [test_cisco_nexus_base.RESULT_ADD_VLAN.format(265),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', 265),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format(265),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', 265)]),

        'driver_result_unique_vPC470_del1': (
            [test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('port-channel', '470', 265),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(265),
             test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('port-channel', '470', 265),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(265)]),

        'driver_result_unique_vPC470_del2': (
            [(test_cisco_nexus_base.RESULT_DEL_NATIVE_INTERFACE.
                 format('port-channel', '470') +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('port-channel', '470', 267)),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(267),
             (test_cisco_nexus_base.RESULT_DEL_NATIVE_INTERFACE.
                 format('port-channel', '470') +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_DEL_INTERFACE.
                 format('port-channel', '470', 267)),
             test_cisco_nexus_base.RESULT_DEL_VLAN.format(267)]),

        'driver_result_unique_vPC470_2vlan_replay': (
            [(test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '470', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', '267')),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', '265,267'),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format('265,267'),
             (test_cisco_nexus_base.RESULT_ADD_NATIVE_INTERFACE.
                 format('port-channel', '470', 267) +
              '[\x00-\x7f]+' +
              test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', '267')),
             test_cisco_nexus_base.RESULT_ADD_INTERFACE.
                 format('port-channel', '470', '265,267'),
             test_cisco_nexus_base.RESULT_ADD_VLAN.format('265,267')]),

    }
class TestCiscoNexusBaremetalReplay(
    test_cisco_nexus_base.TestCiscoNexusReplayBase):
    """Unit tests for Replay of Cisco ML2 Nexus data."""

    # Binding profile: single ethernet interface on switch 1.
    baremetal_profile = {
        "local_link_information": [
            {
                "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                "switch_info": {
                    "switch_ip": test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                },
            },
        ]
    }

    # Binding profile: second ethernet interface, also on switch 1.
    baremetal_profile2 = {
        "local_link_information": [
            {
                "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_2,
                "switch_info": {
                    "switch_ip": test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                },
            },
        ]
    }

    # Binding profile: vPC pair -- one link on each of two switches.
    baremetal_profile_vPC = {
        "local_link_information": [
            {
                "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                "switch_info": {
                    "switch_ip": test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                },
            },
            {
                "port_id": test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_2,
                "switch_info": {
                    "switch_ip": test_cisco_nexus_base.NEXUS_IP_ADDRESS_2,
                },
            },
        ]
    }

    # Named port configurations consumed by the replay tests below.
    test_configs = {
        'test_replay_unique1':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                None,
                test_cisco_nexus_base.HOST_NAME_UNUSED,
                test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                test_cisco_nexus_base.INSTANCE_1,
                test_cisco_nexus_base.VLAN_ID_1,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_BAREMETAL,
                baremetal_profile,
                test_cisco_nexus_base.HOST_NAME_Baremetal + '1',
                test_cisco_nexus_base.BAREMETAL_VNIC),
        'test_replay_unique2':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                None,
                test_cisco_nexus_base.HOST_NAME_UNUSED,
                test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_2,
                test_cisco_nexus_base.INSTANCE_2,
                test_cisco_nexus_base.VLAN_ID_2,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_BAREMETAL,
                baremetal_profile2,
                test_cisco_nexus_base.HOST_NAME_Baremetal + '1',
                test_cisco_nexus_base.BAREMETAL_VNIC),
        'test_replay_unique_vPC':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                None,
                test_cisco_nexus_base.HOST_NAME_UNUSED,
                test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                test_cisco_nexus_base.INSTANCE_1,
                test_cisco_nexus_base.VLAN_ID_1,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_BAREMETAL,
                baremetal_profile_vPC,
                test_cisco_nexus_base.HOST_NAME_Baremetal + '1',
                test_cisco_nexus_base.BAREMETAL_VNIC),
        # A normal (non-baremetal) VM port on the same interface.
        'test_config_vm':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                test_cisco_nexus_base.HOST_NAME_Baremetal + '1',
                test_cisco_nexus_base.NEXUS_BAREMETAL_PORT_1,
                test_cisco_nexus_base.INSTANCE_2,
                test_cisco_nexus_base.VLAN_ID_2,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
                {},
                None,
                test_cisco_nexus_base.NORMAL_VNIC),
    }
    # Sort so test iteration order is deterministic.
    test_configs = collections.OrderedDict(sorted(test_configs.items()))
    def setUp(self):
        """Sets up mock ncclient, and switch and credentials dictionaries."""
        # Select the ncclient driver and allow SSH connection caching
        # for the baremetal replay tests below.
        cfg.CONF.set_override('nexus_driver', 'ncclient', 'ml2_cisco')
        cfg.CONF.set_override('never_cache_ssh_connection', False, 'ml2_cisco')
        super(TestCiscoNexusBaremetalReplay, self).setUp()
        # Expected-result lookup table shared by all replay tests below.
        self.results = TestCiscoNexusBaremetalReplayResults()
    def _init_port_channel(self, ch_grp, which=1):
        # with Baremetal config when enet interface associated to
        # port-channel, the port-channel interface is configured instead.
        # This mock get() reply causes the driver to learn channel-group
        # 'ch_grp' from the switch.
        # NOTE(review): parameter 'which' is unused in this
        # implementation -- confirm whether any caller relies on it.
        data_xml = {'connect.return_value.get.return_value.data_xml':
                    'switchport trunk allowed vlan none\n'
                    'channel-group ' + str(ch_grp) + ' mode active'}
        self.mock_ncclient.configure_mock(**data_xml)
def test_replay_unique_ethernet_ports(self):
"""Provides replay data and result data for unique ports. """
first_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_add1'),
'nbr_db_entries': 1}
second_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_add2'),
'nbr_db_entries': 2}
first_del = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_del1'),
'nbr_db_entries': 1,
'nbr_db_mappings': 0}
second_del = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_del2'),
'nbr_db_entries': 0}
self._process_replay(
'test_replay_unique1',
'test_replay_unique2',
self.results.get_test_results(
'driver_result_unique_eth_init'),
first_add,
second_add,
self.results.get_test_results(
'driver_result_unique_2if_replay'),
first_del,
second_del)
def test_replay_unique_ethernet_port_and_vm(self):
"""Provides replay data and result data for unique ports. """
first_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_add1'),
'nbr_db_entries': 1}
second_add = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_add_vm'),
'nbr_db_entries': 2}
first_del = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_del_vm'),
'nbr_db_entries': 1}
second_del = {
'driver_results': self.results.get_test_results(
'driver_result_unique_eth_del2'),
'nbr_db_entries': 0}
self._process_replay(
'test_replay_unique1',
'test_config_vm',
self.results.get_test_results(
'driver_result_unique_eth_init'),
first_add,
second_add,
self.results.get_test_results(
'driver_result_unique_2vlan_replay'),
first_del,
second_del)
def test_replay_unique_vPC_ports(self):
    """Replay test for unique vPC (port-channel) ports."""
    results = self.results.get_test_results
    add_step = dict(
        driver_results=results('driver_result_unique_vPC_2switch_add1'),
        nbr_db_entries=2)
    del_step = dict(
        driver_results=results('driver_result_unique_vPC_2switch_del1'),
        nbr_db_entries=0)
    self._init_port_channel(469)
    self._process_replay(
        'test_replay_unique_vPC',
        None,
        [],
        add_step,
        None,
        results('driver_result_unique_vPC_2if_replay'),
        None,
        del_step)
def test_replay_unique_vPC_ports_and_vm(self):
    """Replay test for unique vPC ports together with a VM config."""
    results = self.results.get_test_results
    add_step_1 = dict(
        driver_results=results('driver_result_unique_vPC470_add1'),
        nbr_db_entries=2)
    add_step_2 = dict(
        driver_results=results('driver_result_unique_vPC470_add2'),
        nbr_db_entries=4)
    del_step_1 = dict(
        driver_results=results('driver_result_unique_vPC470_del1'),
        nbr_db_entries=2)
    del_step_2 = dict(
        driver_results=results('driver_result_unique_vPC470_del2'),
        nbr_db_entries=0)
    self._init_port_channel(470)
    self._process_replay(
        'test_replay_unique_vPC',
        'test_config_vm',
        [],
        add_step_1,
        add_step_2,
        results('driver_result_unique_vPC470_2vlan_replay'),
        del_step_1,
        del_step_2)
def test_replay_unique_vPC_ports_chg_vPC_nbr(self):
    """Persist with learned channel group even if it changed."""
    def reinit_with_new_channel_group():
        # Re-init with port-channel 470; the previously learned 469
        # should persist instead (we will not relearn).
        self._init_port_channel(470)

    results = self.results.get_test_results
    add_step = dict(
        driver_results=results('driver_result_unique_vPC_2switch_add1'),
        nbr_db_entries=2)
    del_step = dict(
        driver_results=results('driver_result_unique_vPC_2switch_del1'),
        nbr_db_entries=0)
    self._init_port_channel(469)
    self._process_replay(
        'test_replay_unique_vPC',
        None,
        [],
        add_step,
        None,
        results('driver_result_unique_vPC_2if_replay'),
        None,
        del_step,
        reinit_with_new_channel_group)
def test_replay_unique_vPC_ports_chg_to_enet(self):
    """Persist with learned channel group even if it was removed."""
    def reinit_without_channel_group():
        # Re-running mock init drops the channel-group from the enet
        # config, so the port-channel gets replaced with plain ethernet.
        if cfg.CONF.ml2_cisco.nexus_driver == 'restapi':
            self.restapi_mock_init()
        else:
            self.mock_init()

    results = self.results.get_test_results
    add_step = dict(
        driver_results=results('driver_result_unique_vPC_2switch_add1'),
        nbr_db_entries=2)
    del_step = dict(
        driver_results=results('driver_result_unique_vPC_2switch_del1'),
        nbr_db_entries=0)
    self._init_port_channel(469)
    self._process_replay(
        'test_replay_unique_vPC',
        None,
        [],
        add_step,
        None,
        results('driver_result_unique_vPC_2if_replay'),
        None,
        del_step,
        reinit_without_channel_group)
class TestCiscoNexusNonCachedSshReplay(
        test_cisco_nexus_base.TestCiscoNexusReplayBase):
    """Unit tests for Replay of Cisco ML2 Nexus data.

    Exercises the ncclient (SSH) driver with 'never_cache_ssh_connection'
    at its default of True, so every switch operation opens its own SSH
    session and closes it when done.
    """

    # Testing new default of True for config var 'never_cache_ssh_connection'
    test_configs = {
        'test_replay_unique1':
            test_cisco_nexus_base.TestCiscoNexusBase.TestConfigObj(
                test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
                RP_HOST_NAME_1,
                test_cisco_nexus_base.NEXUS_PORT_1,
                test_cisco_nexus_base.INSTANCE_1,
                test_cisco_nexus_base.VLAN_ID_1,
                test_cisco_nexus_base.NO_VXLAN_ID,
                None,
                test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
                {},
                None,
                test_cisco_nexus_base.NORMAL_VNIC),
    }

    def setUp(self):
        # Force the ncclient (SSH) driver for this class, then clear any
        # mock activity recorded during base-class setup.
        cfg.CONF.set_override('nexus_driver', 'ncclient', 'ml2_cisco')
        super(TestCiscoNexusNonCachedSshReplay, self).setUp()
        self.mock_ncclient.reset_mock()

    def test_basic_replay_NonCacheSsh(self):
        """Basic non-cached ssh replay test."""
        tmp_cfg = self.TestConfigObj(
            test_cisco_nexus_base.NEXUS_IP_ADDRESS_1,
            RP_HOST_NAME_1,
            test_cisco_nexus_base.NEXUS_PORT_1,
            test_cisco_nexus_base.INSTANCE_1,
            test_cisco_nexus_base.VLAN_ID_1,
            test_cisco_nexus_base.NO_VXLAN_ID,
            None,
            test_cisco_nexus_base.DEVICE_OWNER_COMPUTE,
            {},
            None,
            test_cisco_nexus_base.NORMAL_VNIC)
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            tmp_cfg.nexus_ip_addr, const.SWITCH_ACTIVE)
        self.mock_ncclient.reset_mock()

        # Create a batch of port entries with unique vlans
        num_vlans = 10
        for x in range(num_vlans):
            instance_id = test_cisco_nexus_base.INSTANCE_1 + '-' + str(x)
            new_cfg = tmp_cfg._replace(
                vlan_id=test_cisco_nexus_base.VLAN_ID_1 + x,
                instance_id=instance_id)
            self._create_port(new_cfg)

        # With ssh caching disabled every session opened is also closed:
        # connect and close_session counts match.  10 port creates yield
        # 20 connect/close pairs (presumably 2 operations per create --
        # TODO confirm against the driver).
        self.assertEqual(20, self.mock_ncclient.connect.call_count)
        self.assertEqual(20,
                         self.mock_ncclient.connect.return_value.
                         close_session.call_count)
        self.mock_ncclient.reset_mock()

        # Put it back to inactive state
        self._cisco_mech_driver.set_switch_ip_and_active_state(
            tmp_cfg.nexus_ip_addr, const.SWITCH_INACTIVE)
        self._cfg_monitor.check_connections()
        # The monitor-triggered replay likewise opens and closes its own
        # sessions (2 connect/close pairs).
        self.assertEqual(2, self.mock_ncclient.connect.call_count)
        self.assertEqual(2,
                         self.mock_ncclient.connect.return_value.
                         close_session.call_count)
| {
"content_hash": "bd2031df189774cf9e2a37ee2bb91a7a",
"timestamp": "",
"source": "github",
"line_count": 1472,
"max_line_length": 79,
"avg_line_length": 40.239809782608695,
"alnum_prop": 0.5654618202691067,
"repo_name": "Tehsmash/networking-cisco",
"id": "467179a2fe65ebcd803dc32d1e8756d3617f47e2",
"size": "59850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_cisco/tests/unit/ml2/drivers/cisco/nexus/test_cisco_nexus_replay.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1043"
},
{
"name": "Python",
"bytes": "3715465"
},
{
"name": "Shell",
"bytes": "35749"
}
],
"symlink_target": ""
} |
"""
@package mi.dataset.parser.ctdmo_ghqr_sio
@file mi/dataset/parser/ctdmo_ghqr_sio.py
@author Emily Hahn (original telemetered), Steve Myerson (recovered)
@brief A CTDMO series ghqr specific data set agent parser
This file contains code for the CTDMO parsers and code to produce data particles.
For telemetered data, there is one parser which produces two data particles.
For recovered data, there are two parsers, with each parser producing one data particle.
There are two types of CTDMO data.
CT, aka instrument, sensor or science data.
CO, aka offset data.
For telemetered data, both types (CT, CO) of data are in SIO Mule files.
For recovered data, the CT data is stored in a separate file.
Additionally, both CT and CO data are stored in another file (the SIO Controller file);
both the CO and CT data in the SIO Controller file are processed here.
"""
import datetime
__author__ = 'Emily Hahn'
__license__ = 'Apache 2.0'
import binascii
import re
import struct
from mi.dataset.parser.utilities import zulu_timestamp_to_ntp_time
from mi.core.log import get_logger
log = get_logger()
from mi.dataset.dataset_parser import SimpleParser
from mi.dataset.parser.sio_mule_common import \
SioParser, \
SIO_HEADER_MATCHER, \
SIO_HEADER_GROUP_ID, \
SIO_HEADER_GROUP_TIMESTAMP
from mi.core.common import BaseEnum
from mi.core.exceptions import \
DatasetParserException, \
RecoverableSampleException, \
SampleException, \
UnexpectedDataException
from mi.core.instrument.dataset_data_particle import DataParticle
# Record type IDs found in the SIO header.
ID_INSTRUMENT = 'CT' # ID for instrument (science) data
ID_OFFSET = 'CO' # ID for time offset data
# Recovered CT file format (file is ASCII, lines separated by new line):
# Several lines of unformatted ASCII text and key value pairs (ignored here)
# Configuration information in XML format (only serial number is of interest)
# *END* record (IDD says *end* so we'll check for either)
# Instrument data in HEX ASCII (need to extract these values)
NEW_LINE = r'[\n\r]+' # Handle any type of new line
REC_CT_RECORD = r'.*' # Any number of ASCII characters
REC_CT_RECORD += NEW_LINE # separated by a new line
REC_CT_RECORD_MATCHER = re.compile(REC_CT_RECORD)
# For Recovered CT files, the serial number is in the Configuration XML section.
REC_CT_SERIAL_REGEX = r'^' # At the beginning of the record
REC_CT_SERIAL_REGEX += r'\* <HardwareData DeviceType=\'SBE37-IM\' SerialNumber=\''
REC_CT_SERIAL_REGEX += r'(\d+)' # Serial number is any number of digits
REC_CT_SERIAL_REGEX += r'\'>' # the rest of the XML syntax
REC_CT_SERIAL_MATCHER = re.compile(REC_CT_SERIAL_REGEX)
# The REC_CT_SERIAL_MATCHER produces the following group:
REC_CT_SERIAL_GROUP_SERIAL_NUMBER = 1
# The end of the Configuration XML section is denoted by a *END* record.
REC_CT_CONFIGURATION_END = r'^' # At the beginning of the record
REC_CT_CONFIGURATION_END += r'\*END\*' # *END*
REC_CT_CONFIGURATION_END += NEW_LINE # separated by a new line
REC_CT_CONFIGURATION_END_MATCHER = re.compile(REC_CT_CONFIGURATION_END)
# Recovered CT Data record (hex ascii):
REC_CT_SAMPLE_BYTES = 31 # includes record separator
# NOTE(review): the byte-string patterns below are concatenated with the
# str NEW_LINE, which only works under Python 2 -- confirm the target
# interpreter before porting this module.
REC_CT_REGEX = b'([0-9a-fA-F]{6})' # Temperature
REC_CT_REGEX += b'([0-9a-fA-F]{6})' # Conductivity
REC_CT_REGEX += b'([0-9a-fA-F]{6})' # Pressure
REC_CT_REGEX += b'([0-9a-fA-F]{4})' # Pressure Temperature
REC_CT_REGEX += b'([0-9a-fA-F]{8})' # Time since Jan 1, 2000
REC_CT_REGEX += NEW_LINE # separated by a new line
REC_CT_MATCHER = re.compile(REC_CT_REGEX)
# The REC_CT_MATCHER produces the following groups:
REC_CT_GROUP_TEMPERATURE = 1
REC_CT_GROUP_CONDUCTIVITY = 2
REC_CT_GROUP_PRESSURE = 3
REC_CT_GROUP_PRESSURE_TEMP = 4
REC_CT_GROUP_TIME = 5
# Telemetered CT Data record (binary):
TEL_CT_RECORD_END = b'\x0D' # records separated by a new line
TEL_CT_SAMPLE_BYTES = 13 # includes record separator
TEL_CT_REGEX = b'([\x00-\xFF])' # Inductive ID
TEL_CT_REGEX += b'([\x00-\xFF]{7})' # Temperature, Conductivity, Pressure (reversed)
TEL_CT_REGEX += b'([\x00-\xFF]{4})' # Time since Jan 1, 2000 (bytes reversed)
TEL_CT_REGEX += TEL_CT_RECORD_END # CT Record separator
TEL_CT_MATCHER = re.compile(TEL_CT_REGEX)
# The TEL_CT_MATCHER produces the following groups:
TEL_CT_GROUP_ID = 1
TEL_CT_GROUP_SCIENCE_DATA = 2
TEL_CT_GROUP_TIME = 3
# Recovered and Telemetered CO Data record (binary):
# NOTE(review): inside a character class '|' is a literal, so this class
# also matches a '|' byte in addition to 0x13 and 0x0D -- confirm intended.
CO_RECORD_END = b'[\x13|\x0D]' # records separated by sentinel 0x13 or 0x0D
CO_SAMPLE_BYTES = 6
CO_REGEX = b'([\x00-\xFF])' # Inductive ID
CO_REGEX += b'([\x00-\xFF]{4})' # Time offset in seconds
CO_REGEX += CO_RECORD_END # CO Record separator
CO_MATCHER = re.compile(CO_REGEX)
# The CO_MATCHER produces the following groups:
CO_GROUP_ID = 1
CO_GROUP_TIME_OFFSET = 2
# Indices into raw_data tuples for recovered CT data
RAW_INDEX_REC_CT_ID = 0
RAW_INDEX_REC_CT_SERIAL = 1
RAW_INDEX_REC_CT_TEMPERATURE = 2
RAW_INDEX_REC_CT_CONDUCTIVITY = 3
RAW_INDEX_REC_CT_PRESSURE = 4
RAW_INDEX_REC_CT_PRESSURE_TEMP = 5
RAW_INDEX_REC_CT_TIME = 6
# Indices into raw_data tuples for telemetered CT data
RAW_INDEX_TEL_CT_SIO_TIMESTAMP = 0
RAW_INDEX_TEL_CT_ID = 1
RAW_INDEX_TEL_CT_SCIENCE = 2
RAW_INDEX_TEL_CT_TIME = 3
# Indices into raw_data tuples for recovered and telemetered CO data
RAW_INDEX_CO_SIO_TIMESTAMP = 0
RAW_INDEX_CO_ID = 1
RAW_INDEX_CO_TIME_OFFSET = 2
# Parser-config key naming the inductive ID to attach to particles.
INDUCTIVE_ID_KEY = 'inductive_id'
def convert_hex_ascii_to_int(int_val):
    """Convert a hex-ascii string (e.g. 'ff') to an integer.

    Used as an encoding function when building data particle values.
    """
    return int(int_val, base=16)
def generate_particle_timestamp(time_2000):
    """Convert a hex-ascii epoch-2000 time to epoch-1900 (NTP) seconds.

    Parameter:
      time_2000 - hex-ascii number of seconds since Jan 1, 2000
    Returns:
      number of seconds since Jan 1, 1900
    """
    epoch_2000_offset = zulu_timestamp_to_ntp_time("2000-01-01T00:00:00.00Z")
    seconds_since_2000 = int(time_2000, 16)
    return seconds_since_2000 + epoch_2000_offset
class DataParticleType(BaseEnum):
    """
    Stream names for the particles produced by this module.

    Recovered_host CTDMO instrument data is identical to the telemetered data,
    so for Recovered_host CTDMO instrument data we reuse the
    'ctdmo_ghqr_sio_mule_instrument' (TEL_CT_PARTICLE) data stream.
    """
    REC_CO_PARTICLE = 'ctdmo_ghqr_offset_recovered'
    REC_CT_PARTICLE = 'ctdmo_ghqr_instrument_recovered'
    TEL_CO_PARTICLE = 'ctdmo_ghqr_sio_offset'
    TEL_CT_PARTICLE = 'ctdmo_ghqr_sio_mule_instrument'
    REC_HOST_CT_PARTICLE = 'ctdmo_ghqr_sio_mule_instrument'
class CtdmoInstrumentDataParticleKey(BaseEnum):
    """Keys (output parameter names) used in CT instrument data particles."""
    CONTROLLER_TIMESTAMP = "sio_controller_timestamp"
    INDUCTIVE_ID = "inductive_id"
    SERIAL_NUMBER = "serial_number"
    TEMPERATURE = "temperature"
    CONDUCTIVITY = "conductivity"
    PRESSURE = "pressure"
    PRESSURE_TEMP = "pressure_temp"
    CTD_TIME = "ctd_time"
class CtdmoGhqrRecoveredInstrumentDataParticle(DataParticle):
    """
    Generates instrument data particles from Recovered CT data.
    """
    _data_particle_type = DataParticleType.REC_CT_PARTICLE

    def _build_parsed_values(self):
        """
        Build parsed values for the Recovered CT instrument particle.

        raw_data is a tuple of
        (inductive ID, serial number, temperature, conductivity, pressure,
         pressure temperature, time of science data), where the measurement
        fields are hex-ascii strings.
        @throws SampleException If there is a problem with sample creation
        """
        # The particle timestamp is the CT instrument time (seconds since
        # Jan 1, 2000) converted to the 1900-based epoch.
        self.set_internal_timestamp(
            timestamp=generate_particle_timestamp(
                self.raw_data[RAW_INDEX_REC_CT_TIME]))

        # (particle key, raw_data index, encoding function) per output value.
        field_specs = [
            (CtdmoInstrumentDataParticleKey.INDUCTIVE_ID,
             RAW_INDEX_REC_CT_ID, int),
            (CtdmoInstrumentDataParticleKey.SERIAL_NUMBER,
             RAW_INDEX_REC_CT_SERIAL, str),
            (CtdmoInstrumentDataParticleKey.TEMPERATURE,
             RAW_INDEX_REC_CT_TEMPERATURE, convert_hex_ascii_to_int),
            (CtdmoInstrumentDataParticleKey.CONDUCTIVITY,
             RAW_INDEX_REC_CT_CONDUCTIVITY, convert_hex_ascii_to_int),
            (CtdmoInstrumentDataParticleKey.PRESSURE,
             RAW_INDEX_REC_CT_PRESSURE, convert_hex_ascii_to_int),
            (CtdmoInstrumentDataParticleKey.PRESSURE_TEMP,
             RAW_INDEX_REC_CT_PRESSURE_TEMP, convert_hex_ascii_to_int),
            (CtdmoInstrumentDataParticleKey.CTD_TIME,
             RAW_INDEX_REC_CT_TIME, convert_hex_ascii_to_int),
        ]
        return [self._encode_value(key, self.raw_data[index], encoder)
                for key, index, encoder in field_specs]
class CtdmoGhqrSioTelemeteredInstrumentDataParticle(DataParticle):
    """
    Class for generating Instrument Data Particles from Telemetered data.
    """
    _data_particle_type = DataParticleType.TEL_CT_PARTICLE

    def _build_parsed_values(self):
        """
        Build parsed values for Telemetered Instrument Data Particle.

        raw_data is a tuple of:
          SIO header timestamp (hex-ascii),
          inductive ID (1 raw byte),
          science data (7 raw bytes),
          CT time (4 raw bytes, byte-reversed, seconds since Jan 1, 2000)
        @throws SampleException If there is a problem with sample creation
        """
        #
        # Convert science data time to hex ascii.
        # The 4 byte time field is in reverse byte order, so swap the hex
        # digit pairs back into big-endian order before converting.
        #
        hex_time = binascii.b2a_hex(self.raw_data[RAW_INDEX_TEL_CT_TIME])
        reversed_hex_time = hex_time[6:8] + hex_time[4:6] + hex_time[2:4] + hex_time[0:2]

        # convert from epoch in 2000 to epoch in 1900.
        time_stamp = generate_particle_timestamp(reversed_hex_time)
        self.set_internal_timestamp(timestamp=time_stamp)

        try:
            #
            # Convert binary science data to hex ascii string.
            # 7 binary bytes get turned into 14 hex ascii bytes.
            # The 2 byte pressure field (hex digits [10:14]) is in reverse
            # byte order; its digit pairs are swapped back here.
            #
            science_data = binascii.b2a_hex(self.raw_data[RAW_INDEX_TEL_CT_SCIENCE])
            pressure = science_data[12:14] + science_data[10:12]
        except (ValueError, TypeError, IndexError) as ex:
            log.warn("Error (%s) while decoding parameters in data: [%s]", ex, self.raw_data)
            raise RecoverableSampleException("Error (%s) while decoding parameters in data: [%s]"
                                             % (ex, self.raw_data))

        # Temperature is hex digits [0:5] and conductivity [5:10] of the
        # 14-digit science string (2.5 bytes each, per the slicing above).
        particle = [
            self._encode_value(CtdmoInstrumentDataParticleKey.CONTROLLER_TIMESTAMP,
                               self.raw_data[RAW_INDEX_TEL_CT_SIO_TIMESTAMP],
                               convert_hex_ascii_to_int),
            self._encode_value(CtdmoInstrumentDataParticleKey.INDUCTIVE_ID,
                               struct.unpack('>B',
                                             self.raw_data[RAW_INDEX_TEL_CT_ID])[0],
                               int),
            self._encode_value(CtdmoInstrumentDataParticleKey.TEMPERATURE,
                               science_data[0:5],
                               convert_hex_ascii_to_int),
            self._encode_value(CtdmoInstrumentDataParticleKey.CONDUCTIVITY,
                               science_data[5:10],
                               convert_hex_ascii_to_int),
            self._encode_value(CtdmoInstrumentDataParticleKey.PRESSURE,
                               pressure,
                               convert_hex_ascii_to_int),
            self._encode_value(CtdmoInstrumentDataParticleKey.CTD_TIME,
                               reversed_hex_time,
                               convert_hex_ascii_to_int)
        ]
        return particle
class CtdmoGhqrRecoveredHostInstrumentDataParticle(DataParticle):
    """
    Class for generating Instrument Data Particles from Recovered (host) data.

    The data stream is identical to the telemetered instrument stream
    (see DataParticleType.REC_HOST_CT_PARTICLE).
    """
    _data_particle_type = DataParticleType.REC_HOST_CT_PARTICLE

    # Offset in seconds between the NTP epoch (Jan 1, 1900) and the
    # instrument epoch (Jan 1, 2000).  Hoisted to a class constant so it
    # is computed once instead of on every particle.
    SECONDS_1900_TO_2000 = (datetime.datetime(2000, 1, 1) -
                            datetime.datetime(1900, 1, 1)).total_seconds()

    def _build_parsed_values(self):
        """
        Build parsed values for the Recovered (host) Instrument Data Particle.

        raw_data is a tuple of:
          header_timestamp - SIO header timestamp (hex-ascii seconds)
          inductive_id     - inductive ID taken from the SIO header
          data             - hex-ascii science record: temperature (5 chars),
                             conductivity (5 chars), then 6 packed bytes
                             decoded as little-endian unsigned short
                             (pressure) + unsigned int (seconds since
                             Jan 1, 2000)
        @throws SampleException If there is a problem with sample creation
        """
        header_timestamp, inductive_id, data = self.raw_data
        temp = int(data[:5], 16)
        cond = int(data[5:10], 16)
        # '<HI': little-endian unsigned short + unsigned int.
        pressure, secs = struct.unpack('<HI', binascii.a2b_hex(data[10:22]))

        # Particle timestamp is the instrument time shifted to epoch 1900.
        self.set_internal_timestamp(timestamp=secs + self.SECONDS_1900_TO_2000)

        port_timestamp = float(convert_hex_ascii_to_int(header_timestamp))
        self.set_port_timestamp(unix_time=port_timestamp)

        # NOTE: sio_controller_timestamp is deprecated in favor of the port
        # timestamp; it is still encoded for backward compatibility.
        particle = [
            self._encode_value(CtdmoInstrumentDataParticleKey.CONTROLLER_TIMESTAMP,
                               header_timestamp,
                               convert_hex_ascii_to_int),
            self._encode_value(CtdmoInstrumentDataParticleKey.INDUCTIVE_ID,
                               inductive_id,
                               int),
            self._encode_value(CtdmoInstrumentDataParticleKey.TEMPERATURE,
                               temp,
                               int),
            self._encode_value(CtdmoInstrumentDataParticleKey.CONDUCTIVITY,
                               cond,
                               int),
            self._encode_value(CtdmoInstrumentDataParticleKey.PRESSURE,
                               pressure,
                               int),
            self._encode_value(CtdmoInstrumentDataParticleKey.CTD_TIME,
                               secs,
                               int)
        ]
        return particle
class CtdmoOffsetDataParticleKey(BaseEnum):
    """Keys (output parameter names) used in CO time-offset data particles."""
    CONTROLLER_TIMESTAMP = "sio_controller_timestamp"
    INDUCTIVE_ID = "inductive_id"
    CTD_OFFSET = "ctd_time_offset"
class CtdmoGhqrSioOffsetDataParticle(DataParticle):
    """
    Base class generating the Offset Data Particle from the CTDMO instrument
    on a MSFM platform node.  Subclasses choose the stream name.
    """

    def _build_parsed_values(self):
        """
        Build parsed values for the Offset Data Particle (the record layout
        is identical for recovered and telemetered data).

        raw_data is a tuple of:
          SIO header timestamp (hex-ascii seconds),
          inductive ID (1 raw byte, unsigned),
          time offset (4 raw bytes, big-endian signed int)
        @throws SampleException If there is a problem with sample creation
        """
        sio_timestamp = self.raw_data[RAW_INDEX_CO_SIO_TIMESTAMP]

        # The particle timestamp for CO data is the SIO header timestamp.
        self.set_internal_timestamp(
            unix_time=convert_hex_ascii_to_int(sio_timestamp))

        inductive_id = struct.unpack(
            '>B', self.raw_data[RAW_INDEX_CO_ID])[0]
        time_offset = struct.unpack(
            '>i', self.raw_data[RAW_INDEX_CO_TIME_OFFSET])[0]

        return [
            self._encode_value(CtdmoOffsetDataParticleKey.CONTROLLER_TIMESTAMP,
                               sio_timestamp,
                               convert_hex_ascii_to_int),
            self._encode_value(CtdmoOffsetDataParticleKey.INDUCTIVE_ID,
                               inductive_id,
                               int),
            self._encode_value(CtdmoOffsetDataParticleKey.CTD_OFFSET,
                               time_offset,
                               int),
        ]
class CtdmoGhqrSioRecoveredOffsetDataParticle(CtdmoGhqrSioOffsetDataParticle):
    """
    Class for generating Offset Data Particles from Recovered data.
    Only the stream name differs from the telemetered variant.
    """
    _data_particle_type = DataParticleType.REC_CO_PARTICLE
class CtdmoGhqrSioTelemeteredOffsetDataParticle(CtdmoGhqrSioOffsetDataParticle):
    """
    Class for generating Offset Data Particles from Telemetered data.
    Only the stream name differs from the recovered variant.
    """
    _data_particle_type = DataParticleType.TEL_CO_PARTICLE
def parse_co_data(particle_class, chunk, sio_header_timestamp, extract_sample):
    """
    This function parses a CO record and returns a list of samples.
    The CO input record is the same for both recovered and telemetered data.

    Parameters:
      particle_class - particle class handed through to extract_sample
      chunk - CO payload: fixed-size CO_SAMPLE_BYTES records, back to back
      sio_header_timestamp - SIO header timestamp, passed through to particles
      extract_sample - callback used to build each particle

    Returns:
      (particles, had_error) where had_error is a tuple of
      (error seen?, byte index where matching failed).
    """
    particles = []
    last_index = len(chunk)
    start_index = 0
    had_error = (False, 0)

    while start_index < last_index:
        #
        # Look for a match in the next group of bytes
        #
        co_match = CO_MATCHER.match(
            chunk[start_index:start_index+CO_SAMPLE_BYTES])
        if co_match is not None:
            #
            # Extract the inductive ID (still a raw byte string here);
            # it is passed through to the particle unchanged.
            #
            inductive_id = co_match.group(CO_GROUP_ID)

            #
            # Generate the data particle.
            # Data stored for each particle is a tuple of the following:
            #   SIO header timestamp (input parameter)
            #   inductive ID (from chunk)
            #   Time Offset (from chunk)
            #
            sample = extract_sample(particle_class, None, (sio_header_timestamp, inductive_id,
                                    co_match.group(CO_GROUP_TIME_OFFSET)), None)
            if sample is not None:
                #
                # Add this particle to the list of particles generated
                # so far for this chunk of input data.
                #
                particles.append(sample)

            start_index += CO_SAMPLE_BYTES

        #
        # If there wasn't a match, the input data is messed up.
        #
        else:
            had_error = (True, start_index)
            break

    #
    # Once we reach the end of the input data (or hit bad data),
    # return the list of particles and the error indication.
    #
    return particles, had_error
def parse_ct_data(particle_class, chunk, sio_header_timestamp, extract_sample, inductive_id):
    """
    Parse a CT record (whitespace-separated hex-ascii samples) and return
    (particles, had_error).

    Each well-formed hex token yields one particle whose raw data is the
    tuple (sio_header_timestamp, inductive_id, token).  Tokens that are not
    valid hex are skipped and flagged via had_error = (True, 0).
    The CT input record is the same for both recovered and telemetered data.
    """
    particles = []
    had_error = (False, 0)
    for token in chunk.split():
        try:
            # Validate the token is well-formed hex before emitting it.
            binascii.a2b_hex(token)
            particles.append(
                extract_sample(particle_class, None,
                               (sio_header_timestamp, inductive_id, token),
                               None))
        except ValueError:
            # Malformed (non-hex) token: flag the error and keep going.
            had_error = (True, 0)
    return particles, had_error
class CtdmoGhqrSioRecoveredCoAndCtParser(SioParser):
    """
    Parser for Ctdmo recovered CO and CT data (SIO Controller file).
    """

    def handle_non_data(self, non_data, non_end, start):
        """
        Handle any non-data that is found in the file.
        Anything the chunker could not match is reported through the
        exception callback as UnexpectedDataException.
        """
        if non_data is not None and non_end <= start:
            message = "Found %d bytes of un-expected non-data %s" % (len(non_data), binascii.b2a_hex(non_data))
            log.warn(message)
            self._exception_callback(UnexpectedDataException(message))

    def parse_chunks(self):
        """
        Parse chunks for the Recovered CO and CT parser.
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle, update the position and
        timestamp. Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
        parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
        self.handle_non_data(non_data, non_end, start)

        while chunk is not None:
            header_match = SIO_HEADER_MATCHER.match(chunk)
            # BUG FIX: previously the match result was dereferenced without
            # checking for None, which raised AttributeError on a chunk
            # lacking a valid SIO header.  Guard the same way the
            # telemetered parser (CtdmoGhqrSioTelemeteredParser) does.
            if header_match:
                header_timestamp = header_match.group(SIO_HEADER_GROUP_TIMESTAMP)

                #
                # Start processing at the end of the header.
                #
                chunk_idx = header_match.end(0)

                if header_match.group(SIO_HEADER_GROUP_ID) == ID_OFFSET:
                    (particles, had_error) = parse_co_data(CtdmoGhqrSioRecoveredOffsetDataParticle,
                                                           chunk[chunk_idx:-1], header_timestamp,
                                                           self._extract_sample)
                    if had_error[0]:
                        log.error('unknown data found in CO chunk %s at %d, leaving out the rest',
                                  binascii.b2a_hex(chunk), had_error[1])
                        self._exception_callback(SampleException(
                            'unknown data found in CO chunk at %d, leaving out the rest' % had_error[1]))
                    result_particles.extend(particles)

                if header_match.group(SIO_HEADER_GROUP_ID) == ID_INSTRUMENT:
                    header_str = header_match.group(0)
                    # Inductive ID is taken from chars 8-10 of the SIO
                    # header -- TODO confirm against the header layout.
                    inductive_id = header_str[8:10]
                    (particles, had_error) = parse_ct_data(CtdmoGhqrRecoveredHostInstrumentDataParticle,
                                                           chunk[chunk_idx:-1], header_timestamp,
                                                           self._extract_sample, inductive_id)
                    if had_error[0]:
                        log.error('unknown data found in CT chunk %s at %d, leaving out the rest',
                                  binascii.b2a_hex(chunk), had_error[1])
                        self._exception_callback(SampleException(
                            'unknown data found in CT chunk at %d, leaving out the rest' % had_error[1]))
                    result_particles.extend(particles)

            (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
            self.handle_non_data(non_data, non_end, start)

        return result_particles
class CtdmoGhqrRecoveredCtParser(SimpleParser):
    """
    Parser for Ctdmo recovered CT data.

    The file starts with free-form ASCII and an XML configuration section
    (from which only the serial number is extracted), terminated by a *END*
    record; every line after that is expected to be a hex-ascii CT record.
    """

    def __init__(self,
                 config,
                 stream_handle,
                 exception_callback):
        """
        @param config parser configuration; must contain INDUCTIVE_ID_KEY
        @param stream_handle open handle to the recovered CT file
        @param exception_callback callback for recoverable parse errors
        @throws DatasetParserException if the inductive ID is not configured
        """
        #
        # Verify that the required parameters are in the parser configuration.
        #
        if INDUCTIVE_ID_KEY not in config:
            raise DatasetParserException("Parser config is missing %s" % INDUCTIVE_ID_KEY)

        #
        # File is ASCII with records separated by newlines.
        #
        super(CtdmoGhqrRecoveredCtParser, self).__init__(config, stream_handle, exception_callback)

        #
        # set flags to indicate the end of Configuration has not been reached
        # and the serial number has not been found.
        #
        self._serial_number = None
        self._end_config = False
        self.input_file = stream_handle

    def check_for_config_end(self, chunk):
        """
        This function searches the input buffer for the end of Configuration
        record.  If found, the end-of-configuration flag is set.
        """
        match = REC_CT_CONFIGURATION_END_MATCHER.match(chunk)
        if match is not None:
            self._end_config = True

    def check_for_serial_number(self, chunk):
        """
        This function searches the input buffer for a serial number.
        """
        #
        # See if this record contains the serial number.
        # If found, convert from decimal ASCII and save.
        #
        match = REC_CT_SERIAL_MATCHER.match(chunk)
        if match is not None:
            self._serial_number = int(match.group(REC_CT_SERIAL_GROUP_SERIAL_NUMBER))

    def parse_file(self):
        """
        Parse the file for the recovered CT parser.
        Generated particles are appended to self._record_buffer
        (nothing is returned).
        """
        # read the first line in the file
        line = self._stream_handle.readline()

        while line:
            #
            # Search for serial number if not already found.
            #
            if self._serial_number is None:
                self.check_for_serial_number(line)

            #
            # Once the serial number is found,
            # search for the end of the Configuration section.
            #
            elif not self._end_config:
                self.check_for_config_end(line)

            #
            # Once the end of the configuration is reached, all remaining
            # records are supposedly CT data records.
            # Parse the record and add the particle to the record buffer.
            #
            else:
                particle = self.parse_ct_record(line)
                if particle is not None:
                    self._record_buffer.append(particle)

            # read the next line in the file
            line = self._stream_handle.readline()

    def parse_ct_record(self, ct_record):
        """
        This function parses a Recovered CT record and returns a data particle.
        Parameters:
          ct_record - the input line which is being parsed
        Returns the particle, or None if the line did not match the CT format.
        """
        ct_match = REC_CT_MATCHER.match(ct_record)
        if ct_match is not None:
            #
            # If this is a CT record, generate the data particle.
            # Data stored for each particle is a tuple of the following:
            #   inductive ID (obtained from configuration data)
            #   serial number
            #   temperature
            #   conductivity
            #   pressure
            #   pressure temperature
            #   time of science data
            #
            sample = self._extract_sample(CtdmoGhqrRecoveredInstrumentDataParticle, None,
                                          (self._config.get(INDUCTIVE_ID_KEY),
                                           self._serial_number,
                                           ct_match.group(REC_CT_GROUP_TEMPERATURE),
                                           ct_match.group(REC_CT_GROUP_CONDUCTIVITY),
                                           ct_match.group(REC_CT_GROUP_PRESSURE),
                                           ct_match.group(REC_CT_GROUP_PRESSURE_TEMP),
                                           ct_match.group(REC_CT_GROUP_TIME)))

        #
        # If there wasn't a match, the input data is messed up.
        #
        else:
            error_message = 'unknown data found in CT chunk %s, leaving out the rest of chunk' \
                            % binascii.b2a_hex(ct_record)
            log.error(error_message)
            self._exception_callback(SampleException(error_message))
            sample = None

        return sample
class CtdmoGhqrSioTelemeteredParser(SioParser):
    """
    Parser for Ctdmo telemetered data (SIO Mule).
    This parser handles both CT and CO data from the SIO Mule.
    """

    def parse_chunks(self):
        """
        Parse chunks for the Telemetered parser.
        Parse out any pending data chunks in the chunker. If
        it is a valid data piece, build a particle.
        Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
        parsing, plus the state. An empty list if nothing was parsed.
        """
        result_particles = []
        (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
        self.handle_non_data(non_data, non_end, start)

        while chunk is not None:
            header_match = SIO_HEADER_MATCHER.match(chunk)
            # Chunks without a valid SIO header are silently skipped.
            if header_match:
                header_timestamp = header_match.group(SIO_HEADER_GROUP_TIMESTAMP)

                # start looping at the end of the header
                chunk_idx = header_match.end(0)

                if header_match.group(SIO_HEADER_GROUP_ID) == ID_INSTRUMENT:
                    #
                    # Parse the CT record, up to but not including the end of SIO block.
                    #
                    particles = self.parse_ct_record(chunk[chunk_idx:-1], header_timestamp)
                    result_particles.extend(particles)

                elif header_match.group(SIO_HEADER_GROUP_ID) == ID_OFFSET:
                    (particles, had_error) = parse_co_data(CtdmoGhqrSioTelemeteredOffsetDataParticle,
                                                           chunk[chunk_idx:-1], header_timestamp,
                                                           self._extract_sample)
                    if had_error[0]:
                        log.error('unknown data found in CO chunk %s at %d, leaving out the rest',
                                  binascii.b2a_hex(chunk), had_error[1])
                        self._exception_callback(SampleException(
                            'unknown data found in CO chunk at %d, leaving out the rest' % had_error[1]))
                    result_particles.extend(particles)
                else:
                    # Any other SIO header ID is unexpected for this stream.
                    message = 'Unexpected Sio Header ID %s' % header_match.group(SIO_HEADER_GROUP_ID)
                    log.warn(message)
                    self._exception_callback(UnexpectedDataException(message))

            (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index()
            self.handle_non_data(non_data, non_end, start)

        return result_particles

    def parse_ct_record(self, ct_record, sio_header_timestamp):
        """
        This function parses a Telemetered CT record and
        returns a list of data particles.
        Parameters:
          ct_record - the input which is being parsed (fixed-size
                      TEL_CT_SAMPLE_BYTES binary records, back to back)
          sio_header_timestamp - required for particle, passed through
        """
        particles = []
        last_index = len(ct_record)
        start_index = 0

        while start_index < last_index:
            #
            # Look for a match in the next group of bytes
            #
            ct_match = TEL_CT_MATCHER.match(
                ct_record[start_index:start_index+TEL_CT_SAMPLE_BYTES])
            if ct_match is not None:
                #
                # Generate the data particle.
                # Data stored for each particle is a tuple of the following:
                #   SIO header timestamp (input parameter)
                #   inductive ID
                #   science data (temperature, conductivity, pressure)
                #   time of science data
                #
                sample = self._extract_sample(CtdmoGhqrSioTelemeteredInstrumentDataParticle, None,
                                              (sio_header_timestamp,
                                               ct_match.group(TEL_CT_GROUP_ID),
                                               ct_match.group(TEL_CT_GROUP_SCIENCE_DATA),
                                               ct_match.group(TEL_CT_GROUP_TIME)))
                if sample is not None:
                    #
                    # Add this particle to the list of particles generated
                    # so far for this chunk of input data.
                    #
                    particles.append(sample)

                start_index += TEL_CT_SAMPLE_BYTES

            #
            # If there wasn't a match, the input data is messed up.
            #
            else:
                log.error('unknown data found in CT record %s at %d, leaving out the rest',
                          binascii.b2a_hex(ct_record), start_index)
                self._exception_callback(SampleException(
                    'unknown data found in CT record at %d, leaving out the rest' % start_index))
                break

        #
        # Once we reach the end of the input data (or hit bad data),
        # return the list of particles.
        #
        return particles

    def handle_non_data(self, non_data, non_end, start):
        """
        Handle any non-data that is found in the file.
        Anything the chunker could not match is reported through the
        exception callback as UnexpectedDataException.
        """
        if non_data is not None and non_end <= start:
            message = "Found %d bytes of un-expected non-data %s" % (len(non_data), binascii.b2a_hex(non_data))
            log.warn(message)
            self._exception_callback(UnexpectedDataException(message))
| {
"content_hash": "7c43f9da60eb7391c4ba3fb25ac1023d",
"timestamp": "",
"source": "github",
"line_count": 825,
"max_line_length": 114,
"avg_line_length": 40.261818181818185,
"alnum_prop": 0.588752408477842,
"repo_name": "janeen666/mi-instrument",
"id": "fccce59f75c25b3e93db4a54c958a52c43b03269",
"size": "33239",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mi/dataset/parser/ctdmo_ghqr_sio.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "9920905"
},
{
"name": "Shell",
"bytes": "6208"
}
],
"symlink_target": ""
} |
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the verbose name of TournamentResultPage to "Tournament results"."""

    dependencies = [
        ("website", "0017_auto_20190804_0759"),
    ]

    operations = [
        migrations.AlterModelOptions(
            name="tournamentresultpage",
            options={"verbose_name": "Tournament results"},
        ),
    ]
| {
"content_hash": "527a5242b720765229d7da52394a9195",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 113,
"avg_line_length": 27.5,
"alnum_prop": 0.7127272727272728,
"repo_name": "ianastewart/cwltc-admin",
"id": "93498eab84ec6cbb6aee04eba528780c34dea5fe",
"size": "324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/migrations/0018_auto_20190805_0845.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "945975"
},
{
"name": "Dockerfile",
"bytes": "882"
},
{
"name": "HTML",
"bytes": "526368"
},
{
"name": "JavaScript",
"bytes": "843481"
},
{
"name": "Python",
"bytes": "8389886"
},
{
"name": "Shell",
"bytes": "1023"
}
],
"symlink_target": ""
} |
from datetime import date
from itertools import chain
import pymysql
from DBUtils import PooledDB
from dateutil.relativedelta import relativedelta
from scrapy import Item
# Lazily-created MySQL connection pool; populated on first call to get_connection().
__mysql_connection_pool = None
# MySQL connection settings for the local "stock" database.
host = '127.0.0.1'
port = 3306
user = 'root'
password = 'ipampas'
db = 'stock'
# Base names of the sharded quote tables; choose_table()/choose_derive()
# append a shard index derived from the stock code.
trade_quote_table = 'stock_daily_quote'
derive_quote_table = 'stock_derive_quote'
# NOTE(review): captured once at import time; a long-running process will not
# see the date roll over at midnight -- confirm that is intended.
today = date.today()
def get_connection():
    """
    Return a connection from the module-level MySQL pool.

    The pool is created lazily on first use with the module's connection
    settings; subsequent calls reuse the same pool.
    """
    global __mysql_connection_pool
    if not __mysql_connection_pool:
        __mysql_connection_pool = PooledDB.PooledDB(
            pymysql,
            mincached=10,
            maxcached=100,
            maxconnections=200,
            blocking=True,
            host=host,
            port=port,
            user=user,
            passwd=password,
            db=db,
        )
    return __mysql_connection_pool.connection()
def execute_sql(sql):
    """
    Execute a SQL statement on a pooled connection and return all fetched rows.

    The connection is committed after a successful execute and is always
    returned to the pool (via close()) even when execution raises; the
    original code never closed the connection, which could exhaust the
    pool (maxconnections=200, blocking=True) under sustained use.
    """
    connection = get_connection()
    try:
        cursor = connection.cursor()
        try:
            cursor.execute(sql)
            connection.commit()
            return cursor.fetchall()
        finally:
            # Close the cursor even if execute/commit fails.
            cursor.close()
    finally:
        # With DBUtils' PooledDB, close() hands the connection back to the pool.
        connection.close()
def build_item_sql(table, item, columns):
    """
    Build an INSERT statement for a scrapy Item against the given table.

    Raises Exception when `item` is not a scrapy.Item subclass instance.
    """
    if not isinstance(item, Item):
        raise Exception(u'不支持的类,item应为scrapy.Item的子类')
    column_sql = '`' + '`,`'.join(columns) + '`'
    return 'insert into `%s` (%s) values (%s)' % (table, column_sql, _build_value_sql(item))
def _build_value_sql(item):
lm = lambda val: 'null' if len(val) == 0 or val == '0000-00-00' else ('"' + str(val) + '"')
str_list = [lm(item[key]) for key in item.keys() if key != 'level']
return ','.join(str_list)
def choose_table(stock_code):
    """Pick the trade-quote shard table (20 shards) for the given stock code."""
    shard = int(stock_code) % 20
    return '%s%d' % (trade_quote_table, shard)
def choose_derive(stock_code):
    """Pick the derive-quote shard table (30 shards) for the given stock code."""
    shard = int(stock_code) % 30
    return '%s%d' % (derive_quote_table, shard)
def get_all_stocks():
    """
    Return (stock_code, month_start) pairs for all active A-share stocks,
    where month_start is the first day of the listing month.
    """
    sql = 'SELECT stock_code, listed_date from stock_company ' \
          'where stat_code = "A" and market_type in (1,4)'
    rows = execute_sql(sql)

    def month_start(d):
        return date(d.year, d.month, 1)

    return [(code, month_start(listed)) for code, listed in rows]
def get_record_stocks(table_name, mod_count):
    """
    Return the stocks that still need crawling, each paired with the date
    to resume from.

    A stock is skipped when its latest recorded date is already `today` or
    later; otherwise the resume date is the later of its listing month start
    and the day after its last recorded trade date (or just the listing
    month start when nothing has been recorded yet).
    """
    recorded = _tuple_list_to_dict(_query_record_stocks(table_name, mod_count))
    result = []
    for code, listed_month in get_all_stocks():
        last = recorded.get(code)
        # Already caught up -- nothing to crawl for this stock.
        if last and last >= today:
            continue
        start = max(listed_month, last) if last else listed_month
        result.append((code, start))
    return result
def _query_record_stocks(table_name, mod_count):
    """
    Query every shard of `table_name` (suffixes 0..mod_count-1) for each
    stock's latest recorded trade date; return the combined row list.
    """
    sql_template = 'select stock_code, max(trade_date) from ' + table_name + '%s group by stock_code'
    rows = []
    for shard in range(mod_count):
        rows.extend(execute_sql(sql_template % str(shard)))
    return rows
def _tuple_list_to_dict(tuple_list):
    """
    Map stock_code -> the day after its latest recorded trade datetime.
    """
    return {code: recorded.date() + relativedelta(days=1)
            for code, recorded in tuple_list}
| {
"content_hash": "c37238aa637ad5ed14b8c209190e5811",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 108,
"avg_line_length": 32,
"alnum_prop": 0.6256868131868132,
"repo_name": "nicee/xshirmp",
"id": "8feb25b8b7ba81cf40ba818e2d0d8193b7528478",
"size": "2959",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lotus-crawler/yulin_crawler/yulin_crawler/dbutil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "FreeMarker",
"bytes": "5244"
},
{
"name": "HTML",
"bytes": "380"
},
{
"name": "Java",
"bytes": "93255"
},
{
"name": "Python",
"bytes": "93489"
}
],
"symlink_target": ""
} |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'IpsubMaParentIntfStateDataEnum' : _MetaInfoEnum('IpsubMaParentIntfStateDataEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper',
{
'deleted':'deleted',
'down':'down',
'up':'up',
}, 'Cisco-IOS-XR-subscriber-ipsub-oper', _yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper']),
'IpsubMaIntfStateDataEnum' : _MetaInfoEnum('IpsubMaIntfStateDataEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper',
{
'invalid':'invalid',
'initialized':'initialized',
'session-creation-started':'session_creation_started',
'control-policy-executing':'control_policy_executing',
'control-policy-executed':'control_policy_executed',
'session-features-applied':'session_features_applied',
'vrf-configured':'vrf_configured',
'adding-adjacency':'adding_adjacency',
'adjacency-added':'adjacency_added',
'up':'up',
'down':'down',
'address-family-down':'address_family_down',
'address-family-down-complete':'address_family_down_complete',
'disconnecting':'disconnecting',
'disconnected':'disconnected',
'error':'error',
}, 'Cisco-IOS-XR-subscriber-ipsub-oper', _yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper']),
'IpsubMaParentIntfVlanEnum' : _MetaInfoEnum('IpsubMaParentIntfVlanEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper',
{
'plain':'plain',
'ambiguous':'ambiguous',
}, 'Cisco-IOS-XR-subscriber-ipsub-oper', _yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper']),
'IpsubMaIntfInitiatorDataEnum' : _MetaInfoEnum('IpsubMaIntfInitiatorDataEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper',
{
'dhcp':'dhcp',
'packet-trigger':'packet_trigger',
'invalid-trigger':'invalid_trigger',
}, 'Cisco-IOS-XR-subscriber-ipsub-oper', _yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper']),
'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.Dhcp' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.Dhcp',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'dhcp',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.PacketTrigger' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.PacketTrigger',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'packet-trigger',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators',
False,
[
_MetaInfoClassMember('dhcp', REFERENCE_CLASS, 'Dhcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.Dhcp',
[], [],
''' DHCP summary statistics
''',
'dhcp',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('packet-trigger', REFERENCE_CLASS, 'PacketTrigger' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.PacketTrigger',
[], [],
''' Packet trigger summary statistics
''',
'packet_trigger',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'initiators',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.Dhcp' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.Dhcp',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'dhcp',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.PacketTrigger' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.PacketTrigger',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'packet-trigger',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators',
False,
[
_MetaInfoClassMember('dhcp', REFERENCE_CLASS, 'Dhcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.Dhcp',
[], [],
''' DHCP summary statistics
''',
'dhcp',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('packet-trigger', REFERENCE_CLASS, 'PacketTrigger' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.PacketTrigger',
[], [],
''' Packet trigger summary statistics
''',
'packet_trigger',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'ipv6-initiators',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary',
False,
[
_MetaInfoClassMember('initiators', REFERENCE_CLASS, 'Initiators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators',
[], [],
''' Summary counts per initiator
''',
'initiators',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('interfaces', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of interfaces with subscriber
configuration
''',
'interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-initiators', REFERENCE_CLASS, 'Ipv6Initiators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators',
[], [],
''' Summary counts per initiator for ipv6 session
''',
'ipv6_initiators',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'access-interface-summary',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.Dhcp' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.Dhcp',
False,
[
_MetaInfoClassMember('adding-adjacency', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adding adjacency
''',
'adding_adjacency',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('adjacency-added', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adjacency added
''',
'adjacency_added',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executed', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executed
''',
'control_policy_executed',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executing', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executing
''',
'control_policy_executing',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnected', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnected
''',
'disconnected',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnecting', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnecting
''',
'disconnecting',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('down', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Down
''',
'down',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Error
''',
'error',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('initialized', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Initialized
''',
'initialized',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('invalid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Invalid
''',
'invalid',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-creation-started', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session creation started
''',
'session_creation_started',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-features-applied', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session features applied
''',
'session_features_applied',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('total-interfaces', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total number of interfaces in all states
''',
'total_interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('up', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Up
''',
'up',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf-configured', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' VRF configured
''',
'vrf_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'dhcp',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.PacketTrigger' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.PacketTrigger',
False,
[
_MetaInfoClassMember('adding-adjacency', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adding adjacency
''',
'adding_adjacency',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('adjacency-added', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adjacency added
''',
'adjacency_added',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executed', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executed
''',
'control_policy_executed',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executing', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executing
''',
'control_policy_executing',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnected', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnected
''',
'disconnected',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnecting', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnecting
''',
'disconnecting',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('down', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Down
''',
'down',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Error
''',
'error',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('initialized', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Initialized
''',
'initialized',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('invalid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Invalid
''',
'invalid',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-creation-started', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session creation started
''',
'session_creation_started',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-features-applied', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session features applied
''',
'session_features_applied',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('total-interfaces', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total number of interfaces in all states
''',
'total_interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('up', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Up
''',
'up',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf-configured', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' VRF configured
''',
'vrf_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'packet-trigger',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators',
False,
[
_MetaInfoClassMember('dhcp', REFERENCE_CLASS, 'Dhcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.Dhcp',
[], [],
''' DHCP
''',
'dhcp',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('packet-trigger', REFERENCE_CLASS, 'PacketTrigger' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.PacketTrigger',
[], [],
''' Packet trigger
''',
'packet_trigger',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'initiators',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.Dhcp' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.Dhcp',
False,
[
_MetaInfoClassMember('adding-adjacency', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adding adjacency
''',
'adding_adjacency',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('adjacency-added', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adjacency added
''',
'adjacency_added',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executed', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executed
''',
'control_policy_executed',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executing', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executing
''',
'control_policy_executing',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnected', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnected
''',
'disconnected',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnecting', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnecting
''',
'disconnecting',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('down', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Down
''',
'down',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Error
''',
'error',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('initialized', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Initialized
''',
'initialized',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('invalid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Invalid
''',
'invalid',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-creation-started', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session creation started
''',
'session_creation_started',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-features-applied', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session features applied
''',
'session_features_applied',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('total-interfaces', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total number of interfaces in all states
''',
'total_interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('up', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Up
''',
'up',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf-configured', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' VRF configured
''',
'vrf_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'dhcp',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.PacketTrigger' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.PacketTrigger',
False,
[
_MetaInfoClassMember('adding-adjacency', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adding adjacency
''',
'adding_adjacency',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('adjacency-added', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Adjacency added
''',
'adjacency_added',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executed', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executed
''',
'control_policy_executed',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('control-policy-executing', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Control policy executing
''',
'control_policy_executing',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnected', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnected
''',
'disconnected',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('disconnecting', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Disconnecting
''',
'disconnecting',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('down', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Down
''',
'down',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('error', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Error
''',
'error',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('initialized', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Initialized
''',
'initialized',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('invalid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Invalid
''',
'invalid',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-creation-started', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session creation started
''',
'session_creation_started',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-features-applied', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Session features applied
''',
'session_features_applied',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('total-interfaces', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Total number of interfaces in all states
''',
'total_interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('up', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Up
''',
'up',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf-configured', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' VRF configured
''',
'vrf_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'packet-trigger',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators',
False,
[
_MetaInfoClassMember('dhcp', REFERENCE_CLASS, 'Dhcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.Dhcp',
[], [],
''' DHCP
''',
'dhcp',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('packet-trigger', REFERENCE_CLASS, 'PacketTrigger' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.PacketTrigger',
[], [],
''' Packet trigger
''',
'packet_trigger',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'ipv6-initiators',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.InterfaceCounts' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.InterfaceCounts',
False,
[
_MetaInfoClassMember('initiators', REFERENCE_CLASS, 'Initiators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators',
[], [],
''' Initiators
''',
'initiators',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-initiators', REFERENCE_CLASS, 'Ipv6Initiators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators',
[], [],
''' IPv6 Initiators
''',
'ipv6_initiators',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'interface-counts',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary.Vrf' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary.Vrf',
False,
[
_MetaInfoClassMember('interfaces', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of IP subscriber interfaces in the VRF
table
''',
'interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-interfaces', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Number of IPv6 subscriber interfaces in the VRF
table
''',
'ipv6_interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPv6 VRF
''',
'ipv6vrf_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPv4 VRF
''',
'vrf_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Summary' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Summary',
False,
[
_MetaInfoClassMember('access-interface-summary', REFERENCE_CLASS, 'AccessInterfaceSummary' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary',
[], [],
''' Access interface summary statistics
''',
'access_interface_summary',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('interface-counts', REFERENCE_CLASS, 'InterfaceCounts' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.InterfaceCounts',
[], [],
''' Initiator interface counts
''',
'interface_counts',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf', REFERENCE_LIST, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary.Vrf',
[], [],
''' Array of VRFs with IPSUB interfaces
''',
'vrf',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'summary',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Interfaces.Interface.Vrf' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Interfaces.Interface.Vrf',
False,
[
_MetaInfoClassMember('table-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Table
''',
'table_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'vrf',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Interfaces.Interface.Ipv6Vrf' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Interfaces.Interface.Ipv6Vrf',
False,
[
_MetaInfoClassMember('table-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Table
''',
'table_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF name
''',
'vrf_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'ipv6vrf',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Interfaces.Interface' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Interfaces.Interface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', True),
_MetaInfoClassMember('access-interface', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Access interface through which this subscriber
is accessible
''',
'access_interface',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('age', ATTRIBUTE, 'str' , None, None,
[], [],
''' Age in hh:mm:ss format
''',
'age',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('current-change-age', ATTRIBUTE, 'str' , None, None,
[], [],
''' Current change age in hh:mm:ss format
''',
'current_change_age',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('initiator', REFERENCE_ENUM_CLASS, 'IpsubMaIntfInitiatorDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaIntfInitiatorDataEnum',
[], [],
''' Protocol trigger for creation of this subscriber
session
''',
'initiator',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('interface-creation-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface creation time in month day hh:mm:ss
format
''',
'interface_creation_time',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-current-change-age', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPV6 Current change age in hh:mm:ss format
''',
'ipv6_current_change_age',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-initiator', REFERENCE_ENUM_CLASS, 'IpsubMaIntfInitiatorDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaIntfInitiatorDataEnum',
[], [],
''' Protocol trigger for creation of this
subscriber's IPv6 session
''',
'ipv6_initiator',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-last-state-change-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface's IPV6 last state change time in month
day hh:mm:ss format
''',
'ipv6_last_state_change_time',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-old-state', REFERENCE_ENUM_CLASS, 'IpsubMaIntfStateDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaIntfStateDataEnum',
[], [],
''' Previous state of the subscriber's IPv6 session
''',
'ipv6_old_state',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-state', REFERENCE_ENUM_CLASS, 'IpsubMaIntfStateDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaIntfStateDataEnum',
[], [],
''' State of the subscriber's IPv6 session
''',
'ipv6_state',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6vrf', REFERENCE_CLASS, 'Ipv6Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Interfaces.Interface.Ipv6Vrf',
[], [],
''' IPv6 VRF details
''',
'ipv6vrf',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('is-l2-connected', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if L2 connected
''',
'is_l2_connected',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('last-state-change-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface's last state change time in month day
hh:mm:ss format
''',
'last_state_change_time',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('old-state', REFERENCE_ENUM_CLASS, 'IpsubMaIntfStateDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaIntfStateDataEnum',
[], [],
''' Previous state of the subscriber session
''',
'old_state',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Session Type
''',
'session_type',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'IpsubMaIntfStateDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaIntfStateDataEnum',
[], [],
''' State of the subscriber session
''',
'state',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('subscriber-ipv4-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IPv4 Address of the subscriber
''',
'subscriber_ipv4_address',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('subscriber-ipv6-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IPv6 Address of the subscriber
''',
'subscriber_ipv6_address',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('subscriber-label', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Subscriber label for this subscriber interface
''',
'subscriber_label',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('subscriber-mac-addres', ATTRIBUTE, 'str' , None, None,
[], [b'[0-9a-fA-F]{2}(:[0-9a-fA-F]{2}){5}'],
''' MAC address of the subscriber
''',
'subscriber_mac_addres',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vrf', REFERENCE_CLASS, 'Vrf' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Interfaces.Interface.Vrf',
[], [],
''' IPv4 VRF details
''',
'vrf',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'interface',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.Interfaces' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.Interfaces',
False,
[
_MetaInfoClassMember('interface', REFERENCE_LIST, 'Interface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Interfaces.Interface',
[], [],
''' IP subscriber interface entry
''',
'interface',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.Dhcp' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.Dhcp',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface that were
dropped
''',
'fsol_dropped_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped
''',
'fsol_dropped_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-dup-addr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to duplicate source address
''',
'fsol_dropped_packets_dup_addr',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-flow', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding creation rate
''',
'fsol_dropped_packets_flow',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-session-limit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding one or more of the
configured session limits
''',
'fsol_dropped_packets_session_limit',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('is-configured', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Ture if the initiator is configred
''',
'is_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('sessions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of sessions currently up for each
initiator
''',
'sessions',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('unique-ip-check', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if check for subscriber address
uniquenessduring first sign of life is enabled
''',
'unique_ip_check',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'dhcp',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.PacketTrigger' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.PacketTrigger',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface that were
dropped
''',
'fsol_dropped_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped
''',
'fsol_dropped_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-dup-addr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to duplicate source address
''',
'fsol_dropped_packets_dup_addr',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-flow', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding creation rate
''',
'fsol_dropped_packets_flow',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-session-limit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding one or more of the
configured session limits
''',
'fsol_dropped_packets_session_limit',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('is-configured', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Ture if the initiator is configred
''',
'is_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('sessions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of sessions currently up for each
initiator
''',
'sessions',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('unique-ip-check', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if check for subscriber address
uniquenessduring first sign of life is enabled
''',
'unique_ip_check',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'packet-trigger',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators',
False,
[
_MetaInfoClassMember('dhcp', REFERENCE_CLASS, 'Dhcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.Dhcp',
[], [],
''' DHCP information
''',
'dhcp',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('packet-trigger', REFERENCE_CLASS, 'PacketTrigger' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.PacketTrigger',
[], [],
''' packet trigger information
''',
'packet_trigger',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'initiators',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.Dhcp' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.Dhcp',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface that were
dropped
''',
'fsol_dropped_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped
''',
'fsol_dropped_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-dup-addr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to duplicate source address
''',
'fsol_dropped_packets_dup_addr',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-flow', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding creation rate
''',
'fsol_dropped_packets_flow',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-session-limit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding one or more of the
configured session limits
''',
'fsol_dropped_packets_session_limit',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('is-configured', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Ture if the initiator is configred
''',
'is_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('sessions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of sessions currently up for each
initiator
''',
'sessions',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('unique-ip-check', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if check for subscriber address
uniquenessduring first sign of life is enabled
''',
'unique_ip_check',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'dhcp',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.PacketTrigger' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.PacketTrigger',
False,
[
_MetaInfoClassMember('fsol-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface
''',
'fsol_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-bytes', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life bytes received for
initiating protocol on this interface that were
dropped
''',
'fsol_dropped_bytes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped
''',
'fsol_dropped_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-dup-addr', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to duplicate source address
''',
'fsol_dropped_packets_dup_addr',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-flow', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding creation rate
''',
'fsol_dropped_packets_flow',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-dropped-packets-session-limit', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface that
were dropped due to exceeding one or more of the
configured session limits
''',
'fsol_dropped_packets_session_limit',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('fsol-packets', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of first sign of life packets received
for initiating protocol on this interface
''',
'fsol_packets',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('is-configured', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Ture if the initiator is configred
''',
'is_configured',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('sessions', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Number of sessions currently up for each
initiator
''',
'sessions',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('unique-ip-check', ATTRIBUTE, 'bool' , None, None,
[], [],
''' True if check for subscriber address
uniquenessduring first sign of life is enabled
''',
'unique_ip_check',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'packet-trigger',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators',
False,
[
_MetaInfoClassMember('dhcp', REFERENCE_CLASS, 'Dhcp' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.Dhcp',
[], [],
''' DHCP information
''',
'dhcp',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('packet-trigger', REFERENCE_CLASS, 'PacketTrigger' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.PacketTrigger',
[], [],
''' packet trigger information
''',
'packet_trigger',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'ipv6-initiators',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.UnclassifiedSource' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.UnclassifiedSource',
False,
[
_MetaInfoClassMember('per-vlan', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Per-VLAN limit category
''',
'per_vlan',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'unclassified-source',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.Total' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.Total',
False,
[
_MetaInfoClassMember('per-vlan', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Per-VLAN limit category
''',
'per_vlan',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'total',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit',
False,
[
_MetaInfoClassMember('total', REFERENCE_CLASS, 'Total' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.Total',
[], [],
''' All sources session limits
''',
'total',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('unclassified-source', REFERENCE_CLASS, 'UnclassifiedSource' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.UnclassifiedSource',
[], [],
''' Unclassified source session limits
''',
'unclassified_source',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'session-limit',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface',
False,
[
_MetaInfoClassMember('interface-name', ATTRIBUTE, 'str' , None, None,
[], [b'(([a-zA-Z0-9_]*\\d+/){3,4}\\d+)|(([a-zA-Z0-9_]*\\d+/){3,4}\\d+\\.\\d+)|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]*\\d+))|(([a-zA-Z0-9_]*\\d+/){2}([a-zA-Z0-9_]+))|([a-zA-Z0-9_-]*\\d+)|([a-zA-Z0-9_-]*\\d+\\.\\d+)|(mpls)|(dwdm)'],
''' Interface name
''',
'interface_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', True),
_MetaInfoClassMember('age', ATTRIBUTE, 'str' , None, None,
[], [],
''' Age in HH:MM:SS format
''',
'age',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('initiators', REFERENCE_CLASS, 'Initiators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators',
[], [],
''' Configurational state-statistics for each
initiating protocol enabled on this parent
interface
''',
'initiators',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('interface-creation-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface creation time in Month Date HH:MM:SS
format
''',
'interface_creation_time',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('interface-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Type
''',
'interface_type',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-initiators', REFERENCE_CLASS, 'Ipv6Initiators' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators',
[], [],
''' Configurational state-statistics for each
initiating protocol enabled on this parent
interface for IPv6 session
''',
'ipv6_initiators',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('ipv6-state', REFERENCE_ENUM_CLASS, 'IpsubMaParentIntfStateDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaParentIntfStateDataEnum',
[], [],
''' Operational ipv6 state of this interface
''',
'ipv6_state',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('session-limit', REFERENCE_CLASS, 'SessionLimit' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit',
[], [],
''' Configuration session limits for each session
limit source and type
''',
'session_limit',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('state', REFERENCE_ENUM_CLASS, 'IpsubMaParentIntfStateDataEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaParentIntfStateDataEnum',
[], [],
''' Operational state of this interface
''',
'state',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('vlan-type', REFERENCE_ENUM_CLASS, 'IpsubMaParentIntfVlanEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpsubMaParentIntfVlanEnum',
[], [],
''' The VLAN type on the access interface
''',
'vlan_type',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'access-interface',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node.AccessInterfaces' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node.AccessInterfaces',
False,
[
_MetaInfoClassMember('access-interface', REFERENCE_LIST, 'AccessInterface' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface',
[], [],
''' IP subscriber access interface entry
''',
'access_interface',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'access-interfaces',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes.Node' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], [b'([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' The node ID to filter on. For eg., 0/1/CPU0
''',
'node_name',
'Cisco-IOS-XR-subscriber-ipsub-oper', True),
_MetaInfoClassMember('access-interfaces', REFERENCE_CLASS, 'AccessInterfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.AccessInterfaces',
[], [],
''' IP subscriber access interface table
''',
'access_interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('interfaces', REFERENCE_CLASS, 'Interfaces' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Interfaces',
[], [],
''' IP subscriber interface table
''',
'interfaces',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
_MetaInfoClassMember('summary', REFERENCE_CLASS, 'Summary' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node.Summary',
[], [],
''' IP subscriber interface summary
''',
'summary',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber.Nodes' : {
'meta_info' : _MetaInfoClass('IpSubscriber.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes.Node',
[], [],
''' Location. For eg., 0/1/CPU0
''',
'node',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
'IpSubscriber' : {
'meta_info' : _MetaInfoClass('IpSubscriber',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper', 'IpSubscriber.Nodes',
[], [],
''' IP subscriber operational data for a particular
location
''',
'nodes',
'Cisco-IOS-XR-subscriber-ipsub-oper', False),
],
'Cisco-IOS-XR-subscriber-ipsub-oper',
'ip-subscriber',
_yang_ns._namespaces['Cisco-IOS-XR-subscriber-ipsub-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_subscriber_ipsub_oper'
),
},
}
# Wire up the parent back-references for the nested meta classes: every
# child entry's meta_info points at the meta_info of its enclosing class,
# whose table key is the child's key with the last dotted component removed.
for _child_key in (
    'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.Dhcp',
    'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators.PacketTrigger',
    'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.Dhcp',
    'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators.PacketTrigger',
    'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Initiators',
    'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary.Ipv6Initiators',
    'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.Dhcp',
    'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators.PacketTrigger',
    'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.Dhcp',
    'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators.PacketTrigger',
    'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Initiators',
    'IpSubscriber.Nodes.Node.Summary.InterfaceCounts.Ipv6Initiators',
    'IpSubscriber.Nodes.Node.Summary.AccessInterfaceSummary',
    'IpSubscriber.Nodes.Node.Summary.InterfaceCounts',
    'IpSubscriber.Nodes.Node.Summary.Vrf',
    'IpSubscriber.Nodes.Node.Interfaces.Interface.Vrf',
    'IpSubscriber.Nodes.Node.Interfaces.Interface.Ipv6Vrf',
    'IpSubscriber.Nodes.Node.Interfaces.Interface',
    'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.Dhcp',
    'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators.PacketTrigger',
    'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.Dhcp',
    'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators.PacketTrigger',
    'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.UnclassifiedSource',
    'IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit.Total',
):
    _parent_key = _child_key.rsplit('.', 1)[0]
    _meta_table[_child_key]['meta_info'].parent = _meta_table[_parent_key]['meta_info']
# Drop the loop temporaries so the module namespace matches the original.
del _child_key, _parent_key
_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Initiators']['meta_info'].parent =_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface']['meta_info']
_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.Ipv6Initiators']['meta_info'].parent =_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface']['meta_info']
_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface.SessionLimit']['meta_info'].parent =_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface']['meta_info']
_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces.AccessInterface']['meta_info'].parent =_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces']['meta_info']
_meta_table['IpSubscriber.Nodes.Node.Summary']['meta_info'].parent =_meta_table['IpSubscriber.Nodes.Node']['meta_info']
_meta_table['IpSubscriber.Nodes.Node.Interfaces']['meta_info'].parent =_meta_table['IpSubscriber.Nodes.Node']['meta_info']
_meta_table['IpSubscriber.Nodes.Node.AccessInterfaces']['meta_info'].parent =_meta_table['IpSubscriber.Nodes.Node']['meta_info']
_meta_table['IpSubscriber.Nodes.Node']['meta_info'].parent =_meta_table['IpSubscriber.Nodes']['meta_info']
_meta_table['IpSubscriber.Nodes']['meta_info'].parent =_meta_table['IpSubscriber']['meta_info']
| {
"content_hash": "c6a2929c8774efb9647714d3288b820d",
"timestamp": "",
"source": "github",
"line_count": 1620,
"max_line_length": 250,
"avg_line_length": 54.42716049382716,
"alnum_prop": 0.5206074490768045,
"repo_name": "111pontes/ydk-py",
"id": "9a63e3350c034641b4bc4273db54c634b5092aeb",
"size": "88175",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_subscriber_ipsub_oper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "7226"
},
{
"name": "Python",
"bytes": "446117948"
}
],
"symlink_target": ""
} |
from django.core.cache import cache as default_cache
def generate_named_key(instance_or_type, name, **vary_by):
    """
    Generate a named key (eg used for storing a list of results).

    :param instance_or_type: Model type or instance.
    :param name: Name of the stored set.
    :param vary_by: optional values to vary by.
    :return: String key for use in cache.
    """
    opts = instance_or_type._meta
    vary_string = ','.join('%s=%s' % (k, vary_by[k]) for k in sorted(vary_by))
    if vary_string:
        # Bug fix: this branch used ``opts.module_name`` (removed in Django
        # 1.8) while the branch below and generate_obj_key use
        # ``opts.model_name``; keys with vary_by would land in a different,
        # possibly invalid namespace.
        return 'model:%s.%s:%s[%s]' % (opts.app_label, opts.model_name, name, vary_string)
    else:
        return 'model:%s.%s:%s' % (opts.app_label, opts.model_name, name)
def generate_obj_key(instance_or_type, **vary_by):
    """
    Generate a cache key for a model instance or type.

    :param instance_or_type: Model type or instance.
    :param vary_by: optional values to vary by.
    :return: String key for use in cache.
    """
    meta = instance_or_type._meta
    pairs = ['%s=%s' % (key, vary_by[key]) for key in sorted(vary_by)]
    return 'model:%s.%s[%s]' % (meta.app_label, meta.model_name, ','.join(pairs))
def generate_instance_key(instance, attr_name=None):
    """
    Generate a cache key for a model instance.

    :param instance: Model instance.
    :param attr_name: Name of attribute used to generate a unique key; may
        also be a tuple or list to create composite keys.
    :return: String key for use in cache.
    """
    if attr_name is None:
        # Prefer the instance's declared cache key attribute, else the pk.
        names = [getattr(instance, 'cache_primary_attr', 'pk')]
    elif isinstance(attr_name, (tuple, list)):
        names = list(attr_name)
    else:
        names = [attr_name]
    return generate_obj_key(instance, **{name: getattr(instance, name) for name in names})
def set(model_instance, cache=None):
    """
    Store a model in cache.

    NOTE: shadows the ``set`` builtin within this module.

    :param model_instance: the model object to store.
    :param cache: cache instance to use; defaults to default django cache.
    :returns: cache key.
    """
    backend = cache or default_cache
    instance_key = generate_instance_key(model_instance)
    backend.set(instance_key, model_instance)
    return instance_key
def set_by_attribute(model_instance, attr_name, cache=None):
    """
    Store a model in cache by attribute value.

    Helper method that stores the model itself plus a reference entry keyed
    by the given attribute(s), pointing at the model's primary cache key.

    :param model_instance: the model object to store.
    :param attr_name: attribute or list of attributes.
    :param cache: cache instance to use; defaults to main django cache.
    :returns: reference cache key.

    .. note::
        Attribute must be unique to make this reliable.
    """
    backend = cache or default_cache
    instance_key = generate_instance_key(model_instance)
    reference_key = generate_instance_key(model_instance, attr_name)
    backend.set_many({
        instance_key: model_instance,
        reference_key: instance_key,
    })
    return reference_key
def set_queryset(queryset, name, cache=None, **vary_by):
    """
    Store a queryset in cache as a list of per-instance cache keys.

    :param queryset: Queryset to store.
    :param name: Name of set.
    :param cache: cache instance to use; defaults to default django cache.
    :param vary_by: optional values to vary by.
    :return: reference cache key.
    """
    backend = cache or default_cache
    named_key = generate_named_key(queryset.model, name, **vary_by)
    backend.set(named_key, [instance.cache_key for instance in queryset])
    return named_key
def get(model_type, pk, cache=None):
    """
    Get a model from cache by primary key.

    :param model_type: model type for building cache key.
    :param pk: primary key of model to fetch from cache.
    :param cache: cache instance to use; defaults to default django cache.
    :returns: model object if found; else None.
    """
    backend = cache or default_cache
    return backend.get(generate_obj_key(model_type, pk=pk))
def get_by_attribute(model_type, cache=None, **vary_by):
    """
    Get a model from cache by attribute reference.

    The reference entry (written by ``set_by_attribute``) holds the model's
    primary cache key; a second lookup fetches the model itself.

    :param model_type: model type for building cache key.
    :param cache: cache instance to use; defaults to main django cache.
    :param vary_by: key value pairs that a model varies by.
    :returns: model object if found; else None.
    """
    backend = cache or default_cache
    instance_key = backend.get(generate_obj_key(model_type, **vary_by))
    if not instance_key:
        return None
    return backend.get(instance_key)
class CachedQuerysetIter(object):
    """
    Iterable that iterates over a cached queryset, pulling results from cache.

    The first complete iteration fetches each instance from the cache backend
    and memoises it locally; subsequent iterations reuse the local copies.
    """
    def __init__(self, cache, model_type, qs_keys):
        self.cache = cache
        self.model_type = model_type
        self.qs_keys = qs_keys
        # Populated only after the first complete iteration.
        self._local_cache = None
    def __iter__(self):
        # Bug fix: identity check instead of truthiness, so an empty (falsy)
        # result list still counts as "already fetched" rather than hitting
        # the backend again on every iteration.
        if self._local_cache is not None:
            for instance in self._local_cache:
                yield instance
        else:
            local_cache = []
            for key in self.qs_keys:
                instance = self.cache.get(key)
                if not instance:
                    # Bug fix: replaced the placeholder 'eek!' message with
                    # one that identifies the missing/evicted key.
                    raise Exception('Cached instance missing for key: %s' % key)
                local_cache.append(instance)
                yield instance
            self._local_cache = local_cache
def get_queryset(model_type, name, cache=None, **vary_by):
    """
    Get a stored queryset from cache.

    :param model_type: model type for building cache key.
    :param name: Name of set.
    :param cache: cache instance to use; defaults to default django cache.
    :param vary_by: optional values to vary by.
    :return: Iterable object that yields matching model objects, or None if
        the named set is not in cache.
    """
    backend = cache or default_cache
    qs_keys = backend.get(generate_named_key(model_type, name, **vary_by))
    if not qs_keys:
        return None
    return CachedQuerysetIter(backend, model_type, qs_keys)
def delete(model_instance, cache=None, force_delete=False, delete_delay=5):
    """
    Delete a model instance from cache.

    By default the key is overwritten with a short-lived ``None`` tombstone
    instead of being removed, to prevent this race condition:

        Thread 1 -> Cache miss, get object from DB
        Thread 2 -> Object saved, deleted from cache
        Thread 1 -> Store (stale) object fetched from DB in cache

    Five seconds should be more than enough time to prevent this from
    happening for a web app.

    :param model_instance: the model object to remove.
    :param cache: cache instance to use; defaults to default django cache.
    :param force_delete: Just delete the key and don't prevent race conditions.
    """
    backend = cache or default_cache
    key = generate_instance_key(model_instance)
    if force_delete:
        backend.delete(key)
        return
    backend.set(key, None, delete_delay)
def delete_queryset(model_type, name, cache=None, force_delete=False, delete_delay=5, **vary_by):
    """
    Delete a stored queryset reference from cache.

    By default a short-lived ``None`` tombstone is written instead of a hard
    delete, mirroring the stale-repopulation protection used by ``delete``.

    :param model_type: model type for building cache key.
    :param name: Name of set.
    :param cache: cache instance to use; defaults to default django cache.
    :param force_delete: Just delete the key and don't prevent race conditions.
    :param delete_delay: tombstone lifetime (seconds) when not force deleting.
    :param vary_by: optional values to vary by; added for consistency with
        ``set_queryset``/``get_queryset``, which already support varied keys
        (previously a varied set could never be deleted).
    """
    cache = cache or default_cache
    key = generate_named_key(model_type, name, **vary_by)
    if force_delete:
        cache.delete(key)
    else:
        cache.set(key, None, delete_delay)
| {
"content_hash": "2c5707ecdce6720daddd87e01ec1b7cc",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 116,
"avg_line_length": 31.692660550458715,
"alnum_prop": 0.6482848458532349,
"repo_name": "timsavage/django-squirrel",
"id": "854185d950d41041bf522da8a5bdfe4d8e1becfa",
"size": "6933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "squirrel/model_cache.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20303"
}
],
"symlink_target": ""
} |
import unittest
import os, sys, commands
import comm
class TestSecurityFunctions(unittest.TestCase):
    """Packaging-tool tests for the manifest "permissions" field.

    Each test points make_apk.py at a prepared test-app manifest and checks
    whether packaging succeeds or fails as expected.
    """
    def _make_apk_cmd(self, app_dir):
        """Build the make_apk.py command line for the given test app dir."""
        manifestPath = comm.ConstPath + "/../testapp/" + app_dir + "/manifest.json"
        return "python %smake_apk.py --package=org.xwalk.example --arch=%s --mode=%s --manifest=%s" % \
              (comm.Pck_Tools, comm.ARCH, comm.MODE, manifestPath)
    def _assert_pack_fails(self, app_dir):
        """Run the packaging tool and assert it exits non-zero."""
        comm.setUp()
        packInfo = commands.getstatusoutput(self._make_apk_cmd(app_dir))
        self.assertNotEquals(0, packInfo[0])
    def test_permission_chinese(self):
        # Non-ASCII permission names must be rejected.
        self._assert_pack_fails("permission_field_chinese_tests")
    def test_permission_noapi(self):
        # Unknown/unsupported permission APIs must be rejected.
        self._assert_pack_fails("permission_field_noapi_tests")
    def test_permission_null(self):
        # An empty permission field is valid: packaging should succeed.
        comm.setUp()
        comm.gen_pkg(self._make_apk_cmd("permission_field_null_tests"), self)
    def test_permission_splite(self):
        # Malformed (badly split) permission lists must be rejected.
        self._assert_pack_fails("permission_field_splite_tests")
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "95643db310544c09028b47606b45b644",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 102,
"avg_line_length": 46.60526315789474,
"alnum_prop": 0.639751552795031,
"repo_name": "YongseopKim/crosswalk-test-suite",
"id": "46487f49fddc8dc51992423c642ad1680373b872",
"size": "3330",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "wrt/wrt-security-android-tests/security/permissiontest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3495"
},
{
"name": "CSS",
"bytes": "752102"
},
{
"name": "Erlang",
"bytes": "2850"
},
{
"name": "Java",
"bytes": "256724"
},
{
"name": "JavaScript",
"bytes": "16658061"
},
{
"name": "PHP",
"bytes": "40009"
},
{
"name": "Perl",
"bytes": "1255"
},
{
"name": "Python",
"bytes": "3995313"
},
{
"name": "Shell",
"bytes": "1106334"
},
{
"name": "XSLT",
"bytes": "785898"
}
],
"symlink_target": ""
} |
import pytest
import astropy.coordinates as coord
from ... import fermi
FK5_COORDINATES = coord.SkyCoord(10.68471, 41.26875, unit=('deg', 'deg'))
@pytest.mark.remote_data
def test_FermiLAT_query_async():
    # Asynchronous query: the return value is checked to contain the URL of
    # the Fermi LAT query-results page, not the data products themselves.
    result = fermi.core.FermiLAT.query_object_async(
        FK5_COORDINATES, energyrange_MeV='1000, 100000',
        obsdates='2013-01-01 00:00:00, 2013-01-02 00:00:00')
    assert 'https://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/QueryResults.cgi?' in result
@pytest.mark.remote_data
def test_FermiLAT_query():
    # Synchronous query: result is expected to be a list of URLs to the
    # generated spacecraft (SC) and photon (PH) FITS files.
    # Make a query that results in small SC and PH file sizes
    result = fermi.core.FermiLAT.query_object(
        FK5_COORDINATES, energyrange_MeV='1000, 100000',
        obsdates='2013-01-01 00:00:00, 2013-01-02 00:00:00')
    # this test might be fragile? I'm not sure how stable the file names are
    for rr in result:
        assert rr.startswith('https://fermi.gsfc.nasa.gov/FTP/fermi/data/lat/queries/')
        assert rr.endswith('_SC00.fits') or rr.endswith('_PH00.fits')
| {
"content_hash": "f47badd1834aa996e1b71008c39421b2",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 87,
"avg_line_length": 38.57692307692308,
"alnum_prop": 0.6919242273180458,
"repo_name": "ceb8/astroquery",
"id": "23d2b92ef3f1a6f808ccdc234600d58b0cdc4989",
"size": "1069",
"binary": false,
"copies": "2",
"ref": "refs/heads/track_master",
"path": "astroquery/fermi/tests/test_fermi_remote.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "756486"
},
{
"name": "Python",
"bytes": "2760787"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_host
short_description: Zabbix host creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix host entries and associated group and template data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name, used to authenticate against the server.
required: true
login_password:
description:
- Zabbix user password.
required: true
http_login_user:
description:
- Basic Auth login
required: false
default: None
version_added: "2.1"
http_login_password:
description:
- Basic Auth password
required: false
default: None
version_added: "2.1"
host_name:
description:
- Name of the host in Zabbix.
- host_name is the unique identifier used and cannot be updated using this module.
required: true
visible_name:
description:
- Visible name of the host in Zabbix.
required: false
version_added: '2.3'
host_groups:
description:
- List of host groups the host is part of.
required: false
link_templates:
description:
- List of templates linked to the host.
required: false
default: None
inventory_mode:
description:
- Configure the inventory mode.
choices: ['automatic', 'manual', 'disabled']
required: false
default: None
version_added: '2.1'
status:
description:
- Monitoring status of the host.
required: false
choices: ['enabled', 'disabled']
default: "enabled"
state:
description:
- State of the host.
- On C(present), it will create if host does not exist or update the host if the associated data is different.
- On C(absent) will remove a host if it exists.
required: false
choices: ['present', 'absent']
default: "present"
timeout:
description:
- The timeout of API request (seconds).
default: 10
proxy:
description:
- The name of the Zabbix Proxy to be used
default: None
interfaces:
description:
- List of interfaces to be created for the host (see example below).
- 'Available values are: dns, ip, main, port, type and useip.'
- Please review the interface documentation for more information on the supported properties
- 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
required: false
default: []
force:
description:
- Overwrite the host configuration, even if already present
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "2.0"
'''
EXAMPLES = '''
- name: Create a new host or update an existing host's info
local_action:
module: zabbix_host
server_url: http://monitor.example.com
login_user: username
login_password: password
host_name: ExampleHost
visible_name: ExampleName
host_groups:
- Example group1
- Example group2
link_templates:
- Example template1
- Example template2
status: enabled
state: present
inventory_mode: automatic
interfaces:
- type: 1
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 10050
- type: 4
main: 1
useip: 1
ip: 10.xx.xx.xx
dns: ""
port: 12345
proxy: a.zabbix.proxy
'''
import copy
try:
    from zabbix_api import ZabbixAPI, ZabbixAPISubClass
    # Extend the ZabbixAPI.
    # The zabbix-api python module is too old (version 1.0, no higher version
    # so far) and does not support the 'hostinterface' api calls, so we
    # inherit from ZabbixAPI to add 'hostinterface' support.
    class ZabbixAPIExtends(ZabbixAPI):
        """ZabbixAPI subclass exposing the 'hostinterface' API namespace."""
        hostinterface = None
        def __init__(self, server, timeout, user, passwd, **kwargs):
            ZabbixAPI.__init__(self, server, timeout=timeout, user=user, passwd=passwd)
            # Sub-client whose method calls are prefixed with 'hostinterface.'
            self.hostinterface = ZabbixAPISubClass(self, dict({"prefix": "hostinterface"}, **kwargs))
    HAS_ZABBIX_API = True
except ImportError:
    # Checked in main(); lets the module fail with an actionable message.
    HAS_ZABBIX_API = False
from ansible.module_utils.basic import AnsibleModule
class Host(object):
    """
    Helper wrapping Zabbix API calls for host CRUD operations.

    Holds a reference to the AnsibleModule (for check mode and failure
    reporting via fail_json) and an authenticated Zabbix API client.
    """
    def __init__(self, module, zbx):
        self._module = module
        self._zapi = zbx
    # exist host
    def is_host_exist(self, host_name):
        """Return the (possibly empty) list of hosts matching host_name."""
        result = self._zapi.host.get({'filter': {'host': host_name}})
        return result
    # check if host group exists
    def check_host_group_exist(self, group_names):
        """Fail the module unless every named host group exists; else True."""
        for group_name in group_names:
            result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
            if not result:
                self._module.fail_json(msg="Hostgroup not found: %s" % group_name)
        return True
    def get_template_ids(self, template_list):
        """Resolve template names to template ids, failing on unknown names."""
        template_ids = []
        if template_list is None or len(template_list) == 0:
            return template_ids
        for template in template_list:
            # NOTE(review): this rebinds template_list, shadowing the
            # parameter; iteration continues over the original list object.
            template_list = self._zapi.template.get({'output': 'extend', 'filter': {'host': template}})
            if len(template_list) < 1:
                self._module.fail_json(msg="Template not found: %s" % template)
            else:
                template_id = template_list[0]['templateid']
                template_ids.append(template_id)
        return template_ids
    def add_host(self, host_name, group_ids, status, interfaces, proxy_id, visible_name):
        """Create a new host and return its host id (exits early in check mode)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'host': host_name, 'interfaces': interfaces, 'groups': group_ids, 'status': status}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            host_list = self._zapi.host.create(parameters)
            if len(host_list) >= 1:
                return host_list['hostids'][0]
        except Exception as e:
            self._module.fail_json(msg="Failed to create host %s: %s" % (host_name, e))
    def update_host(self, host_name, group_ids, status, host_id, interfaces, exist_interface_list, proxy_id, visible_name):
        """
        Update host properties and reconcile its interfaces.

        Interfaces are matched by type: matching ones are updated in place,
        unmatched new ones are created, and leftovers are deleted.
        """
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            parameters = {'hostid': host_id, 'groups': group_ids, 'status': status}
            if proxy_id:
                parameters['proxy_hostid'] = proxy_id
            if visible_name:
                parameters['name'] = visible_name
            self._zapi.host.update(parameters)
            # NOTE(review): despite the name, this is an alias of
            # exist_interface_list, not a copy -- removals below mutate the
            # caller's list as well.
            interface_list_copy = exist_interface_list
            if interfaces:
                for interface in interfaces:
                    flag = False
                    interface_str = interface
                    for exist_interface in exist_interface_list:
                        interface_type = interface['type']
                        exist_interface_type = int(exist_interface['type'])
                        if interface_type == exist_interface_type:
                            # update
                            interface_str['interfaceid'] = exist_interface['interfaceid']
                            self._zapi.hostinterface.update(interface_str)
                            flag = True
                            interface_list_copy.remove(exist_interface)
                            break
                    if not flag:
                        # add
                        interface_str['hostid'] = host_id
                        self._zapi.hostinterface.create(interface_str)
                # remove interfaces that matched nothing in the desired list
                remove_interface_ids = []
                for remove_interface in interface_list_copy:
                    interface_id = remove_interface['interfaceid']
                    remove_interface_ids.append(interface_id)
                if len(remove_interface_ids) > 0:
                    self._zapi.hostinterface.delete(remove_interface_ids)
        except Exception as e:
            self._module.fail_json(msg="Failed to update host %s: %s" % (host_name, e))
    def delete_host(self, host_id, host_name):
        """Delete the host by id (host_name is only used for error reporting)."""
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.delete([host_id])
        except Exception as e:
            self._module.fail_json(msg="Failed to delete host %s: %s" % (host_name, e))
    # get host by host name
    def get_host_by_host_name(self, host_name):
        """Return the host object for host_name, failing if not found."""
        host_list = self._zapi.host.get({'output': 'extend', 'filter': {'host': [host_name]}})
        if len(host_list) < 1:
            self._module.fail_json(msg="Host not found: %s" % host_name)
        else:
            return host_list[0]
    # get proxyid by proxy name
    def get_proxyid_by_proxy_name(self, proxy_name):
        """Return the proxy id for proxy_name, failing if not found."""
        proxy_list = self._zapi.proxy.get({'output': 'extend', 'filter': {'host': [proxy_name]}})
        if len(proxy_list) < 1:
            self._module.fail_json(msg="Proxy not found: %s" % proxy_name)
        else:
            return proxy_list[0]['proxyid']
    # get group ids by group names
    def get_group_ids_by_group_names(self, group_names):
        """Return [{'groupid': ...}, ...] for the given group names."""
        group_ids = []
        if self.check_host_group_exist(group_names):
            group_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_names}})
            for group in group_list:
                group_id = group['groupid']
                group_ids.append({'groupid': group_id})
        return group_ids
    # get host templates by host id
    def get_host_templates_by_host_id(self, host_id):
        """Return the list of template ids currently linked to the host."""
        template_ids = []
        template_list = self._zapi.template.get({'output': 'extend', 'hostids': host_id})
        for template in template_list:
            template_ids.append(template['templateid'])
        return template_ids
    # get host groups by host id
    def get_host_groups_by_host_id(self, host_id):
        """Return the list of group names the host currently belongs to."""
        exist_host_groups = []
        host_groups_list = self._zapi.hostgroup.get({'output': 'extend', 'hostids': host_id})
        if len(host_groups_list) >= 1:
            for host_groups_name in host_groups_list:
                exist_host_groups.append(host_groups_name['name'])
        return exist_host_groups
    # check the exist_interfaces whether it equals the interfaces or not
    def check_interface_properties(self, exist_interface_list, interfaces):
        """Return True if the desired interfaces differ from the existing ones.

        Interfaces are compared by port number first, then property-by-property
        for interfaces sharing a port.
        """
        interfaces_port_list = []
        if interfaces is not None:
            if len(interfaces) >= 1:
                for interface in interfaces:
                    interfaces_port_list.append(int(interface['port']))
        exist_interface_ports = []
        if len(exist_interface_list) >= 1:
            for exist_interface in exist_interface_list:
                exist_interface_ports.append(int(exist_interface['port']))
        # different port sets means a change is needed
        if set(interfaces_port_list) != set(exist_interface_ports):
            return True
        for exist_interface in exist_interface_list:
            exit_interface_port = int(exist_interface['port'])
            for interface in interfaces:
                interface_port = int(interface['port'])
                if interface_port == exit_interface_port:
                    # compare as strings since API values come back as strings
                    for key in interface.keys():
                        if str(exist_interface[key]) != str(interface[key]):
                            return True
        return False
    # get the status of host by host
    def get_host_status_by_host(self, host):
        """Return the 'status' field of a host object."""
        return host['status']
    # check all the properties before link or clear template
    def check_all_properties(self, host_id, host_groups, status, interfaces, template_ids,
                             exist_interfaces, host, proxy_id, visible_name):
        """Return True if any host property differs from the desired state."""
        # get the existing host's groups
        exist_host_groups = self.get_host_groups_by_host_id(host_id)
        if set(host_groups) != set(exist_host_groups):
            return True
        # get the existing status
        exist_status = self.get_host_status_by_host(host)
        if int(status) != int(exist_status):
            return True
        # check the exist_interfaces whether it equals the interfaces or not
        if self.check_interface_properties(exist_interfaces, interfaces):
            return True
        # get the existing templates
        exist_template_ids = self.get_host_templates_by_host_id(host_id)
        if set(list(template_ids)) != set(exist_template_ids):
            return True
        if host['proxy_hostid'] != proxy_id:
            return True
        if host['name'] != visible_name:
            return True
        return False
    # link or clear template of the host
    def link_or_clear_template(self, host_id, template_id_list):
        """Link the given templates to the host and unlink+clear all others."""
        # get host's exist template ids
        exist_template_id_list = self.get_host_templates_by_host_id(host_id)
        exist_template_ids = set(exist_template_id_list)
        template_ids = set(template_id_list)
        template_id_list = list(template_ids)
        # get unlink and clear templates
        templates_clear = exist_template_ids.difference(template_ids)
        templates_clear_list = list(templates_clear)
        request_str = {'hostid': host_id, 'templates': template_id_list, 'templates_clear': templates_clear_list}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to link template to host: %s" % e)
    # Update the host inventory_mode
    def update_inventory_mode(self, host_id, inventory_mode):
        """Set the host's inventory mode ('automatic'/'manual'/'disabled')."""
        # nothing was set, do nothing
        if not inventory_mode:
            return
        # map the module's string choices onto the API's numeric codes
        if inventory_mode == "automatic":
            inventory_mode = int(1)
        elif inventory_mode == "manual":
            inventory_mode = int(0)
        elif inventory_mode == "disabled":
            inventory_mode = int(-1)
        # watch for - https://support.zabbix.com/browse/ZBX-6033
        request_str = {'hostid': host_id, 'inventory_mode': inventory_mode}
        try:
            if self._module.check_mode:
                self._module.exit_json(changed=True)
            self._zapi.host.update(request_str)
        except Exception as e:
            self._module.fail_json(msg="Failed to set inventory_mode to host: %s" % e)
def main():
    """Ansible module entry point: create, update or delete a Zabbix host."""
    module = AnsibleModule(
        argument_spec=dict(
            server_url=dict(type='str', required=True, aliases=['url']),
            login_user=dict(type='str', required=True),
            login_password=dict(type='str', required=True, no_log=True),
            host_name=dict(type='str', required=True),
            http_login_user=dict(type='str', required=False, default=None),
            http_login_password=dict(type='str', required=False, default=None, no_log=True),
            host_groups=dict(type='list', required=False),
            link_templates=dict(type='list', required=False),
            status=dict(default="enabled", choices=['enabled', 'disabled']),
            state=dict(default="present", choices=['present', 'absent']),
            inventory_mode=dict(required=False, choices=['automatic', 'manual', 'disabled']),
            timeout=dict(type='int', default=10),
            interfaces=dict(type='list', required=False),
            force=dict(type='bool', default=True),
            proxy=dict(type='str', required=False),
            visible_name=dict(type='str', required=False)
        ),
        supports_check_mode=True
    )
    if not HAS_ZABBIX_API:
        module.fail_json(msg="Missing required zabbix-api module (check docs or install with: pip install zabbix-api)")
    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']
    http_login_user = module.params['http_login_user']
    http_login_password = module.params['http_login_password']
    host_name = module.params['host_name']
    visible_name = module.params['visible_name']
    host_groups = module.params['host_groups']
    link_templates = module.params['link_templates']
    inventory_mode = module.params['inventory_mode']
    status = module.params['status']
    state = module.params['state']
    timeout = module.params['timeout']
    interfaces = module.params['interfaces']
    force = module.params['force']
    proxy = module.params['proxy']
    # convert enabled to 0; disabled to 1
    status = 1 if status == "disabled" else 0
    zbx = None
    # login to zabbix
    try:
        zbx = ZabbixAPIExtends(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password)
        zbx.login(login_user, login_password)
    except Exception as e:
        module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
    host = Host(module, zbx)
    template_ids = []
    if link_templates:
        template_ids = host.get_template_ids(link_templates)
    group_ids = []
    if host_groups:
        group_ids = host.get_group_ids_by_group_names(host_groups)
    # remember the agent interface IP purely for the result message below
    ip = ""
    if interfaces:
        for interface in interfaces:
            if interface['type'] == 1:
                ip = interface['ip']
    # check if host exist
    is_host_exist = host.is_host_exist(host_name)
    if is_host_exist:
        # Use proxy specified, or set to None when updating host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = None
        # get host id by host name
        zabbix_host_obj = host.get_host_by_host_name(host_name)
        host_id = zabbix_host_obj['hostid']
        if state == "absent":
            # remove host
            host.delete_host(host_id, host_name)
            module.exit_json(changed=True, result="Successfully delete host %s" % host_name)
        else:
            if not group_ids:
                module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)
            if not force:
                module.fail_json(changed=False, result="Host present, Can't update configuration without force")
            # get exist host's interfaces
            exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
            # deep copy because update_host removes items from the list it
            # is given while reconciling interfaces
            exist_interfaces_copy = copy.deepcopy(exist_interfaces)
            # update host
            interfaces_len = len(interfaces) if interfaces else 0
            if len(exist_interfaces) > interfaces_len:
                # more interfaces exist than desired: compare first, and if
                # changed, relink templates before updating the host
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces, zabbix_host_obj, proxy_id, visible_name):
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_host(host_name, group_ids, status, host_id,
                                     interfaces, exist_interfaces, proxy_id, visible_name)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
            else:
                # compare against the untouched copy, then update
                if host.check_all_properties(host_id, host_groups, status, interfaces, template_ids,
                                             exist_interfaces_copy, zabbix_host_obj, proxy_id, visible_name):
                    host.update_host(host_name, group_ids, status, host_id, interfaces, exist_interfaces, proxy_id, visible_name)
                    host.link_or_clear_template(host_id, template_ids)
                    host.update_inventory_mode(host_id, inventory_mode)
                    module.exit_json(changed=True,
                                     result="Successfully update host %s (%s) and linked with template '%s'"
                                            % (host_name, ip, link_templates))
                else:
                    module.exit_json(changed=False)
    else:
        if state == "absent":
            # the host is already deleted.
            module.exit_json(changed=False)
        # Use proxy specified, or set to 0 when adding new host
        if proxy:
            proxy_id = host.get_proxyid_by_proxy_name(proxy)
        else:
            proxy_id = 0
        if not group_ids:
            module.fail_json(msg="Specify at least one group for creating host '%s'." % host_name)
        if not interfaces or (interfaces and len(interfaces) == 0):
            module.fail_json(msg="Specify at least one interface for creating host '%s'." % host_name)
        # create host
        host_id = host.add_host(host_name, group_ids, status, interfaces, proxy_id, visible_name)
        host.link_or_clear_template(host_id, template_ids)
        host.update_inventory_mode(host_id, inventory_mode)
        module.exit_json(changed=True, result="Successfully added host %s (%s) and linked with template '%s'" % (
            host_name, ip, link_templates))
# Ansible module entry point when executed directly.
if __name__ == '__main__':
    main()
| {
"content_hash": "49a18b6116ea3c10d1906742a0060f58",
"timestamp": "",
"source": "github",
"line_count": 566,
"max_line_length": 129,
"avg_line_length": 38.87809187279152,
"alnum_prop": 0.5798227675528289,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "552113451d467965c36534b9488a21a285ec8a29",
"size": "22176",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/modules/monitoring/zabbix_host.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
from supriya.system.SupriyaObject import SupriyaObject
class Moment(SupriyaObject):
    """
    A snapshot of the intervals that touch a single start offset in an
    interval tree.
    """

    ### CLASS VARIABLES ###

    __slots__ = (
        "_interval_tree",
        "_overlap_intervals",
        "_start_intervals",
        "_start_offset",
        "_stop_intervals",
    )

    ### INITIALIZER ###

    def __init__(
        self,
        interval_tree=None,
        overlap_intervals=None,
        start_intervals=None,
        start_offset=None,
        stop_intervals=None,
    ):
        # Store everything verbatim; the read-only properties below expose it.
        self._interval_tree = interval_tree
        self._overlap_intervals = overlap_intervals
        self._start_intervals = start_intervals
        self._start_offset = start_offset
        self._stop_intervals = stop_intervals

    ### SPECIAL METHODS ###

    def __repr__(self):
        """
        Gets the repr of this moment.
        """
        active_count = len(self.start_intervals) + len(self.overlap_intervals)
        return "<{}({} <<{}>>)>".format(
            type(self).__name__, str(self.start_offset), active_count
        )

    ### PUBLIC PROPERTIES ###

    @property
    def next_moment(self):
        """
        Gets the moment that follows this one in the underlying
        interval tree, or none if there is no later moment.
        """
        # TODO: This doesn't take into account stop offsets
        tree = self._interval_tree
        if tree is not None:
            offset = tree.get_start_offset_after(self.start_offset)
            if offset is not None:
                return tree.get_moment_at(offset)
        return None

    @property
    def next_start_offset(self):
        """
        Gets the start offset of the following moment in this moment's
        interval tree, or none.
        """
        tree = self._interval_tree
        return None if tree is None else tree.get_start_offset_after(self.start_offset)

    @property
    def overlap_intervals(self):
        """
        Gets the intervals in this moment overlapping its start offset.
        """
        return self._overlap_intervals

    @property
    def previous_moment(self):
        """
        Gets the moment that precedes this one in the underlying
        interval tree, or none if there is no earlier moment.
        """
        # TODO: This doesn't take into account stop offsets
        tree = self._interval_tree
        if tree is not None:
            offset = tree.get_start_offset_before(self.start_offset)
            if offset is not None:
                return tree.get_moment_at(offset)
        return None

    @property
    def previous_start_offset(self):
        """
        Gets the start offset of the preceding moment in this moment's
        interval tree, or none.
        """
        tree = self._interval_tree
        return None if tree is None else tree.get_start_offset_before(self.start_offset)

    @property
    def start_offset(self):
        """
        Gets this moment's start offset.
        """
        return self._start_offset

    @property
    def start_intervals(self):
        """
        Gets the intervals in this moment that begin exactly at its
        start offset.
        """
        return self._start_intervals

    @property
    def stop_intervals(self):
        """
        Gets the intervals in this moment that stop exactly at its
        start offset.
        """
        return self._stop_intervals

    @property
    def interval_tree(self):
        """
        Gets this moment's interval tree.
        """
        return self._interval_tree
| {
"content_hash": "923c886a06c705884298d07afb281411",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 70,
"avg_line_length": 26,
"alnum_prop": 0.5605976757055894,
"repo_name": "Pulgama/supriya",
"id": "b864ce159873a20092a57c417a1e7a86d473abeb",
"size": "3614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "supriya/intervals/Moment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6712"
},
{
"name": "CSS",
"bytes": "446"
},
{
"name": "HTML",
"bytes": "1083"
},
{
"name": "JavaScript",
"bytes": "6163"
},
{
"name": "Makefile",
"bytes": "6775"
},
{
"name": "Python",
"bytes": "2790612"
},
{
"name": "Shell",
"bytes": "569"
}
],
"symlink_target": ""
} |
import time
from daemon import runner
class App():
    """Daemon application descriptor consumed by ``daemon.runner.DaemonRunner``."""

    def __init__(self):
        # Wire the daemonized process's standard streams to terminal devices.
        stream_paths = {
            "stdin_path": "/dev/stdin",
            "stdout_path": "/dev/stdout",
            "stderr_path": "/dev/stderr",
        }
        for attribute, device in stream_paths.items():
            setattr(self, attribute, device)
        # Location of the PID lockfile and how long to wait for its lock.
        self.pidfile_path = "/tmp/foo.pid"
        self.pidfile_timeout = 5

    def run(self):
        """Daemon body: print a greeting every ten seconds, forever."""
        while True:
            print("Howdy! Gig'em! Whoop!")
            time.sleep(10)
# Hand the app to python-daemon's runner, which parses the requested action
# (start/stop/restart) from the command line and daemonizes the process.
app = App()
daemon_runner = runner.DaemonRunner(app)
daemon_runner.do_action()
| {
"content_hash": "68e4dc9e4cc2ad0accfceb6fac72837c",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 44,
"avg_line_length": 23.3,
"alnum_prop": 0.5793991416309013,
"repo_name": "dmwesterhoff/slackd",
"id": "ab616e68fead72edff52ad506b71bb497cf1f797",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slackd/slackd_runner.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11747"
}
],
"symlink_target": ""
} |
from __future__ import annotations
import warnings
from typing import Sequence
from flask import g
from sqlalchemy import or_
from sqlalchemy.orm import joinedload
from airflow.exceptions import AirflowException, RemovedInAirflow3Warning
from airflow.models import DagBag, DagModel
from airflow.security import permissions
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import provide_session
from airflow.www.fab_security.sqla.manager import SecurityManager
from airflow.www.fab_security.sqla.models import Permission, Resource, Role, User
from airflow.www.fab_security.views import (
ActionModelView,
CustomResetMyPasswordView,
CustomResetPasswordView,
CustomRoleModelView,
CustomUserDBModelView,
CustomUserInfoEditView,
CustomUserLDAPModelView,
CustomUserOAuthModelView,
CustomUserOIDModelView,
CustomUserRemoteUserModelView,
CustomUserStatsChartView,
PermissionPairModelView,
ResourceModelView,
)
from airflow.www.utils import CustomSQLAInterface
# Names of the roles Airflow ships with out of the box; any other role found
# in the database is treated as a custom (user-created) role.
EXISTING_ROLES = {
    "Admin",
    "Viewer",
    "User",
    "Op",
    "Public",
}
class AirflowSecurityManager(SecurityManager, LoggingMixin):
    """Custom security manager, which introduces a permission model adapted to Airflow"""
    ###########################################################################
    # PERMISSIONS
    ###########################################################################
    # [START security_viewer_perms]
    VIEWER_PERMISSIONS = [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_AUDIT_LOG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_DEPENDENCIES),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_CODE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DATASET),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_IMPORT_ERROR),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG_WARNING),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_JOB),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PASSWORD),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PASSWORD),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_MY_PROFILE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_MY_PROFILE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_PLUGIN),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_SLA_MISS),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_LOG),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_XCOM),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_BROWSE_MENU),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_DEPENDENCIES),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DATASET),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_DOCS_MENU),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_JOB),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_AUDIT_LOG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_PLUGIN),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_SLA_MISS),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_INSTANCE),
    ]
    # [END security_viewer_perms]
    # [START security_user_perms]
    USER_PERMISSIONS = [
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_TASK_INSTANCE),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG_RUN),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG_RUN),
    ]
    # [END security_user_perms]
    # [START security_op_perms]
    OP_PERMISSIONS = [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONFIG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_ADMIN_MENU),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONFIG),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_XCOM),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_CONNECTION),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_POOL),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_PROVIDER),
        (permissions.ACTION_CAN_CREATE, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_VARIABLE),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_XCOM),
    ]
    # [END security_op_perms]
    # Extra permissions granted only to the Admin role, on top of the Op set.
    ADMIN_PERMISSIONS = [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TASK_RESCHEDULE),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TASK_RESCHEDULE),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_TRIGGER),
        (permissions.ACTION_CAN_ACCESS_MENU, permissions.RESOURCE_TRIGGER),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_PASSWORD),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_PASSWORD),
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_ROLE),
        (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_ROLE),
    ]
    # global resource for dag-level access
    DAG_RESOURCES = {permissions.RESOURCE_DAG}
    DAG_ACTIONS = permissions.DAG_ACTIONS
    ###########################################################################
    # DEFAULT ROLE CONFIGURATIONS
    ###########################################################################
    # Each built-in role's permission set is a superset of the previous one:
    # Public < Viewer < User < Op < Admin.
    ROLE_CONFIGS = [
        {"role": "Public", "perms": []},
        {"role": "Viewer", "perms": VIEWER_PERMISSIONS},
        {
            "role": "User",
            "perms": VIEWER_PERMISSIONS + USER_PERMISSIONS,
        },
        {
            "role": "Op",
            "perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS,
        },
        {
            "role": "Admin",
            "perms": VIEWER_PERMISSIONS + USER_PERMISSIONS + OP_PERMISSIONS + ADMIN_PERMISSIONS,
        },
    ]
    # FAB model-view overrides so the security UI uses Airflow's customized views.
    actionmodelview = ActionModelView
    permissionmodelview = PermissionPairModelView
    rolemodelview = CustomRoleModelView
    resourcemodelview = ResourceModelView
    userdbmodelview = CustomUserDBModelView
    resetmypasswordview = CustomResetMyPasswordView
    resetpasswordview = CustomResetPasswordView
    userinfoeditview = CustomUserInfoEditView
    userldapmodelview = CustomUserLDAPModelView
    useroauthmodelview = CustomUserOAuthModelView
    userremoteusermodelview = CustomUserRemoteUserModelView
    useroidmodelview = CustomUserOIDModelView
    userstatschartview = CustomUserStatsChartView
    def __init__(self, appbuilder):
        """Initialize the manager and swap every view's datamodel for ``CustomSQLAInterface``."""
        super().__init__(appbuilder)
        # Go and fix up the SQLAInterface used from the stock one to our subclass.
        # This is needed to support the "hack" where we had to edit
        # FieldConverter.conversion_table in place in airflow.www.utils
        for attr in dir(self):
            if not attr.endswith("view"):
                continue
            view = getattr(self, attr, None)
            if not view or not getattr(view, "datamodel", None):
                continue
            view.datamodel = CustomSQLAInterface(view.datamodel.obj)
        self.perms = None
    def _get_root_dag_id(self, dag_id):
        """Return the root DAG id for a dotted sub-DAG id, else ``dag_id`` unchanged."""
        if "." in dag_id:
            dm = (
                self.get_session.query(DagModel.dag_id, DagModel.root_dag_id)
                .filter(DagModel.dag_id == dag_id)
                .first()
            )
            return dm.root_dag_id or dm.dag_id
        return dag_id
    def init_role(self, role_name, perms):
        """
        Initialize the role with actions and related resources.
        :param role_name:
        :param perms:
        :return:
        """
        warnings.warn(
            "`init_role` has been deprecated. Please use `bulk_sync_roles` instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        self.bulk_sync_roles([{"role": role_name, "perms": perms}])
    def bulk_sync_roles(self, roles):
        """Sync the provided roles and permissions."""
        existing_roles = self._get_all_roles_with_permissions()
        non_dag_perms = self._get_all_non_dag_permissions()
        for config in roles:
            role_name = config["role"]
            perms = config["perms"]
            # Reuse existing role/permission rows where possible to avoid duplicates.
            role = existing_roles.get(role_name) or self.add_role(role_name)
            for action_name, resource_name in perms:
                perm = non_dag_perms.get((action_name, resource_name)) or self.create_permission(
                    action_name, resource_name
                )
                if perm not in role.permissions:
                    self.add_permission_to_role(role, perm)
    def delete_role(self, role_name):
        """
        Delete the given Role
        :param role_name: the name of a role in the ab_role table
        """
        session = self.get_session
        role = session.query(Role).filter(Role.name == role_name).first()
        if role:
            self.log.info("Deleting role '%s'", role_name)
            session.delete(role)
            session.commit()
        else:
            raise AirflowException(f"Role named '{role_name}' does not exist")
    @staticmethod
    def get_user_roles(user=None):
        """
        Get all the roles associated with the user.
        :param user: the ab_user in FAB model.
        :return: a list of roles associated with the user.
        """
        if user is None:
            # Default to the user of the current request.
            user = g.user
        return user.roles
    def get_readable_dags(self, user):
        """Gets the DAGs readable by authenticated user."""
        warnings.warn(
            "`get_readable_dags` has been deprecated. Please use `get_readable_dag_ids` instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RemovedInAirflow3Warning)
            return self.get_accessible_dags([permissions.ACTION_CAN_READ], user)
    def get_editable_dags(self, user):
        """Gets the DAGs editable by authenticated user."""
        warnings.warn(
            "`get_editable_dags` has been deprecated. Please use `get_editable_dag_ids` instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", RemovedInAirflow3Warning)
            return self.get_accessible_dags([permissions.ACTION_CAN_EDIT], user)
    @provide_session
    def get_accessible_dags(self, user_actions, user, session=None):
        """Deprecated: gets the DagModel rows accessible to the user for the given actions."""
        warnings.warn(
            "`get_accessible_dags` has been deprecated. Please use `get_accessible_dag_ids` instead.",
            RemovedInAirflow3Warning,
            stacklevel=3,
        )
        dag_ids = self.get_accessible_dag_ids(user, user_actions, session)
        return session.query(DagModel).filter(DagModel.dag_id.in_(dag_ids))
    def get_readable_dag_ids(self, user) -> set[str]:
        """Gets the DAG IDs readable by authenticated user."""
        return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_READ])
    def get_editable_dag_ids(self, user) -> set[str]:
        """Gets the DAG IDs editable by authenticated user."""
        return self.get_accessible_dag_ids(user, [permissions.ACTION_CAN_EDIT])
    @provide_session
    def get_accessible_dag_ids(self, user, user_actions=None, session=None) -> set[str]:
        """Generic function to get readable or writable DAGs for user."""
        if not user_actions:
            user_actions = [permissions.ACTION_CAN_EDIT, permissions.ACTION_CAN_READ]
        if user.is_anonymous:
            # Anonymous users carry only the roles FAB assigned to them in memory.
            roles = user.roles
        else:
            # Eager-load roles -> permissions -> action/resource in one query.
            user_query = (
                session.query(User)
                .options(
                    joinedload(User.roles)
                    .subqueryload(Role.permissions)
                    .options(joinedload(Permission.action), joinedload(Permission.resource))
                )
                .filter(User.id == user.id)
                .first()
            )
            roles = user_query.roles
        resources = set()
        for role in roles:
            for permission in role.permissions:
                action = permission.action.name
                if action not in user_actions:
                    continue
                resource = permission.resource.name
                if resource == permissions.RESOURCE_DAG:
                    # A global DAG permission short-circuits: every dag_id is accessible.
                    return {dag.dag_id for dag in session.query(DagModel.dag_id)}
                if resource.startswith(permissions.RESOURCE_DAG_PREFIX):
                    # Strip the per-DAG resource prefix to recover the dag_id.
                    resources.add(resource[len(permissions.RESOURCE_DAG_PREFIX) :])
                else:
                    resources.add(resource)
        return {dag.dag_id for dag in session.query(DagModel.dag_id).filter(DagModel.dag_id.in_(resources))}
    def can_access_some_dags(self, action: str, dag_id: str | None = None) -> bool:
        """Checks if user has read or write access to some dags."""
        if dag_id and dag_id != "~":
            root_dag_id = self._get_root_dag_id(dag_id)
            return self.has_access(action, permissions.resource_name_for_dag(root_dag_id))
        user = g.user
        if action == permissions.ACTION_CAN_READ:
            return any(self.get_readable_dag_ids(user))
        return any(self.get_editable_dag_ids(user))
    def can_read_dag(self, dag_id, user=None) -> bool:
        """Determines whether a user has DAG read access."""
        root_dag_id = self._get_root_dag_id(dag_id)
        dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
        return self.has_access(permissions.ACTION_CAN_READ, dag_resource_name, user=user)
    def can_edit_dag(self, dag_id, user=None) -> bool:
        """Determines whether a user has DAG edit access."""
        root_dag_id = self._get_root_dag_id(dag_id)
        dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
        return self.has_access(permissions.ACTION_CAN_EDIT, dag_resource_name, user=user)
    def can_delete_dag(self, dag_id, user=None) -> bool:
        """Determines whether a user has DAG delete access."""
        root_dag_id = self._get_root_dag_id(dag_id)
        dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
        return self.has_access(permissions.ACTION_CAN_DELETE, dag_resource_name, user=user)
    def prefixed_dag_id(self, dag_id):
        """Returns the permission name for a DAG id."""
        warnings.warn(
            "`prefixed_dag_id` has been deprecated. "
            "Please use `airflow.security.permissions.resource_name_for_dag` instead.",
            RemovedInAirflow3Warning,
            stacklevel=2,
        )
        root_dag_id = self._get_root_dag_id(dag_id)
        return permissions.resource_name_for_dag(root_dag_id)
    def is_dag_resource(self, resource_name):
        """Determines if a resource belongs to a DAG or all DAGs."""
        if resource_name == permissions.RESOURCE_DAG:
            return True
        return resource_name.startswith(permissions.RESOURCE_DAG_PREFIX)
    def has_access(self, action_name, resource_name, user=None) -> bool:
        """
        Verify whether a given user could perform a certain action
        (e.g can_read, can_write, can_delete) on the given resource.
        :param action_name: action_name on resource (e.g can_read, can_edit).
        :param resource_name: name of view-menu or resource.
        :param user: user name
        :return: Whether user could perform certain action on the resource.
        :rtype bool
        """
        if not user:
            user = g.user
        if (action_name, resource_name) in user.perms:
            return True
        # Per-DAG resources may also be satisfied by the global DAG permission.
        if self.is_dag_resource(resource_name):
            if (action_name, permissions.RESOURCE_DAG) in user.perms:
                return True
            return (action_name, resource_name) in user.perms
        return False
    def _has_role(self, role_name_or_list, user):
        """Whether the user has this role name"""
        if not isinstance(role_name_or_list, list):
            role_name_or_list = [role_name_or_list]
        return any(r.name in role_name_or_list for r in user.roles)
    def has_all_dags_access(self, user):
        """
        Has all the dag access in any of the 3 cases:
        1. Role needs to be in (Admin, Viewer, User, Op).
        2. Has can_read action on dags resource.
        3. Has can_edit action on dags resource.
        """
        if not user:
            user = g.user
        return (
            self._has_role(["Admin", "Viewer", "Op", "User"], user)
            or self.has_access(permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG, user)
            or self.has_access(permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG, user)
        )
    def clean_perms(self):
        """FAB leaves faulty permissions that need to be cleaned up"""
        self.log.debug("Cleaning faulty perms")
        sesh = self.get_session
        # A permission is "faulty" when it lost its action or its resource.
        perms = sesh.query(Permission).filter(
            or_(
                Permission.action == None, # noqa
                Permission.resource == None, # noqa
            )
        )
        # Since FAB doesn't define ON DELETE CASCADE on these tables, we need
        # to delete the _object_ so that SQLA knows to delete the many-to-many
        # relationship object too. :(
        deleted_count = 0
        for perm in perms:
            sesh.delete(perm)
            deleted_count += 1
        sesh.commit()
        if deleted_count:
            self.log.info("Deleted %s faulty permissions", deleted_count)
    def _merge_perm(self, action_name, resource_name):
        """
        Add the new (action, resource) to assoc_permission_role if it doesn't exist.
        It will add the related entry to ab_permission and ab_resource two meta tables as well.
        :param action_name: Name of the action
        :param resource_name: Name of the resource
        :return:
        """
        action = self.get_action(action_name)
        resource = self.get_resource(resource_name)
        perm = None
        if action and resource:
            perm = (
                self.get_session.query(self.permission_model)
                .filter_by(action=action, resource=resource)
                .first()
            )
        if not perm and action_name and resource_name:
            self.create_permission(action_name, resource_name)
    def add_homepage_access_to_custom_roles(self):
        """
        Add Website.can_read access to all custom roles.
        :return: None.
        """
        website_permission = self.create_permission(permissions.ACTION_CAN_READ, permissions.RESOURCE_WEBSITE)
        # Custom roles are everything not in the EXISTING_ROLES built-in set.
        custom_roles = [role for role in self.get_all_roles() if role.name not in EXISTING_ROLES]
        for role in custom_roles:
            self.add_permission_to_role(role, website_permission)
        self.get_session.commit()
    def get_all_permissions(self) -> set[tuple[str, str]]:
        """Returns all permissions as a set of tuples with the action and resource names"""
        return set(
            self.get_session.query(self.permission_model)
            .join(self.permission_model.action)
            .join(self.permission_model.resource)
            .with_entities(self.action_model.name, self.resource_model.name)
            .all()
        )
    def _get_all_non_dag_permissions(self) -> dict[tuple[str, str], Permission]:
        """
        Returns a dict with a key of (action_name, resource_name) and value of permission
        with all permissions except those that are for specific DAGs.
        """
        return {
            (action_name, resource_name): viewmodel
            for action_name, resource_name, viewmodel in (
                self.get_session.query(self.permission_model)
                .join(self.permission_model.action)
                .join(self.permission_model.resource)
                .filter(~self.resource_model.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
                .with_entities(self.action_model.name, self.resource_model.name, self.permission_model)
                .all()
            )
        }
    def _get_all_roles_with_permissions(self) -> dict[str, Role]:
        """Returns a dict with a key of role name and value of role with early loaded permissions"""
        return {
            r.name: r
            for r in (
                self.get_session.query(self.role_model).options(joinedload(self.role_model.permissions)).all()
            )
        }
    def create_dag_specific_permissions(self) -> None:
        """
        Creates 'can_read', 'can_edit', and 'can_delete' permissions for all
        DAGs, along with any `access_control` permissions provided in them.
        This does iterate through ALL the DAGs, which can be slow. See `sync_perm_for_dag`
        if you only need to sync a single DAG.
        :return: None.
        """
        perms = self.get_all_permissions()
        dagbag = DagBag(read_dags_from_db=True)
        dagbag.collect_dags_from_db()
        dags = dagbag.dags.values()
        for dag in dags:
            # Sub-DAG permissions are attached to their root DAG's resource.
            root_dag_id = dag.parent_dag.dag_id if dag.parent_dag else dag.dag_id
            dag_resource_name = permissions.resource_name_for_dag(root_dag_id)
            for action_name in self.DAG_ACTIONS:
                if (action_name, dag_resource_name) not in perms:
                    self._merge_perm(action_name, dag_resource_name)
            if dag.access_control:
                self.sync_perm_for_dag(dag_resource_name, dag.access_control)
    def update_admin_permission(self):
        """
        Admin should have all the permissions, except the dag permissions.
        because Admin already has Dags permission.
        Add the missing ones to the table for admin.
        :return: None.
        """
        dag_resources = (
            self.get_session.query(Resource)
            .filter(Resource.name.like(f"{permissions.RESOURCE_DAG_PREFIX}%"))
            .all()
        )
        resource_ids = [resource.id for resource in dag_resources]
        perms = self.get_session.query(Permission).filter(~Permission.resource_id.in_(resource_ids)).all()
        # Skip faulty permissions that lost their action or resource (see clean_perms).
        perms = [p for p in perms if p.action and p.resource]
        admin = self.find_role("Admin")
        admin.permissions = list(set(admin.permissions) | set(perms))
        self.get_session.commit()
    def sync_roles(self):
        """
        1. Init the default role(Admin, Viewer, User, Op, public)
        with related permissions.
        2. Init the custom role(dag-user) with related permissions.
        :return: None.
        """
        # Create global all-dag permissions
        self.create_perm_vm_for_all_dag()
        # Sync the default roles (Admin, Viewer, User, Op, public) with related permissions
        self.bulk_sync_roles(self.ROLE_CONFIGS)
        self.add_homepage_access_to_custom_roles()
        # init existing roles, the rest role could be created through UI.
        self.update_admin_permission()
        self.clean_perms()
    def sync_resource_permissions(self, perms=None):
        """Populates resource-based permissions."""
        if not perms:
            return
        for action_name, resource_name in perms:
            self.create_resource(resource_name)
            self.create_permission(action_name, resource_name)
    def sync_perm_for_dag(self, dag_id, access_control=None):
        """
        Sync permissions for given dag id. The dag id surely exists in our dag bag
        as only / refresh button or DagBag will call this function
        :param dag_id: the ID of the DAG whose permissions should be updated
        :param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g.,
            {'can_read'}
        :return:
        """
        dag_resource_name = permissions.resource_name_for_dag(dag_id)
        for dag_action_name in self.DAG_ACTIONS:
            self.create_permission(dag_action_name, dag_resource_name)
        if access_control:
            self._sync_dag_view_permissions(dag_resource_name, access_control)
    def _sync_dag_view_permissions(self, dag_id, access_control):
        """
        Set the access policy on the given DAG's ViewModel.
        :param dag_id: the ID of the DAG whose permissions should be updated
        :param access_control: a dict where each key is a rolename and
            each value is a set() of action names (e.g. {'can_read'})
        """
        dag_resource_name = permissions.resource_name_for_dag(dag_id)
        def _get_or_create_dag_permission(action_name: str) -> Permission | None:
            # Look up the (action, dag-resource) permission, creating it on demand.
            perm = self.get_permission(action_name, dag_resource_name)
            if not perm:
                self.log.info("Creating new action '%s' on resource '%s'", action_name, dag_resource_name)
                perm = self.create_permission(action_name, dag_resource_name)
            return perm
        def _revoke_stale_permissions(resource: Resource):
            # Remove grants from non-admin roles no longer listed in access_control.
            existing_dag_perms = self.get_resource_permissions(resource)
            for perm in existing_dag_perms:
                non_admin_roles = [role for role in perm.role if role.name != "Admin"]
                for role in non_admin_roles:
                    target_perms_for_role = access_control.get(role.name, {})
                    if perm.action.name not in target_perms_for_role:
                        self.log.info(
                            "Revoking '%s' on DAG '%s' for role '%s'",
                            perm.action,
                            dag_resource_name,
                            role.name,
                        )
                        self.remove_permission_from_role(role, perm)
        resource = self.get_resource(dag_resource_name)
        if resource:
            _revoke_stale_permissions(resource)
        for rolename, action_names in access_control.items():
            role = self.find_role(rolename)
            if not role:
                raise AirflowException(
                    f"The access_control mapping for DAG '{dag_id}' includes a role named "
                    f"'{rolename}', but that role does not exist"
                )
            action_names = set(action_names)
            invalid_action_names = action_names - self.DAG_ACTIONS
            if invalid_action_names:
                raise AirflowException(
                    f"The access_control map for DAG '{dag_resource_name}' includes "
                    f"the following invalid permissions: {invalid_action_names}; "
                    f"The set of valid permissions is: {self.DAG_ACTIONS}"
                )
            for action_name in action_names:
                dag_perm = _get_or_create_dag_permission(action_name)
                if dag_perm:
                    self.add_permission_to_role(role, dag_perm)
    def create_perm_vm_for_all_dag(self):
        """Create perm-vm if not exist and insert into FAB security model for all-dags."""
        # create perm for global logical dag
        for resource_name in self.DAG_RESOURCES:
            for action_name in self.DAG_ACTIONS:
                self._merge_perm(action_name, resource_name)
    def check_authorization(
        self, perms: Sequence[tuple[str, str]] | None = None, dag_id: str | None = None
    ) -> bool:
        """Checks that the logged in user has the specified permissions."""
        if not perms:
            return True
        for perm in perms:
            if perm in (
                (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_EDIT, permissions.RESOURCE_DAG),
                (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
            ):
                # A global DAG permission is satisfied by global access, or by
                # access to the specific DAG (or at least one DAG) requested.
                can_access_all_dags = self.has_access(*perm)
                if can_access_all_dags:
                    continue
                action = perm[0]
                if self.can_access_some_dags(action, dag_id):
                    continue
                return False
            elif not self.has_access(*perm):
                return False
        return True
class ApplessAirflowSecurityManager(AirflowSecurityManager):
    """Security manager variant usable without a full Flask application.

    Carries an externally supplied SQLAlchemy session instead of taking
    one from an appbuilder.
    """

    def __init__(self, session=None):
        # Deliberately skips super().__init__(): there is no appbuilder here.
        self.session = session

    @property
    def get_session(self):
        """Expose the stored session under the FAB-compatible property name."""
        return self.session
| {
"content_hash": "74f984b601716132411815e583f6e84b",
"timestamp": "",
"source": "github",
"line_count": 715,
"max_line_length": 110,
"avg_line_length": 42.13566433566434,
"alnum_prop": 0.6158263351810668,
"repo_name": "apache/airflow",
"id": "ce75970b14907e48a4827f51beac836a15ceac92",
"size": "30914",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/www/security.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
"""Compute Linearly constrained minimum variance (LCMV) beamformer."""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Roman Goj <roman.goj@gmail.com>
# Britta Westner <britta.wstnr@gmail.com>
#
# License: BSD-3-Clause
import numpy as np
from ..rank import compute_rank
from ..io.meas_info import _simplify_info
from ..io.pick import pick_channels_cov, pick_info
from ..forward import _subject_from_forward
from ..minimum_norm.inverse import combine_xyz, _check_reference, _check_depth
from ..source_estimate import _make_stc, _get_src_type
from ..utils import (logger, verbose, _check_channels_spatial_filter,
_check_one_ch_type, _check_info_inv)
from ._compute_beamformer import (
_prepare_beamformer_input, _compute_power,
_compute_beamformer, _check_src_type, Beamformer, _proj_whiten_data)
@verbose
def make_lcmv(info, forward, data_cov, reg=0.05, noise_cov=None, label=None,
              pick_ori=None, rank='info',
              weight_norm='unit-noise-gain-invariant',
              reduce_rank=False, depth=None, inversion='matrix', verbose=None):
    """Compute LCMV spatial filter.
    Parameters
    ----------
    %(info_not_none)s
        Specifies the channels to include. Bad channels (in ``info['bads']``)
        are not used.
    forward : instance of Forward
        Forward operator.
    data_cov : instance of Covariance
        The data covariance.
    reg : float
        The regularization for the whitened data covariance.
    noise_cov : instance of Covariance
        The noise covariance. If provided, whitening will be done. Providing a
        noise covariance is mandatory if you mix sensor types, e.g.
        gradiometers with magnetometers or EEG with MEG.
        .. note::
            If ``noise_cov`` is ``None`` and ``weight_norm='unit-noise-gain'``,
            the unit noise is assumed to be 1 in SI units, e.g., 1 T for
            magnetometers, 1 V for EEG, so resulting amplitudes will be tiny.
            Consider using :func:`mne.make_ad_hoc_cov` to provide a
            ``noise_cov`` to set noise values that are more reasonable for
            neural data or using ``weight_norm='nai'`` for weight-normalized
            beamformer output that is scaled by a noise estimate.
    label : instance of Label
        Restricts the LCMV solution to a given label.
    %(pick_ori_bf)s
        - ``'vector'``
            Keeps the currents for each direction separate
    %(rank_info)s
    %(weight_norm)s
        Defaults to ``'unit-noise-gain-invariant'``.
    %(reduce_rank)s
    %(depth)s
        .. versionadded:: 0.18
    %(inversion_bf)s
        .. versionadded:: 0.21
    %(verbose)s
    Returns
    -------
    filters : instance of Beamformer
        Dictionary containing filter weights from LCMV beamformer.
        Contains the following keys:
            'kind' : str
                The type of beamformer, in this case 'LCMV'.
            'weights' : array
                The filter weights of the beamformer.
            'data_cov' : instance of Covariance
                The data covariance matrix used to compute the beamformer.
            'noise_cov' : instance of Covariance | None
                The noise covariance matrix used to compute the beamformer.
            'whitener' : None | ndarray, shape (n_channels, n_channels)
                Whitening matrix, provided if whitening was applied to the
                covariance matrix and leadfield during computation of the
                beamformer weights.
            'weight_norm' : str | None
                Type of weight normalization used to compute the filter
                weights.
            'pick_ori' : None | 'max-power' | 'normal' | 'vector'
                The orientation in which the beamformer filters were computed.
            'ch_names' : list of str
                Channels used to compute the beamformer.
            'proj' : ndarray, shape (n_channels, n_channels)
                Projections used to compute the beamformer.
            'is_ssp' : bool
                If True, projections were applied prior to filter computation.
            'vertices' : list
                Vertices for which the filter weights were computed.
            'is_free_ori' : bool
                If True, the filter was computed with free source orientation.
            'n_sources' : int
                Number of source location for which the filter weight were
                computed.
            'src_type' : str
                Type of source space.
            'source_nn' : ndarray, shape (n_sources, 3)
                For each source location, the surface normal.
            'subject' : str
                The subject ID.
            'rank' : int
                The rank of the data covariance matrix used to compute the
                beamformer weights.
            'max-power-ori' : ndarray, shape (n_sources, 3) | None
                When pick_ori='max-power', this fields contains the estimated
                direction of maximum power at each source location.
            'inversion' : 'single' | 'matrix'
                Whether the spatial filters were computed for each dipole
                separately or jointly for all dipoles at each vertex using a
                matrix inversion.
    Notes
    -----
    The original reference is :footcite:`VanVeenEtAl1997`.
    To obtain the Sekihara unit-noise-gain vector beamformer, you should use
    ``weight_norm='unit-noise-gain', pick_ori='vector'`` followed by
    :meth:`vec_stc.project('pca', src) <mne.VectorSourceEstimate.project>`.
    .. versionchanged:: 0.21
        The computations were extensively reworked, and the default for
        ``weight_norm`` was set to ``'unit-noise-gain-invariant'``.
    References
    ----------
    .. footbibliography::
    """
    # check number of sensor types present in the data and ensure a noise cov
    info = _simplify_info(info)
    noise_cov, _, allow_mismatch = _check_one_ch_type(
        'lcmv', info, forward, data_cov, noise_cov)
    # XXX we need this extra picking step (can't just rely on minimum norm's
    # because there can be a mismatch. Should probably add an extra arg to
    # _prepare_beamformer_input at some point (later)
    picks = _check_info_inv(info, forward, data_cov, noise_cov)
    info = pick_info(info, picks)
    data_rank = compute_rank(data_cov, rank=rank, info=info)
    noise_rank = compute_rank(noise_cov, rank=rank, info=info)
    # the data and noise covariances must agree on rank per channel type,
    # unless _check_one_ch_type has flagged the mismatch as acceptable
    for key in data_rank:
        if (key not in noise_rank or data_rank[key] != noise_rank[key]) and \
                not allow_mismatch:
            raise ValueError('%s data rank (%s) did not match the noise '
                             'rank (%s)'
                             % (key, data_rank[key],
                                noise_rank.get(key, None)))
    del noise_rank
    rank = data_rank
    logger.info('Making LCMV beamformer with rank %s' % (rank,))
    del data_rank
    depth = _check_depth(depth, 'depth_sparse')
    if inversion == 'single':
        # single-dipole inversion treats each orientation separately
        depth['combine_xyz'] = False
    is_free_ori, info, proj, vertno, G, whitener, nn, orient_std = \
        _prepare_beamformer_input(
            info, forward, label, pick_ori, noise_cov=noise_cov, rank=rank,
            pca=False, **depth)
    ch_names = list(info['ch_names'])
    # restrict the data covariance to the channels used for the filter
    data_cov = pick_channels_cov(data_cov, include=ch_names)
    Cm = data_cov._get_square()
    if 'estimator' in data_cov:
        # do not keep the fitted estimator object inside the Beamformer
        del data_cov['estimator']
    rank_int = sum(rank.values())  # total rank across channel types
    del rank
    # compute spatial filter
    n_orient = 3 if is_free_ori else 1
    W, max_power_ori = _compute_beamformer(
        G, Cm, reg, n_orient, weight_norm, pick_ori, reduce_rank, rank_int,
        inversion=inversion, nn=nn, orient_std=orient_std,
        whitener=whitener)
    # get src type to store with filters for _make_stc
    src_type = _get_src_type(forward['src'], vertno)
    # get subject to store with filters
    subject_from = _subject_from_forward(forward)
    # Is the computed beamformer a scalar or vector beamformer?
    is_free_ori = is_free_ori if pick_ori in [None, 'vector'] else False
    is_ssp = bool(info['projs'])
    filters = Beamformer(
        kind='LCMV', weights=W, data_cov=data_cov, noise_cov=noise_cov,
        whitener=whitener, weight_norm=weight_norm, pick_ori=pick_ori,
        ch_names=ch_names, proj=proj, is_ssp=is_ssp, vertices=vertno,
        is_free_ori=is_free_ori, n_sources=forward['nsource'],
        src_type=src_type, source_nn=forward['source_nn'].copy(),
        subject=subject_from, rank=rank_int, max_power_ori=max_power_ori,
        inversion=inversion)
    return filters
def _apply_lcmv(data, filters, info, tmin):
    """Apply LCMV spatial filter to data for source reconstruction.

    Parameters
    ----------
    data : ndarray, shape (n_channels, n_times) | iterable of ndarray
        A single data matrix, or an iterable of matrices (one per epoch).
    filters : instance of Beamformer
        Spatial filter as returned by ``make_lcmv``.
    info : instance of Info
        Measurement info whose ``projs`` and ``sfreq`` entries are used.
    tmin : float
        Time of the first sample, forwarded to the source estimate.

    Yields
    ------
    stc : instance of SourceEstimate subclass
        One source estimate per input data matrix.
    """
    if isinstance(data, np.ndarray) and data.ndim == 2:
        # Treat a bare 2D array as a single "epoch".
        data = [data]
        return_single = True
    else:
        return_single = False
    W = filters['weights']
    for i, M in enumerate(data):
        if len(M) != len(filters['ch_names']):
            raise ValueError('data and picks must have the same length')
        if not return_single:
            logger.info("Processing epoch : %d" % (i + 1))
        M = _proj_whiten_data(M, info['projs'], filters)
        # project to source space using beamformer weights
        vector = False
        if filters['is_free_ori']:
            sol = np.dot(W, M)
            if filters['pick_ori'] == 'vector':
                vector = True
            else:
                logger.info('combining the current components...')
                sol = combine_xyz(sol)
        else:
            # Linear inverse: do computation here or delayed
            if (M.shape[0] < W.shape[0] and
                    filters['pick_ori'] != 'max-power'):
                # fewer channels than sources: pass (W, M) unmultiplied so
                # downstream code can delay the product -- NOTE(review):
                # relies on _make_stc accepting a (W, M) tuple; confirm.
                sol = (W, M)
            else:
                sol = np.dot(W, M)
        tstep = 1.0 / info['sfreq']
        # compatibility with 0.16, add src_type as None if not present:
        filters, warn_text = _check_src_type(filters)
        yield _make_stc(sol, vertices=filters['vertices'], tmin=tmin,
                        tstep=tstep, subject=filters['subject'],
                        vector=vector, source_nn=filters['source_nn'],
                        src_type=filters['src_type'], warn_text=warn_text)
    logger.info('[done]')
@verbose
def apply_lcmv(evoked, filters, *, verbose=None):
    """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights.

    Project evoked data through a precomputed LCMV spatial filter to obtain
    source time courses.

    Parameters
    ----------
    evoked : Evoked
        Evoked data to invert.
    filters : instance of Beamformer
        LCMV spatial filter (beamformer weights) as returned from
        :func:`make_lcmv`.
    %(verbose)s

    Returns
    -------
    stc : SourceEstimate | VolSourceEstimate | VectorSourceEstimate
        Source time courses.

    See Also
    --------
    make_lcmv, apply_lcmv_raw, apply_lcmv_epochs, apply_lcmv_cov

    Notes
    -----
    .. versionadded:: 0.18
    """
    _check_reference(evoked)
    # Restrict the evoked data to the channels the filter was built for.
    picks = _check_channels_spatial_filter(evoked.ch_names, filters)
    stcs = _apply_lcmv(data=evoked.data[picks], filters=filters,
                       info=evoked.info, tmin=evoked.times[0])
    # _apply_lcmv is a generator; a single Evoked yields exactly one STC.
    return next(stcs)
@verbose
def apply_lcmv_epochs(epochs, filters, *, return_generator=False,
                      verbose=None):
    """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights.

    Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights
    on single trial data.

    Parameters
    ----------
    epochs : Epochs
        Single trial epochs.
    filters : instance of Beamformer
        LCMV spatial filter (beamformer weights)
        Filter weights returned from :func:`make_lcmv`.
    return_generator : bool
        Return a generator object instead of a list. This allows iterating
        over the stcs without having to keep them all in memory.
    %(verbose)s

    Returns
    -------
    stc : list | generator of (SourceEstimate | VolSourceEstimate)
        The source estimates for all epochs.

    See Also
    --------
    make_lcmv, apply_lcmv_raw, apply_lcmv, apply_lcmv_cov
    """
    _check_reference(epochs)
    info = epochs.info
    tmin = epochs.times[0]
    # Restrict each epoch to the channels the filter was built for.
    sel = _check_channels_spatial_filter(epochs.ch_names, filters)
    data = epochs.get_data()[:, sel, :]
    stcs = _apply_lcmv(data=data, filters=filters, info=info,
                       tmin=tmin)
    if not return_generator:
        # Materialize the generator unless the caller asked for lazy output
        # (list() instead of the previous element-by-element comprehension).
        stcs = list(stcs)
    return stcs
@verbose
def apply_lcmv_raw(raw, filters, start=None, stop=None, *, verbose=None):
    """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights.

    Project a slice of continuous (raw) data through a precomputed LCMV
    spatial filter.

    Parameters
    ----------
    raw : mne.io.Raw
        Raw data to invert.
    filters : instance of Beamformer
        LCMV spatial filter (beamformer weights) as returned from
        :func:`make_lcmv`.
    start : int
        Index of first time sample (index not time is seconds).
    stop : int
        Index of first time sample not to include (index not time is seconds).
    %(verbose)s

    Returns
    -------
    stc : SourceEstimate | VolSourceEstimate
        Source time courses.

    See Also
    --------
    make_lcmv, apply_lcmv_epochs, apply_lcmv, apply_lcmv_cov
    """
    _check_reference(raw)
    # Restrict the raw data to the channels the filter was built for.
    picks = _check_channels_spatial_filter(raw.ch_names, filters)
    data, times = raw[picks, start:stop]
    stcs = _apply_lcmv(data=data, filters=filters, info=raw.info,
                       tmin=times[0])
    # One contiguous data segment yields exactly one source estimate.
    return next(stcs)
@verbose
def apply_lcmv_cov(data_cov, filters, verbose=None):
    """Apply Linearly Constrained Minimum Variance (LCMV) beamformer weights.

    Apply a precomputed LCMV spatial filter to a data covariance matrix to
    estimate source power.

    Parameters
    ----------
    data_cov : instance of Covariance
        Data covariance matrix.
    filters : instance of Beamformer
        LCMV spatial filter (beamformer weights) as returned from
        :func:`make_lcmv`.
    %(verbose)s

    Returns
    -------
    stc : SourceEstimate | VolSourceEstimate
        Source power.

    See Also
    --------
    make_lcmv, apply_lcmv, apply_lcmv_epochs, apply_lcmv_raw
    """
    # Keep only the channels the filter was built for, in filter order.
    picks = _check_channels_spatial_filter(data_cov.ch_names, filters)
    picked_names = [data_cov.ch_names[idx] for idx in picks]
    data_cov = pick_channels_cov(data_cov, picked_names)
    n_orient = filters['weights'].shape[0] // filters['n_sources']
    # A covariance is symmetric in channels, so projection/whitening must be
    # applied along both dimensions (rows, then columns via the transposes).
    cov_data = _proj_whiten_data(data_cov['data'].T, data_cov['projs'],
                                 filters)
    cov_data = _proj_whiten_data(cov_data.T, data_cov['projs'], filters)
    del data_cov
    source_power = _compute_power(cov_data, filters['weights'], n_orient)
    # compatibility with 0.16, add src_type as None if not present:
    filters, warn_text = _check_src_type(filters)
    return _make_stc(source_power, vertices=filters['vertices'],
                     src_type=filters['src_type'], tmin=0., tstep=1.,
                     subject=filters['subject'],
                     source_nn=filters['source_nn'], warn_text=warn_text)
| {
"content_hash": "5c6956b6a02e5925a7940c3b47962a77",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 79,
"avg_line_length": 35.903225806451616,
"alnum_prop": 0.6118598382749326,
"repo_name": "mne-tools/mne-python",
"id": "61c45a8ec66f8ba39f3a092a8f67b2b772b50ed1",
"size": "15582",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "mne/beamformer/_lcmv.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "14962"
},
{
"name": "Makefile",
"bytes": "4612"
},
{
"name": "Python",
"bytes": "10364736"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "20137"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
    """Build the creature object for the male Gran thug (variant 01)."""
    creature = Creature()
    creature.template = "object/mobile/shared_dressed_gran_thug_male_01.iff"
    creature.attribute_template_id = 9
    creature.stfName("npc_name","gran_base_male")
    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####
    return creature
return result | {
"content_hash": "602b077d608738a53ddcd3ee22bebff8",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 71,
"avg_line_length": 23.384615384615383,
"alnum_prop": 0.6907894736842105,
"repo_name": "obi-two/Rebelion",
"id": "7694c49a954e1f5063a32731811197dd64d6ee95",
"size": "449",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_gran_thug_male_01.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
import datetime
from django.contrib.auth.models import AnonymousUser, User
try:
# Django 1.10 and above
from django.urls import reverse
except:
# Django 1.8 and 1.9
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.test import TestCase
from django.test import Client
from .models import Poll, Choice, ActualVote
class GeneralAuthTest(TestCase):
    """Sanity checks for the login endpoint."""

    def test_auth_fail(self):
        """Unknown credentials must re-render the form with an error."""
        client = Client()
        response = client.post(
            reverse('login'),
            {'username': 'thisuserdoes', 'password': 'notexist'})
        # A failed login re-displays the form (200) rather than redirecting.
        self.assertEqual(response.status_code, 200)
        self.assertContains(
            response,
            "Your username and password didn't match. Please try again")
class PollMethodTests(TestCase):
    """Unit tests for ``Poll.was_published_recently``."""

    def test_was_published_recently_with_future_poll(self):
        """A poll whose pub_date lies in the future is not 'recent'."""
        poll = Poll(pub_date=timezone.now() + datetime.timedelta(days=30))
        self.assertEqual(poll.was_published_recently(), False)

    def test_was_published_recently_with_old_poll(self):
        """A poll older than one day is not 'recent'."""
        poll = Poll(pub_date=timezone.now() - datetime.timedelta(days=30))
        self.assertEqual(poll.was_published_recently(), False)

    def test_was_published_recently_with_recent_poll(self):
        """A poll published within the last day is 'recent'."""
        poll = Poll(pub_date=timezone.now() - datetime.timedelta(hours=1))
        self.assertEqual(poll.was_published_recently(), True)
def create_poll(question, days=0, choices=None):
    """
    Creates a poll with the given `question` published the given number of
    `days` offset to now (negative for polls published in the past,
    positive for polls that have yet to be published).

    If `choices` is given, a Choice row is created for each entry.
    `None` (the default) means "no choices"; a mutable default list is
    deliberately avoided here.
    """
    poll = Poll.objects.create(
        question=question,
        pub_date=timezone.now() + datetime.timedelta(days=days)
    )
    for choice_text in (choices or []):
        Choice.objects.create(poll=poll, choice_text=choice_text)
    return poll
class PollViewTests(TestCase):
    """Tests for the poll index view."""

    def test_index_view_with_no_polls(self):
        """An empty database yields the 'no polls' message."""
        resp = self.client.get(reverse('polls:index'))
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, "No polls are available.")
        self.assertQuerysetEqual(resp.context['latest_poll_list'], [])

    def test_index_view_with_a_past_poll(self):
        """A poll published in the past shows up on the index page."""
        create_poll(question="Past poll.", days=-30)
        resp = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            resp.context['latest_poll_list'], ['<Poll: Past poll.>'])

    def test_index_view_with_a_future_poll(self):
        """A poll scheduled for the future is hidden from the index."""
        create_poll(question="Future poll.", days=30)
        resp = self.client.get(reverse('polls:index'))
        self.assertContains(resp, "No polls are available.", status_code=200)
        self.assertQuerysetEqual(resp.context['latest_poll_list'], [])

    def test_index_view_with_future_poll_and_past_poll(self):
        """With mixed dates only the past poll is listed."""
        create_poll(question="Past poll.", days=-30)
        create_poll(question="Future poll.", days=30)
        resp = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            resp.context['latest_poll_list'], ['<Poll: Past poll.>'])

    def test_index_view_with_two_past_polls(self):
        """Several past polls are all listed, newest first."""
        create_poll(question="Past poll 1.", days=-30)
        create_poll(question="Past poll 2.", days=-5)
        resp = self.client.get(reverse('polls:index'))
        self.assertQuerysetEqual(
            resp.context['latest_poll_list'],
            ['<Poll: Past poll 2.>', '<Poll: Past poll 1.>'])
class PollIndexDetailTests(TestCase):
    """Tests for the poll detail view."""

    def test_detail_view_with_a_future_poll(self):
        """Requesting a future poll's detail page returns 404."""
        poll = create_poll(question='Future poll.', days=5)
        resp = self.client.get(reverse('polls:detail', args=(poll.id,)))
        self.assertEqual(resp.status_code, 404)

    def test_detail_view_with_a_past_poll(self):
        """A past poll's detail page displays its question."""
        poll = create_poll(question='Past Poll.', days=-5)
        resp = self.client.get(reverse('polls:detail', args=(poll.id,)))
        self.assertContains(resp, poll.question, status_code=200)
class PollVoteTests(TestCase):
    """Integration tests for the vote view: login required, one vote per user."""
    def setUp(self):
        # Three distinct users so multiple independent votes can be cast.
        self.user1 = User.objects.create_user(
            username='johndoe', email='johndoe@mail.com', password='top_secret')
        self.user2 = User.objects.create_user(
            username='homersimpson', email='h.simpson@springfield.com', password='top_secret')
        self.user3 = User.objects.create_user(
            username='mrburns', email='mrburns@aol.com', password='top_secret')
    def test_vote_poll_anonymous(self):
        """
        Should not be allowed to vote when not logged in
        """
        poll = create_poll(question='What is the question?', choices=['I dont know', 'Whatever', '42'])
        choices = Choice.objects.filter(poll=poll)
        site = reverse('polls:vote', args=(poll.id, ))
        response = self.client.get(site, {'choice': choices[0].id}, )
        # Anonymous users are redirected (302) to login, not to results.
        self.assertEqual(response.status_code, 302)
        self.assertTrue("/results/" not in response.url)
        self.assertTrue("/login/" in response.url)
    def test_vote_poll_authed(self):
        """
        Should be allowed to vote, and vote should count
        """
        poll = create_poll(question='What is the question?', choices=['I dont know', 'Whatever', '42'])
        choices = Choice.objects.filter(poll=poll)
        site = reverse('polls:vote', args=(poll.id,))
        c = Client()
        response = c.post(reverse("login"), {'username': 'johndoe', 'password': 'top_secret'})
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/accounts/profile/" in response.url)
        response = c.post(site, {'choice': choices[0].id}, )
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/results/" in response.url)
        # Fresh client so the second user starts a new session.
        c = Client()
        response = c.post(reverse("login"), {'username': 'homersimpson', 'password': 'top_secret'})
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/accounts/profile/" in response.url)
        response = c.post(site, {'choice': choices[1].id}, )
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/results/" in response.url)
        # Third user, third session.
        c = Client()
        response = c.post(reverse("login"), {'username': 'mrburns', 'password': 'top_secret'})
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/accounts/profile/" in response.url)
        response = c.post(site, {'choice': choices[0].id}, )
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/results/" in response.url)
        # there should now be 2 votes for choices[0] and 1 for choices[1]
        self.assertEqual(choices[0].votes, 2)
        self.assertEqual(choices[1].votes, 1)
        self.assertEqual(choices[2].votes, 0)
        # check if ActualVotes is there
        votes = ActualVote.objects.filter(poll=poll)
        self.assertTrue(len(votes) == 3)
        # check if the user, which is set by UserForeignKey, is populated properly
        self.assertTrue(votes[0].user == self.user1)
        self.assertTrue(votes[1].user == self.user2)
        self.assertTrue(votes[2].user == self.user3)
    def test_vote_poll_multivote(self):
        """
        Should be allowed to vote, and vote should count
        """
        poll = create_poll(question='What is the question?', choices=['I dont know', 'Whatever', '42'])
        choices = Choice.objects.filter(poll=poll)
        site = reverse('polls:vote', args=(poll.id,))
        c = Client()
        response = c.post(reverse("login"), {'username': 'johndoe', 'password': 'top_secret'})
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/accounts/profile/" in response.url)
        response = c.post(site, {'choice': choices[0].id}, )
        self.assertEqual(response.status_code, 302)  # should work
        self.assertTrue("/results/" in response.url)
        # vote again
        response = c.post(site, {'choice': choices[1].id}, )
        self.assertContains(response, "You already voted", status_code=200)
        # vote again
        response = c.post(site, {'choice': choices[2].id}, )
        self.assertContains(response, "You already voted", status_code=200)
        # vote again
        response = c.post(site, {'choice': choices[0].id}, )
        self.assertContains(response, "You already voted", status_code=200)
        # check if ActualVotes is there (should only be 1 vote)
        votes = ActualVote.objects.filter(poll=poll)
        self.assertTrue(len(votes) == 1)
| {
"content_hash": "986ee46c6fb1c9c69a22b704f7201658",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 103,
"avg_line_length": 38.00380228136882,
"alnum_prop": 0.623511755877939,
"repo_name": "anx-ckreuzberger/user_foreign_key_testapp",
"id": "ab9eceaa761b8efab8070b79360f530837f9dfb7",
"size": "9995",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "polls/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2504"
},
{
"name": "Python",
"bytes": "24084"
}
],
"symlink_target": ""
} |
# Tear down the demo's hardware services; releasing them frees the Arduino
# board and the attached servo for other scripts.
Runtime.releaseService("arduino")
Runtime.releaseService("servo01")
# Notify the Intro service (and any attached listeners) of the new state.
# intro.isServoActivated = False ## FIXME this gives error readonly
intro.broadcastState()
| {
"content_hash": "e208b859ef068b066ed968d2a3f01958",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 68,
"avg_line_length": 25.625,
"alnum_prop": 0.775609756097561,
"repo_name": "MyRobotLab/myrobotlab",
"id": "23a2ae3ebb00f8183652d073dfc52c465bfff066",
"size": "677",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/main/resources/resource/Intro/Servo01_stop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1542"
},
{
"name": "C",
"bytes": "6677"
},
{
"name": "C++",
"bytes": "274868"
},
{
"name": "CSS",
"bytes": "83744"
},
{
"name": "GLSL",
"bytes": "757"
},
{
"name": "HTML",
"bytes": "374401"
},
{
"name": "Java",
"bytes": "7100082"
},
{
"name": "JavaScript",
"bytes": "1536187"
},
{
"name": "Propeller Spin",
"bytes": "14406"
},
{
"name": "Python",
"bytes": "191671"
},
{
"name": "Shell",
"bytes": "3547"
}
],
"symlink_target": ""
} |
from oslo_config import cfg
from oslo_serialization import jsonutils
from urllib import parse as urlparse
from watcher.tests.api import base as api_base
from watcher.tests.objects import utils as obj_utils
class TestListService(api_base.FunctionalTest):
    """API tests for the /services endpoints: list, get, detail, sorting."""
    def _assert_service_fields(self, service):
        # Shared helper: every serialized service must carry these fields.
        service_fields = ['id', 'name', 'host', 'status']
        for field in service_fields:
            self.assertIn(field, service)
    def test_one(self):
        service = obj_utils.create_test_service(self.context)
        response = self.get_json('/services')
        self.assertEqual(service.id, response['services'][0]["id"])
        self._assert_service_fields(response['services'][0])
    def test_get_one_by_id(self):
        service = obj_utils.create_test_service(self.context)
        response = self.get_json('/services/%s' % service.id)
        self.assertEqual(service.id, response["id"])
        self.assertEqual(service.name, response["name"])
        self._assert_service_fields(response)
    def test_get_one_by_name(self):
        service = obj_utils.create_test_service(self.context)
        # Quote the URL since service names may need escaping.
        response = self.get_json(urlparse.quote(
            '/services/%s' % service['name']))
        self.assertEqual(service.id, response['id'])
        self._assert_service_fields(response)
    def test_get_one_soft_deleted(self):
        # Soft-deleted rows are visible only with the X-Show-Deleted header.
        service = obj_utils.create_test_service(self.context)
        service.soft_delete()
        response = self.get_json(
            '/services/%s' % service['id'],
            headers={'X-Show-Deleted': 'True'})
        self.assertEqual(service.id, response['id'])
        self._assert_service_fields(response)
        response = self.get_json(
            '/services/%s' % service['id'],
            expect_errors=True)
        self.assertEqual(404, response.status_int)
    def test_detail(self):
        service = obj_utils.create_test_service(self.context)
        response = self.get_json('/services/detail')
        self.assertEqual(service.id, response['services'][0]["id"])
        self._assert_service_fields(response['services'][0])
        for service in response['services']:
            self.assertTrue(
                all(val is not None for key, val in service.items()
                    if key in ['id', 'name', 'host', 'status'])
            )
    def test_detail_against_single(self):
        # /services/<id>/detail is not a valid route and must 404.
        service = obj_utils.create_test_service(self.context)
        response = self.get_json('/services/%s/detail' % service.id,
                                 expect_errors=True)
        self.assertEqual(404, response.status_int)
    def test_many(self):
        service_list = []
        # Three services on each of two hosts, six in total.
        for idx in range(1, 4):
            service = obj_utils.create_test_service(
                self.context, id=idx, host='CONTROLLER1',
                name='SERVICE_{0}'.format(idx))
            service_list.append(service.id)
        for idx in range(1, 4):
            service = obj_utils.create_test_service(
                self.context, id=3+idx, host='CONTROLLER2',
                name='SERVICE_{0}'.format(idx))
            service_list.append(service.id)
        response = self.get_json('/services')
        self.assertEqual(6, len(response['services']))
        for service in response['services']:
            self.assertTrue(
                all(val is not None for key, val in service.items()
                    if key in ['id', 'name', 'host', 'status']))
    def test_many_without_soft_deleted(self):
        service_list = []
        for id_ in [1, 2, 3]:
            service = obj_utils.create_test_service(
                self.context, id=id_, host='CONTROLLER',
                name='SERVICE_{0}'.format(id_))
            service_list.append(service.id)
        # Two more services are soft-deleted and must not be listed.
        for id_ in [4, 5]:
            service = obj_utils.create_test_service(
                self.context, id=id_, host='CONTROLLER',
                name='SERVICE_{0}'.format(id_))
            service.soft_delete()
        response = self.get_json('/services')
        self.assertEqual(3, len(response['services']))
        ids = [s['id'] for s in response['services']]
        self.assertEqual(sorted(service_list), sorted(ids))
    def test_services_collection_links(self):
        for idx in range(1, 6):
            obj_utils.create_test_service(
                self.context, id=idx,
                host='CONTROLLER',
                name='SERVICE_{0}'.format(idx))
        # An explicit ?limit caps the page size.
        response = self.get_json('/services/?limit=2')
        self.assertEqual(2, len(response['services']))
    def test_services_collection_links_default_limit(self):
        for idx in range(1, 6):
            obj_utils.create_test_service(
                self.context, id=idx,
                host='CONTROLLER',
                name='SERVICE_{0}'.format(idx))
        # Without an explicit limit, the api.max_limit option caps the page.
        cfg.CONF.set_override('max_limit', 3, 'api')
        response = self.get_json('/services')
        self.assertEqual(3, len(response['services']))
    def test_many_with_sort_key_name(self):
        service_list = []
        for id_ in range(1, 4):
            service = obj_utils.create_test_service(
                self.context, id=id_, host='CONTROLLER',
                name='SERVICE_{0}'.format(id_))
            service_list.append(service.name)
        response = self.get_json('/services/?sort_key=name')
        self.assertEqual(3, len(response['services']))
        names = [s['name'] for s in response['services']]
        self.assertEqual(sorted(service_list), names)
    def test_sort_key_validation(self):
        # Unknown sort keys must be rejected with 400.
        response = self.get_json(
            '/services?sort_key=%s' % 'bad_name',
            expect_errors=True)
        self.assertEqual(400, response.status_int)
class TestServicePolicyEnforcement(api_base.FunctionalTest):
    """Verify that the service API endpoints enforce RBAC policy rules."""

    def _common_policy_check(self, rule, func, *arg, **kwarg):
        """Deny ``rule`` via policy and assert ``func`` is rejected with 403.

        ``func`` is one of the test-client request helpers; ``*arg``/``**kwarg``
        are forwarded to it unchanged.
        """
        self.policy.set_rules({
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
            rule: "rule:default"})
        response = func(*arg, **kwarg)
        self.assertEqual(403, response.status_int)
        self.assertEqual('application/json', response.content_type)
        # Bug fix: this previously used assertTrue(msg, value), which always
        # passed because the first argument (a non-empty string) is truthy.
        # Assert the policy failure message is actually in the faultstring.
        self.assertIn(
            "Policy doesn't allow %s to be performed." % rule,
            jsonutils.loads(response.json['error_message'])['faultstring'])

    def test_policy_disallow_get_all(self):
        self._common_policy_check(
            "service:get_all", self.get_json, '/services',
            expect_errors=True)

    def test_policy_disallow_get_one(self):
        service = obj_utils.create_test_service(self.context)
        self._common_policy_check(
            "service:get", self.get_json,
            '/services/%s' % service.id,
            expect_errors=True)

    def test_policy_disallow_detail(self):
        self._common_policy_check(
            "service:detail", self.get_json,
            '/services/detail',
            expect_errors=True)
class TestServiceEnforcementWithAdminContext(TestListService,
                                             api_base.AdminRoleTest):
    """Re-run the listing tests with admin-only policy rules in force."""

    def setUp(self):
        super(TestServiceEnforcementWithAdminContext, self).setUp()
        admin_rules = {
            "admin_api": "(role:admin or role:administrator)",
            "default": "rule:admin_api",
        }
        # Gate every service action behind the default (admin) rule.
        for action in ("detail", "get", "get_all"):
            admin_rules["service:%s" % action] = "rule:default"
        self.policy.set_rules(admin_rules)
| {
"content_hash": "427ac5b1fc9b255a1c196a9a4510c1e0",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 75,
"avg_line_length": 39.876344086021504,
"alnum_prop": 0.5847377645948497,
"repo_name": "stackforge/watcher",
"id": "114534ad63890f04a28bf11ecbcd1912b7bbc705",
"size": "7986",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "watcher/tests/api/v1/test_services.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "995442"
},
{
"name": "Shell",
"bytes": "9758"
}
],
"symlink_target": ""
} |
class Solution(object):
    """LeetCode 129: sum every root-to-leaf number in a binary tree."""

    def sumNumbers(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        return self.helper(root, 0)

    def helper(self, node, prefix):
        # Depth-first descent: each level appends the node's digit to the
        # number accumulated along the path from the root.
        if not node:
            return 0
        prefix = prefix * 10 + node.val
        is_leaf = not node.left and not node.right
        if is_leaf:
            return prefix
        left_total = self.helper(node.left, prefix)
        right_total = self.helper(node.right, prefix)
        return left_total + right_total
| {
"content_hash": "2aee2c4061e81b394de32344b4109de5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 73,
"avg_line_length": 27.8,
"alnum_prop": 0.5323741007194245,
"repo_name": "Lanceolata/code-problems",
"id": "f7a81e2c4a180d0f485308171dc1e6dafd81a376",
"size": "623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/leetcode/Question_129_Sum_Root_to_Leaf_Numbers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "127"
},
{
"name": "C++",
"bytes": "130299"
},
{
"name": "Java",
"bytes": "149575"
},
{
"name": "Python",
"bytes": "106289"
}
],
"symlink_target": ""
} |
__doc__ = """
A Django template loader for loading and converting SHPAML markup to HTML
The django SHPAML template loader uses the official Python SHPAML implementation
which can be found at http://shpaml.webfactional.com/
Note: the SHPAML implementation from the above link is included for your
convenience.
"""
from setuptools import setup

# Trove classifiers describing the package for PyPI.
CLASSIFIERS = [
    'License :: OSI Approved :: BSD License',
    'Programming Language :: Python',
    'Environment :: Web Environment',
    'Intended Audience :: Developers',
    'Operating System :: OS Independent',
    'Topic :: Internet :: WWW/HTTP',
    'Topic :: Utilities',
]

# The module docstring doubles as the PyPI long description.
setup(
    name='django-shpaml',
    version='1.0.2',
    author='James Robert',
    url='http://shpaml.com',
    license='BSD',
    keywords='django shpaml',
    description=('A Django template loader for loading and converting '
                 'SHPAML markup to HTML'),
    long_description=__doc__,
    packages=['shpaml'],
    classifiers=CLASSIFIERS,
)
| {
"content_hash": "1548e7c5bd442b2d035ab1805f7f1a7c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 80,
"avg_line_length": 30.12121212121212,
"alnum_prop": 0.6529175050301811,
"repo_name": "watchdesigner/shpamlfork",
"id": "a830f2e9106f1b8bf6d7bd5066231f870ebb7c53",
"size": "994",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "23800"
}
],
"symlink_target": ""
} |
import re
import os
import shutil
from fabric.api import task, local, abort
@task
def script(name, filename=None):
    """Scaffold a new console script and register its entry point.

    ``name`` is the entry-point name; ``filename`` (optional) is the module
    file name, derived from ``name`` when omitted.
    """
    package = local('python setup.py --name', capture=True)
    slug = filename if filename is not None else re.sub(r'[^a-z0-9]+', '_', name)
    scripts_dir = os.path.join(package, 'scripts')
    target = os.path.join(scripts_dir, slug + '.py')
    if os.path.exists(target):
        abort('Script "{}" already exists.'.format(slug))
    if not os.path.exists(scripts_dir):
        # First script: create the package directory and make it importable.
        os.makedirs(scripts_dir)
        open(os.path.join(scripts_dir, '__init__.py'), 'w').close()
    shutil.copyfile(
        os.path.join('fabtasks', 'templates', 'script.py'),
        target
    )
    # Record the console_scripts entry point for setup.py to pick up.
    with open('entry-points.ini', 'a') as fh:
        fh.write('{} = {}.scripts.{}:main\n'.format(name, package, slug))
| {
"content_hash": "a0b07008ad7eaf131c5b9ef60fd86e39",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 28.689655172413794,
"alnum_prop": 0.6105769230769231,
"repo_name": "GaretJax/dockertools",
"id": "7978af0159ef1858ab7f1d2294f8311fb7a77025",
"size": "832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabtasks/scaffholding.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20158"
}
],
"symlink_target": ""
} |
import os
import ujson
from typing import Any, Generator
from django.core.management.base import BaseCommand, CommandParser
from django.db.models import QuerySet
from zerver.lib.message import render_markdown
from zerver.models import Message
def queryset_iterator(queryset, chunksize=5000):
    # type: (QuerySet, int) -> Generator
    """Yield rows of ``queryset`` in primary-key order, ``chunksize`` at a time.

    Slicing plus an ``id__gt`` filter keeps memory bounded instead of
    materialising the entire result set at once.
    """
    remaining = queryset.order_by('id')
    while remaining.exists():
        last_id = None
        for row in remaining[:chunksize]:
            last_id = row.id
            yield row
        # Everything up to ``last_id`` has been yielded; continue after it.
        remaining = remaining.filter(id__gt=last_id)
class Command(BaseCommand):
    help = """
Render messages to a file.
Usage: ./manage.py render_messages <destination> [--amount=10000]
"""

    def add_arguments(self, parser):
        # type: (CommandParser) -> None
        parser.add_argument('destination', help='Destination file path')
        parser.add_argument('--amount', default=100000, help='Number of messages to render')
        parser.add_argument('--latest_id', default=0, help="Last message id to render")

    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        """Render up to ``amount`` messages ending at ``latest_id`` (or the
        newest message) into a JSON array written to ``destination``."""
        dest_dir = os.path.realpath(os.path.dirname(options['destination']))
        amount = int(options['amount'])
        latest = int(options['latest_id']) or Message.objects.latest('id').id
        self.stdout.write('Latest message id: {latest}'.format(latest=latest))
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        with open(options['destination'], 'w') as result:
            result.write('[')
            messages = Message.objects.filter(id__gt=latest - amount, id__lte=latest).order_by('id')
            # Bug fix: the original wrote a ',' after every entry whose id was
            # not exactly ``latest``.  If the highest rendered id differed from
            # ``latest`` (e.g. that message was deleted), the file ended with a
            # trailing comma and was not valid JSON.  Writing the separator
            # *before* every entry except the first is always correct.
            first = True
            for message in queryset_iterator(messages):
                content = message.content
                # In order to ensure that the output of this tool is
                # consistent across the time, even if messages are
                # edited, we always render the original content
                # version, extracting it from the edit history if
                # necessary.
                if message.edit_history:
                    history = ujson.loads(message.edit_history)
                    history = sorted(history, key=lambda i: i['timestamp'])
                    for entry in history:
                        if 'prev_content' in entry:
                            content = entry['prev_content']
                            break
                if not first:
                    result.write(',')
                first = False
                result.write(ujson.dumps({
                    'id': message.id,
                    'content': render_markdown(message, content)
                }))
            result.write(']')
| {
"content_hash": "86043756a1c9c15e6bf235f3df647e9c",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 100,
"avg_line_length": 40.71212121212121,
"alnum_prop": 0.5805731298846297,
"repo_name": "brockwhittaker/zulip",
"id": "b98b8ed0e624dc88070dcdeed374e2c36a9a431d",
"size": "2687",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "zilencer/management/commands/render_messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "442662"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "515931"
},
{
"name": "JavaScript",
"bytes": "2195008"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "393671"
},
{
"name": "Puppet",
"bytes": "87413"
},
{
"name": "Python",
"bytes": "3948219"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "65702"
}
],
"symlink_target": ""
} |
"""
Tests for the server_stop JSON RPC method
"""
import asyncio
import unittest
import asynctest
import nose
from .... import utils
from mymcadmin.errors import ServerDoesNotExistError
from mymcadmin.rpc.errors import JsonRpcInvalidRequestError
class TestServerStop(utils.ManagerMixin, unittest.TestCase):
    """
    Tests for the server_stop JSON RPC method
    """
    # NOTE: @asynctest.patch decorators inject their mocks innermost-first,
    # so the parameter order below is (exists, server).
    @asynctest.patch('mymcadmin.server.Server')
    @asynctest.patch('os.path.exists')
    @utils.run_async
    async def test_method(self, exists, server):
        """
        Tests that the method works properly in ideal conditions
        """
        exists.return_value = True
        # Make instantiating Server() return the mock itself.
        server.return_value = server
        server_id = 'testification'
        # Fake a running child process registered under server_id.
        mock_proc = asynctest.Mock(spec = asyncio.subprocess.Process)
        self.manager.instances = {
            server_id: mock_proc
        }
        result = await self.manager.rpc_command_server_stop(
            server_id = server_id,
        )
        self.assertEqual(
            server_id,
            result,
            'Method did not return the server ID',
        )
        # The manager should stop the server by sending 'stop' on stdin.
        mock_proc.communicate.assert_called_with('stop'.encode())
    @nose.tools.raises(ServerDoesNotExistError)
    @utils.run_async
    async def test_method_bad_id(self):
        """
        Tests that we check for valid server IDs
        """
        await self.manager.rpc_command_server_stop(
            server_id = 'bad',
        )
    @nose.tools.raises(JsonRpcInvalidRequestError)
    @asynctest.patch('os.path.exists')
    @utils.run_async
    async def test_method_not_running(self, exists):
        """
        Tests that we throw an error when the server isn't running
        """
        # The server directory exists but no instance is registered,
        # so stopping it must raise JsonRpcInvalidRequestError.
        exists.return_value = True
        await self.manager.rpc_command_server_stop('testification')
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "43598e4b1eb8927411ad7cd3beae0987",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 69,
"avg_line_length": 24.63157894736842,
"alnum_prop": 0.625534188034188,
"repo_name": "durandj/mymcadmin",
"id": "415a1f4ab89e09022a4ebef865d99eef8fdde405",
"size": "1872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/small/manager/rpc/test_server_stop.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "255074"
},
{
"name": "Shell",
"bytes": "404"
}
],
"symlink_target": ""
} |
import string
import sys
import warnings
## Module level state. You'll need to set a synapse object at least
## before using this module.
# Synapse client used to deliver messages; must be assigned by the caller.
syn = None
# Send result messages (validation/scoring outcomes) to submitters.
send_messages = True
# Send error notifications to challenge administrators.
send_notifications = True
# Also confirm receipt of correctly formatted submissions.
acknowledge_receipt = False
# When True, print messages to stdout instead of sending them.
dry_run = False
## Edit these URLs to point to your challenge and its support forum
# Fallback values used by DefaultingFormatter when a template variable
# is not supplied by the caller.
defaults = dict(
    challenge_instructions_url = "https://www.synapse.org/",
    support_forum_url = "http://support.sagebase.org/sagebase",
    scoring_script = "the scoring script")
##---------------------------------------------------------
## Message templates:
## Edit to fit your challenge.
##---------------------------------------------------------
# All templates are HTML bodies formatted with DefaultingFormatter, so any
# missing variable falls back to the module-level `defaults` dict.

# Sent when stopping a submitter's docker container failed.
# NOTE(review): "has has" is a typo in the user-facing text; fix deliberately
# (it is a runtime string, not documentation).
dockerstop_failed_subject_template = "Docker stop error in submission to {queue_name}"
dockerstop_failed_template = """\
<p>Hello {username},</p>
<p>Sorry, but we were unable to stop your docker container. Your container has has either been stopped or finished running.</p>
<p>submission name: <b>{submission_name}</b><br>
submission ID: <b>{submission_id}</b></p>
<p>If you have questions, please ask on the forums at {support_forum_url}.</p>
<p>Sincerely,<br>
{scoring_script}</p>
"""
# Sent when a docker container was stopped successfully.
dockerstop_passed_subject_template = "Docker stop succeeded to {queue_name}"
dockerstop_passed_template = """\
<p>Hello {username},</p>
<p>We have successfully stopped your docker container.</p>
<p>submission name: <b>{submission_name}</b><br>
submission ID: <b>{submission_id}</b></p>
<p>If you have questions, please ask on the forums at {support_forum_url} or refer to the challenge \
instructions which can be found at {challenge_instructions_url}.</p>
<p>Sincerely,<br>
{scoring_script}</p>
"""
# Sent when a submission fails validation; {message} carries the error text.
validation_failed_subject_template = "Validation error in submission to {queue_name}"
validation_failed_template = """\
<p>Hello {username},</p>
<p>Sorry, but we were unable to validate your submission to the {queue_name}.</p>
<p>Please refer to the challenge instructions which can be found at \
{challenge_instructions_url} and to the error message below:</p>
<p>submission name: <b>{submission_name}</b><br>
submission ID: <b>{submission_id}</b></p>
<blockquote><pre>
{message}
</pre></blockquote>
<p>If you have questions, please ask on the forums at {support_forum_url}.</p>
<p>Sincerely,<br>
{scoring_script}</p>
"""
# Receipt acknowledgement for a correctly formatted submission.
validation_passed_subject_template = "Submission received to {queue_name}"
validation_passed_template = """\
<p>Hello {username},</p>
<p>We have received your submission to the {queue_name} and confirmed that it is correctly formatted.</p>
<p>submission name: <b>{submission_name}</b><br>
submission ID: <b>{submission_id}</b></p>
<blockquote><pre>
{message}
</pre></blockquote>
<p>If you have questions, please ask on the forums at {support_forum_url} or refer to the challenge \
instructions which can be found at {challenge_instructions_url}.</p>
<p>Sincerely,<br>
{scoring_script}</p>
"""
# Sent when scoring completed; {message} carries the score report.
scoring_succeeded_subject_template = "Scored submission to {queue_name}"
scoring_succeeded_template = """\
<p>Hello {username},</p>
<p>Your submission \"{submission_name}\" (ID: {submission_id}) to the {queue_name} has been scored:</p>
<blockquote><pre>
{message}
</pre></blockquote>
<p>If you have questions, please ask on the forums at {support_forum_url}.</p>
<p>Sincerely,<br>
{scoring_script}</p>
"""
# Sent to the submitter when the scoring script itself raised an exception.
scoring_error_subject_template = "Exception while scoring submission to {queue_name}"
scoring_error_template = """\
<p>Hello {username},</p>
<p>Sorry, but we were unable to process your submission to the {queue_name}.</p>
<p>Please refer to the challenge instructions which can be found at \
{challenge_instructions_url} and to the error message below:</p>
<p>submission name: <b>{submission_name}</b><br>
submission ID: <b>{submission_id}</b></p>
<blockquote><pre>
{message}
</pre></blockquote>
<p>If you have questions, please ask on the forums at {support_forum_url}.</p>
<p>Sincerely,<br>
{scoring_script}</p>
"""
# Admin-facing notification used by error_notification().
notification_subject_template = "Exception while scoring submission to {queue_name}"
error_notification_template = """\
<p>Hello Challenge Administrator,</p>
<p>The scoring script for {queue_name} encountered an error:</p>
<blockquote><pre>
{message}
</pre></blockquote>
<p>Sincerely,<br>
{scoring_script}</p>
"""
class DefaultingFormatter(string.Formatter):
    """
    Python's string.format has the annoying habit of raising a KeyError
    if you don't completely fill in the template. Let's do something a
    bit nicer.
    Adapted from: http://stackoverflow.com/a/19800610/199166
    """
    def get_value(self, key, args, kwds):
        """Look up ``key`` in kwargs, then module ``defaults``; leave the
        placeholder literally in place (with a warning) if still missing."""
        if isinstance(key, str):
            value = kwds.get(key, defaults.get(key, None))
            if value is None:
                # Keep the "{key}" placeholder visible in the output so a
                # partially filled template is still readable.
                value = "{{{0}}}".format(key)
                warnings.warn("Missing template variable %s" % value)
            return value
        else:
            # Bug fix: the original line was ``Formatter.get_value(key, args,
            # kwds)`` which raised NameError (the module is imported as
            # ``string``, so ``Formatter`` is undefined), omitted ``self`` and
            # discarded the result.  Delegate properly so positional fields
            # like ``{0}`` resolve from ``args``.
            return string.Formatter.get_value(self, key, args, kwds)
formatter = DefaultingFormatter()
##---------------------------------------------------------
## functions for sending various types of messages
##---------------------------------------------------------
def dockerstop_failed(userIds, **kwargs):
    """Tell submitters that stopping their docker container failed."""
    if not send_messages:
        return None
    return send_message(
        userIds=userIds,
        subject_template=dockerstop_failed_subject_template,
        message_template=dockerstop_failed_template,
        kwargs=kwargs)
def dockerstop_passed(userIds, **kwargs):
    """Tell submitters that their docker container was stopped."""
    if not send_messages:
        return None
    return send_message(
        userIds=userIds,
        subject_template=dockerstop_passed_subject_template,
        message_template=dockerstop_passed_template,
        kwargs=kwargs)
def validation_failed(userIds, **kwargs):
    """Tell submitters that their submission failed validation."""
    if not send_messages:
        return None
    return send_message(
        userIds=userIds,
        subject_template=validation_failed_subject_template,
        message_template=validation_failed_template,
        kwargs=kwargs)
def validation_passed(userIds, **kwargs):
    """Acknowledge receipt of a correctly formatted submission.

    Gated on ``acknowledge_receipt`` (not ``send_messages``).
    """
    if not acknowledge_receipt:
        return None
    return send_message(
        userIds=userIds,
        subject_template=validation_passed_subject_template,
        message_template=validation_passed_template,
        kwargs=kwargs)
def scoring_succeeded(userIds, **kwargs):
    """Send submitters their score report."""
    if not send_messages:
        return None
    return send_message(
        userIds=userIds,
        subject_template=scoring_succeeded_subject_template,
        message_template=scoring_succeeded_template,
        kwargs=kwargs)
def scoring_error(userIds, **kwargs):
    """Tell submitters that scoring their submission raised an error."""
    if not send_messages:
        return None
    return send_message(
        userIds=userIds,
        subject_template=scoring_error_subject_template,
        message_template=scoring_error_template,
        kwargs=kwargs)
def error_notification(userIds, **kwargs):
    """Notify challenge administrators of a scoring-script error.

    Gated on ``send_notifications`` (not ``send_messages``).
    """
    if not send_notifications:
        return None
    return send_message(
        userIds=userIds,
        subject_template=notification_subject_template,
        message_template=error_notification_template,
        kwargs=kwargs)
def send_message(userIds, subject_template, message_template, kwargs):
    """Format the templates and deliver (or print) the resulting message.

    Behaviour depends on module state: prints when ``dry_run`` is set,
    sends via ``syn.sendMessage`` when a Synapse client is configured,
    otherwise writes a warning to stderr and returns None.
    (Python 2 source: note the print statements and ``unicode``.)
    """
    subject = formatter.format(subject_template, **kwargs)
    message = formatter.format(message_template, **kwargs)
    if dry_run:
        print "\nDry Run: would have sent:"
        print subject
        print "-" * 60
        print message
        return None
    elif syn:
        response = syn.sendMessage(
            userIds=userIds,
            messageSubject=subject,
            messageBody=message,
            contentType="text/html")
        print "sent: ", unicode(response).encode('utf-8')
        return response
    else:
        # No client configured and not a dry run: warn, implicitly return None.
        sys.stderr.write("Can't send message. No Synapse object configured\n")
| {
"content_hash": "daf86ea44702ea04892fdc1acfba10a8",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 128,
"avg_line_length": 32.618852459016395,
"alnum_prop": 0.6351300414624953,
"repo_name": "thomasyu888/SynapseChallengeTemplates",
"id": "89ffcdc1bde32cf2a4fa01fe55550c1c91d0aa3a",
"size": "8002",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/docker_agent/messages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Common Workflow Language",
"bytes": "24400"
},
{
"name": "Java",
"bytes": "24891"
},
{
"name": "Python",
"bytes": "193231"
},
{
"name": "R",
"bytes": "11474"
},
{
"name": "Shell",
"bytes": "3993"
}
],
"symlink_target": ""
} |
import datetime
from unittest import skipIf, skipUnless
from django.core.exceptions import FieldError
from django.db import NotSupportedError, connection
from django.db.models import (
F, RowRange, Value, ValueRange, Window, WindowFrame,
)
from django.db.models.aggregates import Avg, Max, Min, Sum
from django.db.models.functions import (
CumeDist, DenseRank, ExtractYear, FirstValue, Lag, LastValue, Lead,
NthValue, Ntile, PercentRank, Rank, RowNumber, Upper,
)
from django.test import SimpleTestCase, TestCase, skipUnlessDBFeature
from .models import Employee
@skipUnlessDBFeature('supports_over_clause')
class WindowFunctionTests(TestCase):
    @classmethod
    def setUpTestData(cls):
        # Shared fixture rows: (name, salary, department, hire_date, age).
        Employee.objects.bulk_create([
            Employee(name=e[0], salary=e[1], department=e[2], hire_date=e[3], age=e[4])
            for e in [
                ('Jones', 45000, 'Accounting', datetime.datetime(2005, 11, 1), 20),
                ('Williams', 37000, 'Accounting', datetime.datetime(2009, 6, 1), 20),
                ('Jenson', 45000, 'Accounting', datetime.datetime(2008, 4, 1), 20),
                ('Adams', 50000, 'Accounting', datetime.datetime(2013, 7, 1), 50),
                ('Smith', 55000, 'Sales', datetime.datetime(2007, 6, 1), 30),
                ('Brown', 53000, 'Sales', datetime.datetime(2009, 9, 1), 30),
                ('Johnson', 40000, 'Marketing', datetime.datetime(2012, 3, 1), 30),
                ('Smith', 38000, 'Marketing', datetime.datetime(2009, 10, 1), 20),
                ('Wilkinson', 60000, 'IT', datetime.datetime(2011, 3, 1), 40),
                ('Moore', 34000, 'IT', datetime.datetime(2013, 8, 1), 40),
                ('Miller', 100000, 'Management', datetime.datetime(2005, 6, 1), 40),
                ('Johnson', 80000, 'Management', datetime.datetime(2005, 7, 1), 50),
            ]
        ])
    def test_dense_rank(self):
        """DenseRank by hire year: ties share a rank, with no gaps after."""
        qs = Employee.objects.annotate(rank=Window(
            expression=DenseRank(),
            order_by=ExtractYear(F('hire_date')).asc(),
        ))
        # Expected tuples: (name, salary, department, hire_date, rank).
        self.assertQuerysetEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 2),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 3),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 4),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 4),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 4),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 5),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 6),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 7),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 7),
        ], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False)
    def test_department_salary(self):
        """Running per-department salary total, accumulated in hire-date order."""
        qs = Employee.objects.annotate(department_sum=Window(
            expression=Sum('salary'),
            partition_by=F('department'),
            order_by=[F('hire_date').asc()],
        )).order_by('department', 'department_sum')
        # Expected tuples: (name, department, salary, running department sum).
        self.assertQuerysetEqual(qs, [
            ('Jones', 'Accounting', 45000, 45000),
            ('Jenson', 'Accounting', 45000, 90000),
            ('Williams', 'Accounting', 37000, 127000),
            ('Adams', 'Accounting', 50000, 177000),
            ('Wilkinson', 'IT', 60000, 60000),
            ('Moore', 'IT', 34000, 94000),
            ('Miller', 'Management', 100000, 100000),
            ('Johnson', 'Management', 80000, 180000),
            ('Smith', 'Marketing', 38000, 38000),
            ('Johnson', 'Marketing', 40000, 78000),
            ('Smith', 'Sales', 55000, 55000),
            ('Brown', 'Sales', 53000, 108000),
        ], lambda entry: (entry.name, entry.department, entry.salary, entry.department_sum))
    def test_rank(self):
        """
        Rank the employees based on the year they're were hired. Since there
        are multiple employees hired in different years, this will contain
        gaps.
        """
        qs = Employee.objects.annotate(rank=Window(
            expression=Rank(),
            order_by=ExtractYear(F('hire_date')).asc(),
        ))
        # Unlike DenseRank, ties consume rank positions: three 2005 hires at
        # rank 1 push the 2007 hire to rank 4.
        self.assertQuerysetEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 1),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 1),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 1),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 4),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 5),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 6),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 6),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 6),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 9),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 10),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 11),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 11),
        ], lambda entry: (entry.name, entry.salary, entry.department, entry.hire_date, entry.rank), ordered=False)
    def test_row_number(self):
        """
        The row number window function computes the number based on the order
        in which the tuples were inserted. Depending on the backend,
        Oracle requires an ordering-clause in the Window expression.
        """
        qs = Employee.objects.annotate(row_number=Window(
            expression=RowNumber(),
            order_by=F('pk').asc(),
        )).order_by('pk')
        # pk order matches the bulk_create insertion order in setUpTestData.
        self.assertQuerysetEqual(qs, [
            ('Jones', 'Accounting', 1),
            ('Williams', 'Accounting', 2),
            ('Jenson', 'Accounting', 3),
            ('Adams', 'Accounting', 4),
            ('Smith', 'Sales', 5),
            ('Brown', 'Sales', 6),
            ('Johnson', 'Marketing', 7),
            ('Smith', 'Marketing', 8),
            ('Wilkinson', 'IT', 9),
            ('Moore', 'IT', 10),
            ('Miller', 'Management', 11),
            ('Johnson', 'Management', 12),
        ], lambda entry: (entry.name, entry.department, entry.row_number))
    @skipIf(connection.vendor == 'oracle', "Oracle requires ORDER BY in row_number, ANSI:SQL doesn't")
    def test_row_number_no_ordering(self):
        """
        The row number window function computes the number based on the order
        in which the tuples were inserted.
        """
        # Add a default ordering for consistent results across databases.
        qs = Employee.objects.annotate(row_number=Window(
            expression=RowNumber(),
        )).order_by('pk')
        self.assertQuerysetEqual(qs, [
            ('Jones', 'Accounting', 1),
            ('Williams', 'Accounting', 2),
            ('Jenson', 'Accounting', 3),
            ('Adams', 'Accounting', 4),
            ('Smith', 'Sales', 5),
            ('Brown', 'Sales', 6),
            ('Johnson', 'Marketing', 7),
            ('Smith', 'Marketing', 8),
            ('Wilkinson', 'IT', 9),
            ('Moore', 'IT', 10),
            ('Miller', 'Management', 11),
            ('Johnson', 'Management', 12),
        ], lambda entry: (entry.name, entry.department, entry.row_number))
    def test_avg_salary_department(self):
        """Every row in a department carries the same departmental average."""
        qs = Employee.objects.annotate(avg_salary=Window(
            expression=Avg('salary'),
            order_by=F('department').asc(),
            partition_by='department',
        )).order_by('department', '-salary', 'name')
        self.assertQuerysetEqual(qs, [
            ('Adams', 50000, 'Accounting', 44250.00),
            ('Jenson', 45000, 'Accounting', 44250.00),
            ('Jones', 45000, 'Accounting', 44250.00),
            ('Williams', 37000, 'Accounting', 44250.00),
            ('Wilkinson', 60000, 'IT', 47000.00),
            ('Moore', 34000, 'IT', 47000.00),
            ('Miller', 100000, 'Management', 90000.00),
            ('Johnson', 80000, 'Management', 90000.00),
            ('Johnson', 40000, 'Marketing', 39000.00),
            ('Smith', 38000, 'Marketing', 39000.00),
            ('Smith', 55000, 'Sales', 54000.00),
            ('Brown', 53000, 'Sales', 54000.00),
        ], transform=lambda row: (row.name, row.salary, row.department, row.avg_salary))
    def test_lag(self):
        """
        Annotate each employee with the previous (next lowest) salary in
        their department, ordering by salary ascending (name breaks ties).
        The lowest-paid employee in each department gets None.
        """
        qs = Employee.objects.annotate(lag=Window(
            expression=Lag(expression='salary', offset=1),
            partition_by=F('department'),
            order_by=[F('salary').asc(), F('name').asc()],
        )).order_by('department', F('salary').asc(), F('name').asc())
        self.assertQuerysetEqual(qs, [
            ('Williams', 37000, 'Accounting', None),
            ('Jenson', 45000, 'Accounting', 37000),
            ('Jones', 45000, 'Accounting', 45000),
            ('Adams', 50000, 'Accounting', 45000),
            ('Moore', 34000, 'IT', None),
            ('Wilkinson', 60000, 'IT', 34000),
            ('Johnson', 80000, 'Management', None),
            ('Miller', 100000, 'Management', 80000),
            ('Smith', 38000, 'Marketing', None),
            ('Johnson', 40000, 'Marketing', 38000),
            ('Brown', 53000, 'Sales', None),
            ('Smith', 55000, 'Sales', 53000),
        ], transform=lambda row: (row.name, row.salary, row.department, row.lag))
    def test_first_value(self):
        """Every row carries the salary of its department's earliest hire."""
        qs = Employee.objects.annotate(first_value=Window(
            expression=FirstValue('salary'),
            partition_by=F('department'),
            order_by=F('hire_date').asc(),
        )).order_by('department', 'hire_date')
        self.assertQuerysetEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 45000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 45000),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 60000),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 38000),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 55000),
        ], lambda row: (row.name, row.salary, row.department, row.hire_date, row.first_value))
    def test_last_value(self):
        """
        With the default frame (up to current row), LastValue of hire_date
        equals each row's own hire_date.
        """
        qs = Employee.objects.annotate(last_value=Window(
            expression=LastValue('hire_date'),
            partition_by=F('department'),
            order_by=F('hire_date').asc(),
        ))
        self.assertQuerysetEqual(qs, [
            ('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, datetime.date(2013, 7, 1)),
            ('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, datetime.date(2008, 4, 1)),
            ('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, datetime.date(2005, 11, 1)),
            ('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, datetime.date(2009, 6, 1)),
            ('Moore', 'IT', datetime.date(2013, 8, 1), 34000, datetime.date(2013, 8, 1)),
            ('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, datetime.date(2011, 3, 1)),
            ('Miller', 'Management', datetime.date(2005, 6, 1), 100000, datetime.date(2005, 6, 1)),
            ('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, datetime.date(2005, 7, 1)),
            ('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, datetime.date(2012, 3, 1)),
            ('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, datetime.date(2009, 10, 1)),
            ('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, datetime.date(2009, 9, 1)),
            ('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, datetime.date(2007, 6, 1)),
        ], transform=lambda row: (row.name, row.department, row.hire_date, row.salary, row.last_value), ordered=False)
    def test_function_list_of_values(self):
        """
        A window annotation combined with values_list() must not introduce a
        GROUP BY clause (window functions are not aggregates here).
        """
        qs = Employee.objects.annotate(lead=Window(
            expression=Lead(expression='salary'),
            order_by=[F('hire_date').asc(), F('name').desc()],
            partition_by='department',
        )).values_list('name', 'salary', 'department', 'hire_date', 'lead') \
            .order_by('department', F('hire_date').asc(), F('name').desc())
        self.assertNotIn('GROUP BY', str(qs.query))
        self.assertSequenceEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
        ])
    def test_min_department(self):
        """An alternative way to specify a query for FirstValue."""
        # Min over a partition ordered by salary gives each row the lowest
        # salary of its department.
        qs = Employee.objects.annotate(min_salary=Window(
            expression=Min('salary'),
            partition_by=F('department'),
            order_by=[F('salary').asc(), F('name').asc()]
        )).order_by('department', 'salary', 'name')
        self.assertQuerysetEqual(qs, [
            ('Williams', 'Accounting', 37000, 37000),
            ('Jenson', 'Accounting', 45000, 37000),
            ('Jones', 'Accounting', 45000, 37000),
            ('Adams', 'Accounting', 50000, 37000),
            ('Moore', 'IT', 34000, 34000),
            ('Wilkinson', 'IT', 60000, 34000),
            ('Johnson', 'Management', 80000, 80000),
            ('Miller', 'Management', 100000, 80000),
            ('Smith', 'Marketing', 38000, 38000),
            ('Johnson', 'Marketing', 40000, 38000),
            ('Brown', 'Sales', 53000, 53000),
            ('Smith', 'Sales', 55000, 53000),
        ], lambda row: (row.name, row.department, row.salary, row.min_salary))
    def test_max_per_year(self):
        """
        Find the maximum salary awarded in the same year as the
        employee was hired, regardless of the department.
        """
        # Partitioning by an expression (ExtractYear) rather than a field.
        qs = Employee.objects.annotate(max_salary_year=Window(
            expression=Max('salary'),
            order_by=ExtractYear('hire_date').asc(),
            partition_by=ExtractYear('hire_date')
        )).order_by(ExtractYear('hire_date'), 'salary')
        self.assertQuerysetEqual(qs, [
            ('Jones', 'Accounting', 45000, 2005, 100000),
            ('Johnson', 'Management', 80000, 2005, 100000),
            ('Miller', 'Management', 100000, 2005, 100000),
            ('Smith', 'Sales', 55000, 2007, 55000),
            ('Jenson', 'Accounting', 45000, 2008, 45000),
            ('Williams', 'Accounting', 37000, 2009, 53000),
            ('Smith', 'Marketing', 38000, 2009, 53000),
            ('Brown', 'Sales', 53000, 2009, 53000),
            ('Wilkinson', 'IT', 60000, 2011, 60000),
            ('Johnson', 'Marketing', 40000, 2012, 40000),
            ('Moore', 'IT', 34000, 2013, 50000),
            ('Adams', 'Accounting', 50000, 2013, 50000),
        ], lambda row: (row.name, row.department, row.salary, row.hire_date.year, row.max_salary_year))
    def test_cume_dist(self):
        """
        Compute the cumulative distribution for the employees based on the
        salary in increasing order. Equal to rank/total number of rows (12).
        """
        qs = Employee.objects.annotate(cume_dist=Window(
            expression=CumeDist(),
            order_by=F('salary').asc(),
        )).order_by('salary', 'name')
        # Round result of cume_dist because Oracle uses greater precision.
        # Note the tied 45000 salaries both get 6/12 = 0.5.
        self.assertQuerysetEqual(qs, [
            ('Moore', 'IT', 34000, 0.0833333333),
            ('Williams', 'Accounting', 37000, 0.1666666667),
            ('Smith', 'Marketing', 38000, 0.25),
            ('Johnson', 'Marketing', 40000, 0.3333333333),
            ('Jenson', 'Accounting', 45000, 0.5),
            ('Jones', 'Accounting', 45000, 0.5),
            ('Adams', 'Accounting', 50000, 0.5833333333),
            ('Brown', 'Sales', 53000, 0.6666666667),
            ('Smith', 'Sales', 55000, 0.75),
            ('Wilkinson', 'IT', 60000, 0.8333333333),
            ('Johnson', 'Management', 80000, 0.9166666667),
            ('Miller', 'Management', 100000, 1),
        ], lambda row: (row.name, row.department, row.salary, round(row.cume_dist, 10)))
    def test_nthvalue(self):
        """
        NthValue(nth=2) is None until the partition's second row (in
        hire_date order) has been seen; afterwards it is that row's salary.
        """
        qs = Employee.objects.annotate(
            nth_value=Window(expression=NthValue(
                expression='salary', nth=2),
                order_by=[F('hire_date').asc(), F('name').desc()],
                partition_by=F('department'),
            )
        ).order_by('department', 'hire_date', 'name')
        self.assertQuerysetEqual(qs, [
            ('Jones', 'Accounting', datetime.date(2005, 11, 1), 45000, None),
            ('Jenson', 'Accounting', datetime.date(2008, 4, 1), 45000, 45000),
            ('Williams', 'Accounting', datetime.date(2009, 6, 1), 37000, 45000),
            ('Adams', 'Accounting', datetime.date(2013, 7, 1), 50000, 45000),
            ('Wilkinson', 'IT', datetime.date(2011, 3, 1), 60000, None),
            ('Moore', 'IT', datetime.date(2013, 8, 1), 34000, 34000),
            ('Miller', 'Management', datetime.date(2005, 6, 1), 100000, None),
            ('Johnson', 'Management', datetime.date(2005, 7, 1), 80000, 80000),
            ('Smith', 'Marketing', datetime.date(2009, 10, 1), 38000, None),
            ('Johnson', 'Marketing', datetime.date(2012, 3, 1), 40000, 40000),
            ('Smith', 'Sales', datetime.date(2007, 6, 1), 55000, None),
            ('Brown', 'Sales', datetime.date(2009, 9, 1), 53000, 53000),
        ], lambda row: (row.name, row.department, row.hire_date, row.salary, row.nth_value))
    def test_lead(self):
        """
        Determine what the next person hired in the same department makes.
        Because the dataset is ambiguous, the name is also part of the
        ordering clause. No default is provided, so None/NULL should be
        returned.
        """
        qs = Employee.objects.annotate(lead=Window(
            expression=Lead(expression='salary'),
            order_by=[F('hire_date').asc(), F('name').desc()],
            partition_by='department',
        )).order_by('department', F('hire_date').asc(), F('name').desc())
        # The last hire of each department has no successor, hence None.
        self.assertQuerysetEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 37000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 50000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 34000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 80000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 40000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 53000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
        ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead))
    def test_lead_offset(self):
        """
        Determine what the person hired two positions later (``offset=2``)
        in the same department makes. Partitions with two or fewer
        following rows yield None, since no default is provided.
        """
        qs = Employee.objects.annotate(lead=Window(
            expression=Lead('salary', offset=2),
            partition_by='department',
            order_by=F('hire_date').asc(),
        ))
        # Rows are (name, salary, department, hire_date, lead); compared
        # with ordered=False because no outer ordering is applied.
        self.assertQuerysetEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 37000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 50000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), None),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), None),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), None),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), None),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), None),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), None),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), None),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), None),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), None),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), None),
        ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.lead),
            ordered=False
        )
@skipUnlessDBFeature('supports_default_in_lead_lag')
def test_lead_default(self):
qs = Employee.objects.annotate(lead_default=Window(
expression=Lead(expression='salary', offset=5, default=60000),
partition_by=F('department'),
order_by=F('department').asc(),
))
self.assertEqual(list(qs.values_list('lead_default', flat=True).distinct()), [60000])
    def test_ntile(self):
        """
        Compute the group for each of the employees across the entire company,
        based on how high the salary is for them. There are twelve employees
        so it divides evenly into four groups.
        """
        qs = Employee.objects.annotate(ntile=Window(
            expression=Ntile(num_buckets=4),
            order_by=F('salary').desc(),
        )).order_by('ntile', '-salary', 'name')
        # Rows are (name, department, salary, ntile): three employees per
        # quartile, highest salaries in bucket 1.
        self.assertQuerysetEqual(qs, [
            ('Miller', 'Management', 100000, 1),
            ('Johnson', 'Management', 80000, 1),
            ('Wilkinson', 'IT', 60000, 1),
            ('Smith', 'Sales', 55000, 2),
            ('Brown', 'Sales', 53000, 2),
            ('Adams', 'Accounting', 50000, 2),
            ('Jenson', 'Accounting', 45000, 3),
            ('Jones', 'Accounting', 45000, 3),
            ('Johnson', 'Marketing', 40000, 3),
            ('Smith', 'Marketing', 38000, 4),
            ('Williams', 'Accounting', 37000, 4),
            ('Moore', 'IT', 34000, 4),
        ], lambda x: (x.name, x.department, x.salary, x.ntile))
    def test_percent_rank(self):
        """
        Calculate the percentage rank of the employees across the entire
        company based on salary and name (in case of ambiguity).
        """
        qs = Employee.objects.annotate(percent_rank=Window(
            expression=PercentRank(),
            order_by=[F('salary').asc(), F('name').asc()],
        )).order_by('percent_rank')
        # Round to account for precision differences among databases.
        # Rows are (name, department, salary, percent_rank); with 12 rows
        # the ranks step by 1/11 from 0.0 to 1.0.
        self.assertQuerysetEqual(qs, [
            ('Moore', 'IT', 34000, 0.0),
            ('Williams', 'Accounting', 37000, 0.0909090909),
            ('Smith', 'Marketing', 38000, 0.1818181818),
            ('Johnson', 'Marketing', 40000, 0.2727272727),
            ('Jenson', 'Accounting', 45000, 0.3636363636),
            ('Jones', 'Accounting', 45000, 0.4545454545),
            ('Adams', 'Accounting', 50000, 0.5454545455),
            ('Brown', 'Sales', 53000, 0.6363636364),
            ('Smith', 'Sales', 55000, 0.7272727273),
            ('Wilkinson', 'IT', 60000, 0.8181818182),
            ('Johnson', 'Management', 80000, 0.9090909091),
            ('Miller', 'Management', 100000, 1.0),
        ], transform=lambda row: (row.name, row.department, row.salary, round(row.percent_rank, 10)))
def test_nth_returns_null(self):
"""
Find the nth row of the data set. None is returned since there are
fewer than 20 rows in the test data.
"""
qs = Employee.objects.annotate(nth_value=Window(
expression=NthValue('salary', nth=20),
order_by=F('salary').asc()
))
self.assertEqual(list(qs.values_list('nth_value', flat=True).distinct()), [None])
    def test_multiple_partitioning(self):
        """
        Find the maximum salary for each department for people hired in the
        same year.
        """
        qs = Employee.objects.annotate(max=Window(
            expression=Max('salary'),
            partition_by=[F('department'), ExtractYear(F('hire_date'))],
        )).order_by('department', 'hire_date', 'name')
        # Rows are (name, salary, department, hire_date, max). Miller and
        # Johnson share the (Management, 2005) partition, so both get the
        # partition max of 100000.
        self.assertQuerysetEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 45000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 100000),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
        ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.max))
    def test_multiple_ordering(self):
        """
        Accumulate the salaries over the departments based on hire_date.
        If two people were hired on the same date in the same department, the
        ordering clause will render a different result for those people.
        """
        qs = Employee.objects.annotate(sum=Window(
            expression=Sum('salary'),
            partition_by='department',
            order_by=[F('hire_date').asc(), F('name').asc()],
        )).order_by('department', 'sum')
        # Rows are (name, salary, department, hire_date, sum): a running
        # total that restarts at each department boundary.
        self.assertQuerysetEqual(qs, [
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 45000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 127000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 177000),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 94000),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 180000),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 78000),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 108000),
        ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum))
    @skipIf(connection.vendor == 'postgresql', 'n following/preceding not supported by PostgreSQL')
    def test_range_n_preceding_and_following(self):
        """A query with RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING over salary."""
        qs = Employee.objects.annotate(sum=Window(
            expression=Sum('salary'),
            order_by=F('salary').asc(),
            partition_by='department',
            frame=ValueRange(start=-2, end=2),
        ))
        self.assertIn('RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING', str(qs.query))
        # Only rows whose salaries lie within 2 units of each other combine:
        # in this data that is just the two equal 45000 Accounting salaries.
        self.assertQuerysetEqual(qs, [
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 37000),
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 90000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 90000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 50000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 53000),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 55000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 40000),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 38000),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 60000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 34000),
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 100000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 80000),
        ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum), ordered=False)
    def test_range_unbound(self):
        """A query with RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING."""
        qs = Employee.objects.annotate(sum=Window(
            expression=Sum('salary'),
            partition_by='age',
            order_by=[F('age').asc()],
            frame=ValueRange(start=None, end=None),
        )).order_by('department', 'hire_date', 'name')
        self.assertIn('RANGE BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING', str(qs.query))
        # Partitioning is by age, so employees who share an age pool their
        # salaries; rows are (name, department, salary, hire_date, sum).
        self.assertQuerysetEqual(qs, [
            ('Jones', 'Accounting', 45000, datetime.date(2005, 11, 1), 165000),
            ('Jenson', 'Accounting', 45000, datetime.date(2008, 4, 1), 165000),
            ('Williams', 'Accounting', 37000, datetime.date(2009, 6, 1), 165000),
            ('Adams', 'Accounting', 50000, datetime.date(2013, 7, 1), 130000),
            ('Wilkinson', 'IT', 60000, datetime.date(2011, 3, 1), 194000),
            ('Moore', 'IT', 34000, datetime.date(2013, 8, 1), 194000),
            ('Miller', 'Management', 100000, datetime.date(2005, 6, 1), 194000),
            ('Johnson', 'Management', 80000, datetime.date(2005, 7, 1), 130000),
            ('Smith', 'Marketing', 38000, datetime.date(2009, 10, 1), 165000),
            ('Johnson', 'Marketing', 40000, datetime.date(2012, 3, 1), 148000),
            ('Smith', 'Sales', 55000, datetime.date(2007, 6, 1), 148000),
            ('Brown', 'Sales', 53000, datetime.date(2009, 9, 1), 148000)
        ], transform=lambda row: (row.name, row.department, row.salary, row.hire_date, row.sum))
    def test_row_range_rank(self):
        """
        A query with ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING.
        The resulting sum is the sum of the three next (if they exist) and all
        previous rows according to the ordering clause.
        """
        qs = Employee.objects.annotate(sum=Window(
            expression=Sum('salary'),
            order_by=[F('hire_date').asc(), F('name').desc()],
            frame=RowRange(start=None, end=3),
        )).order_by('sum', 'hire_date')
        self.assertIn('ROWS BETWEEN UNBOUNDED PRECEDING AND 3 FOLLOWING', str(qs.query))
        # Rows are (name, salary, department, hire_date, sum); the final
        # four rows all see the full company total of 637000.
        self.assertQuerysetEqual(qs, [
            ('Miller', 100000, 'Management', datetime.date(2005, 6, 1), 280000),
            ('Johnson', 80000, 'Management', datetime.date(2005, 7, 1), 325000),
            ('Jones', 45000, 'Accounting', datetime.date(2005, 11, 1), 362000),
            ('Smith', 55000, 'Sales', datetime.date(2007, 6, 1), 415000),
            ('Jenson', 45000, 'Accounting', datetime.date(2008, 4, 1), 453000),
            ('Williams', 37000, 'Accounting', datetime.date(2009, 6, 1), 513000),
            ('Brown', 53000, 'Sales', datetime.date(2009, 9, 1), 553000),
            ('Smith', 38000, 'Marketing', datetime.date(2009, 10, 1), 603000),
            ('Wilkinson', 60000, 'IT', datetime.date(2011, 3, 1), 637000),
            ('Johnson', 40000, 'Marketing', datetime.date(2012, 3, 1), 637000),
            ('Adams', 50000, 'Accounting', datetime.date(2013, 7, 1), 637000),
            ('Moore', 34000, 'IT', datetime.date(2013, 8, 1), 637000),
        ], transform=lambda row: (row.name, row.salary, row.department, row.hire_date, row.sum))
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_window_function(self):
"""
Window functions are not aggregates, and hence a query to filter out
duplicates may be useful.
"""
qs = Employee.objects.annotate(
sum=Window(
expression=Sum('salary'),
partition_by=ExtractYear('hire_date'),
order_by=ExtractYear('hire_date')
),
year=ExtractYear('hire_date'),
).values('year', 'sum').distinct('year').order_by('year')
results = [
{'year': 2005, 'sum': 225000}, {'year': 2007, 'sum': 55000},
{'year': 2008, 'sum': 45000}, {'year': 2009, 'sum': 128000},
{'year': 2011, 'sum': 60000}, {'year': 2012, 'sum': 40000},
{'year': 2013, 'sum': 84000},
]
for idx, val in zip(range(len(results)), results):
with self.subTest(result=val):
self.assertEqual(qs[idx], val)
def test_fail_update(self):
"""Window expressions can't be used in an UPDATE statement."""
msg = 'Window expressions are not allowed in this query'
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.filter(department='Management').update(
salary=Window(expression=Max('salary'), partition_by='department'),
)
def test_fail_insert(self):
"""Window expressions can't be used in an INSERT statement."""
msg = 'Window expressions are not allowed in this query'
with self.assertRaisesMessage(FieldError, msg):
Employee.objects.create(
name='Jameson', department='Management', hire_date=datetime.date(2007, 7, 1),
salary=Window(expression=Sum(Value(10000), order_by=F('pk').asc())),
)
    def test_window_expression_within_subquery(self):
        """Window expressions may be used inside a subquery fed to __in."""
        subquery_qs = Employee.objects.annotate(
            highest=Window(FirstValue('id'), partition_by=F('department'), order_by=F('salary').desc())
        ).values('highest')
        highest_salary = Employee.objects.filter(pk__in=subquery_qs)
        # One row per department: FirstValue over the salary-descending
        # ordering picks each department's top earner.
        self.assertSequenceEqual(highest_salary.values('department', 'salary'), [
            {'department': 'Accounting', 'salary': 50000},
            {'department': 'Sales', 'salary': 55000},
            {'department': 'Marketing', 'salary': 40000},
            {'department': 'IT', 'salary': 60000},
            {'department': 'Management', 'salary': 100000}
        ])
def test_invalid_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got '3'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(start=3),
)))
def test_invalid_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got '-3'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end=-3),
)))
def test_invalid_type_end_value_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end='a'),
)))
def test_invalid_type_start_value_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
frame=ValueRange(start='a'),
)))
def test_invalid_type_end_row_range(self):
msg = "end argument must be a positive integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
frame=RowRange(end='a'),
)))
@skipUnless(connection.vendor == 'postgresql', 'Frame construction not allowed on PostgreSQL')
def test_postgresql_illegal_range_frame_start(self):
msg = 'PostgreSQL only supports UNBOUNDED together with PRECEDING and FOLLOWING.'
with self.assertRaisesMessage(NotSupportedError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(start=-1),
)))
@skipUnless(connection.vendor == 'postgresql', 'Frame construction not allowed on PostgreSQL')
def test_postgresql_illegal_range_frame_end(self):
msg = 'PostgreSQL only supports UNBOUNDED together with PRECEDING and FOLLOWING.'
with self.assertRaisesMessage(NotSupportedError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=ValueRange(end=1),
)))
def test_invalid_type_start_row_range(self):
msg = "start argument must be a negative integer, zero, or None, but got 'a'."
with self.assertRaisesMessage(ValueError, msg):
list(Employee.objects.annotate(test=Window(
expression=Sum('salary'),
order_by=F('hire_date').asc(),
frame=RowRange(start='a'),
)))
class NonQueryWindowTests(SimpleTestCase):
    """Window/WindowFrame behavior that needs no database access."""
    def test_window_repr(self):
        """repr() of a Window shows the expression and OVER clause parts."""
        self.assertEqual(
            repr(Window(expression=Sum('salary'), partition_by='department')),
            '<Window: Sum(F(salary)) OVER (PARTITION BY F(department))>'
        )
        self.assertEqual(
            repr(Window(expression=Avg('salary'), order_by=F('department').asc())),
            '<Window: Avg(F(salary)) OVER (ORDER BY OrderBy(F(department), descending=False))>'
        )
    def test_window_frame_repr(self):
        """repr() of frames spells out the SQL bounds they produce."""
        self.assertEqual(
            repr(RowRange(start=-1)),
            '<RowRange: ROWS BETWEEN 1 PRECEDING AND UNBOUNDED FOLLOWING>'
        )
        self.assertEqual(
            repr(ValueRange(start=None, end=1)),
            '<ValueRange: RANGE BETWEEN UNBOUNDED PRECEDING AND 1 FOLLOWING>'
        )
        self.assertEqual(
            repr(ValueRange(start=0, end=0)),
            '<ValueRange: RANGE BETWEEN CURRENT ROW AND CURRENT ROW>'
        )
        self.assertEqual(
            repr(RowRange(start=0, end=0)),
            '<RowRange: ROWS BETWEEN CURRENT ROW AND CURRENT ROW>'
        )
    def test_empty_group_by_cols(self):
        """A Window contributes nothing to GROUP BY and isn't an aggregate."""
        window = Window(expression=Sum('pk'))
        self.assertEqual(window.get_group_by_cols(), [])
        self.assertFalse(window.contains_aggregate)
    def test_frame_empty_group_by_cols(self):
        """A WindowFrame contributes nothing to GROUP BY."""
        frame = WindowFrame()
        self.assertEqual(frame.get_group_by_cols(), [])
    def test_frame_window_frame_notimplemented(self):
        """The WindowFrame base class requires window_frame_start_end()."""
        frame = WindowFrame()
        msg = 'Subclasses must implement window_frame_start_end().'
        with self.assertRaisesMessage(NotImplementedError, msg):
            frame.window_frame_start_end(None, None, None)
    def test_invalid_filter(self):
        """Window expressions can't be referenced in a filter clause."""
        msg = 'Window is disallowed in the filter clause'
        with self.assertRaisesMessage(NotSupportedError, msg):
            Employee.objects.annotate(dense_rank=Window(expression=DenseRank())).filter(dense_rank__gte=1)
    def test_invalid_order_by(self):
        """order_by must be an expression or a sequence of expressions."""
        msg = 'order_by must be either an Expression or a sequence of expressions'
        with self.assertRaisesMessage(ValueError, msg):
            Window(expression=Sum('power'), order_by='-horse')
    def test_invalid_source_expression(self):
        """Non-window-compatible expressions are rejected up front."""
        msg = "Expression 'Upper' isn't compatible with OVER clauses."
        with self.assertRaisesMessage(ValueError, msg):
            Window(expression=Upper('name'))
| {
"content_hash": "7baa68aad094dc1fa37491aa29f85095",
"timestamp": "",
"source": "github",
"line_count": 798,
"max_line_length": 118,
"avg_line_length": 51.946115288220554,
"alnum_prop": 0.5659421513521338,
"repo_name": "frankvdp/django",
"id": "ba757dfb947d108565f1256cddf157fd22c57ac6",
"size": "41453",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/expressions_window/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52098"
},
{
"name": "HTML",
"bytes": "174031"
},
{
"name": "JavaScript",
"bytes": "249623"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11310936"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import urllib
import logging
from django import forms
from django.db import models
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import pre_save
from .. widgets import APIChoiceWidget, APIModelChoiceWidget
logger = logging.getLogger(__name__)
class TaggedRelationWidget(APIModelChoiceWidget):
    """Choice widget whose rendered markup carries tag metadata.

    The wrapping <div> exposes data-* attributes (tags, title, api url,
    add url) consumed by the front-end "api-select" JavaScript.
    """
    template = u'<div class="api-select" data-tags="%(tags)s" data-title="%(value)s" data-api="%(link)s" data-add="%(add_link)s">%(input)s</div>'
    def __init__(self, *args, **kwargs):
        # Imported locally, presumably to avoid a circular import at module
        # load time -- verify before hoisting to the top of the file.
        from . import handler
        super(TaggedRelationWidget, self).__init__(*args, **kwargs)
        # Pop the tag configuration out of the HTML attrs so it is not
        # rendered as literal attributes on the underlying input element.
        if self.attrs:
            self.tags = handler.tags_to_string(self.attrs.pop('tags', []))
            self.required_tags = handler.tags_to_string(
                self.attrs.pop('required_tags', []))
        else:
            self.tags = ''
            self.required_tags = ''
    def get_qs(self):
        # Query-string parameters for the API listing url; only required
        # tags restrict the listing.
        qs = {}
        if self.required_tags:
            qs['required_tags'] = self.required_tags
        return qs
    def get_add_qs(self):
        # NOTE(review): get_qs() in this class never sets 'ftype';
        # presumably a subclass adds it -- confirm before relying on
        # this 'ftype' -> 'type' rename.
        qs = self.get_qs()
        if 'ftype' in qs:
            qs['type'] = qs.pop('ftype')
        return qs
    def get_add_link(self):
        """
        Appends the popup=1 query string to the url so the
        destination url treats it as a popup.
        """
        url = super(TaggedRelationWidget, self).get_add_link()
        if url:
            qs = self.get_add_qs()
            if qs:
                url = "%s&%s" % (url, urllib.urlencode(qs))
        return url
    def render(self, name, value, attrs=None, choices=()):
        # NOTE(review): auto_tags is cleared here but never read in this
        # class; presumably consumed by collaborating code -- verify.
        self.auto_tags = None
        data = {
            # super(APIChoiceWidget, ...) deliberately skips
            # APIChoiceWidget's own render to obtain the bare input markup.
            'input': super(APIChoiceWidget, self).render(name, value,
                attrs=attrs),
            'value': conditional_escape(self.label_for_value(value)),
            'link': self.get_api_link(),
            'add_link': self.get_add_link(),
            'tags': self.tags,
            'required_tags': self.required_tags
        }
        return mark_safe(self.template % data)
    def obj_for_value(self, value, key='pk'):
        # Resolve the raw form value to a model instance; falls back to
        # the related field's name when key is falsy. Returns None when
        # the value is missing, malformed, or matches no row.
        if not key:
            key = self.rel.get_related_field().name
        if value is not None:
            try:
                obj = self.model._default_manager.using(self.db
                    ).get(**{key: value})
                return obj
            except (ValueError, self.model.DoesNotExist):
                return None
        return None
class TaggedRelationFormField(forms.ModelChoiceField):
    """ModelChoiceField that forwards tag configuration to its widget."""
    widget = TaggedRelationWidget
    def __init__(self, **kwargs):
        widget_instance = kwargs.pop('widget', None)
        # Type/Tags
        self.tags = kwargs.pop('tags', None)
        self.required_tags = kwargs.pop('required_tags', None)
        queryset = kwargs.pop('queryset')
        # Build a default widget unless the caller already supplied a
        # widget class or an instance of this field's widget type.
        if not isinstance(widget_instance, type) and not \
            isinstance(widget_instance, self.widget):
            attrs = {}
            attrs['tags'] = self.tags
            attrs['required_tags'] = self.required_tags
            widget_instance = self.widget(queryset.model, attrs=attrs)
        kwargs['widget'] = widget_instance
        super(TaggedRelationFormField, self).__init__(queryset, **kwargs)
    def widget_attrs(self, widget):
        # Mirror the field's required flag onto the widget; no extra HTML
        # attributes are contributed.
        widget.required = self.required
        return {}
class TaggedRelationField(models.ForeignKey):
    """ForeignKey that carries tag metadata and applies it on save.

    ``required_tags`` are always folded into ``tags``, and a pre_save
    handler pushes the tags onto the related object.
    """
    default_form_class = TaggedRelationFormField
    def __init__(self, *args, **kwargs):
        self.required_tags = kwargs.pop('required_tags', tuple())
        self.tags = kwargs.pop('tags', tuple())
        # Required tags must also appear in tags: merge the two sets.
        if self.required_tags:
            if self.tags:
                self.tags = tuple(set(self.tags).union(
                    set(self.required_tags)))
            else:
                self.tags = tuple(self.required_tags)
        return super(TaggedRelationField, self).__init__(
            *args, **kwargs)
    def get_formfield_defaults(self):
        # Defaults handed to formfield(); callers may override any of them.
        return {
            'form_class': self.default_form_class,
            'tags': self.tags,
            'required_tags': self.required_tags
        }
    def formfield(self, **kwargs):
        # This is a fairly standard way to set up some defaults
        # while letting the caller override them.
        defaults = self.get_formfield_defaults()
        defaults.update(kwargs)
        return super(TaggedRelationField, self).formfield(**defaults)
    def contribute_to_class(self, cls, *args, **kwargs):
        # Register the pre_save hook once the field is attached to a model.
        super(TaggedRelationField, self).contribute_to_class(
            cls, *args, **kwargs)
        pre_save.connect(save_auto_tags, sender=cls)
def save_auto_tags(sender, instance, **kwargs):
    """pre_save handler: push each TaggedRelationField's tags onto the
    related object as pending tags.

    Skipped entirely for models flagged as view models via
    ``_meta._view_model``; a missing related object is silently ignored.
    """
    if getattr(sender._meta, '_view_model', None):
        return
    for model_field in instance._meta.fields:
        if not isinstance(model_field, TaggedRelationField):
            continue
        try:
            related_obj = getattr(instance, model_field.name)
            if related_obj:
                related_obj.add_pending_tags(model_field.tags)
        except ObjectDoesNotExist:
            pass
| {
"content_hash": "2c2369c085ab916a1f3bef411812848a",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 145,
"avg_line_length": 34.21568627450981,
"alnum_prop": 0.566189111747851,
"repo_name": "ff0000/scarlet",
"id": "d9f3d7cf1a84df2e794c135bcf1141db7a825a6d",
"size": "5235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scarlet/cms/internal_tags/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "217430"
},
{
"name": "HTML",
"bytes": "43216"
},
{
"name": "JavaScript",
"bytes": "2200686"
},
{
"name": "Python",
"bytes": "508579"
},
{
"name": "Ruby",
"bytes": "485"
},
{
"name": "Shell",
"bytes": "1813"
}
],
"symlink_target": ""
} |
import weakref
from abc import ABCMeta, abstractmethod
from typing import Any, ClassVar, Generic, MutableMapping, TypeVar
_KeyT = TypeVar("_KeyT")
_DataT = TypeVar("_DataT")
class BaseNotes(Generic[_KeyT, _DataT], metaclass=ABCMeta):
    """Generic, type-safe side table for attaching extra data to objects.

    Rather than monkey-patching or subclassing a third-party object (the
    original motivation was the Django HttpRequest), computed state is kept
    in a class-level WeakKeyDictionary keyed by the object itself.  Entries
    disappear automatically once the key object is garbage-collected.

    Caution: the notes value must not hold a strong reference back to the
    key object, or the resulting cycle may defeat the weak-reference
    cleanup.
    """

    __notes_map: ClassVar[MutableMapping[Any, Any]] = weakref.WeakKeyDictionary()

    @classmethod
    def get_notes(cls, key: _KeyT) -> _DataT:
        # Lazily create the notes object on first access for this key.
        notes_map = cls.__notes_map
        if key not in notes_map:
            notes_map[key] = cls.init_notes()
        return notes_map[key]

    @classmethod
    def set_notes(cls, key: _KeyT, notes: _DataT) -> None:
        # Replace (or create) the notes associated with ``key``.
        cls.__notes_map[key] = notes

    @classmethod
    @abstractmethod
    def init_notes(cls) -> _DataT:
        ...
| {
"content_hash": "6a18f35387e235a4698801402c203493",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 81,
"avg_line_length": 35.47826086956522,
"alnum_prop": 0.6948529411764706,
"repo_name": "eeshangarg/zulip",
"id": "ae6885bddab9527753d379c94753c92dbe1faee0",
"size": "1632",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "zerver/lib/notes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "484233"
},
{
"name": "Dockerfile",
"bytes": "5056"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "713408"
},
{
"name": "Handlebars",
"bytes": "343958"
},
{
"name": "JavaScript",
"bytes": "3738321"
},
{
"name": "Perl",
"bytes": "9884"
},
{
"name": "Puppet",
"bytes": "106355"
},
{
"name": "Python",
"bytes": "9442083"
},
{
"name": "Ruby",
"bytes": "3250"
},
{
"name": "Shell",
"bytes": "135667"
},
{
"name": "TypeScript",
"bytes": "275302"
}
],
"symlink_target": ""
} |
import numpy as np
import math
import random
import itertools
import collections
class NeuralNet:
def __init__(self, n_inputs, n_outputs, n_hiddens, n_hidden_layers, activation_functions ):
self.n_inputs = n_inputs # Number of network input signals
self.n_outputs = n_outputs # Number of desired outputs from the network
self.n_hiddens = n_hiddens # Number of nodes in each hidden layer
self.n_hidden_layers = n_hidden_layers # Number of hidden layers in the network
self.activation_functions = activation_functions
assert len(activation_functions)==(n_hidden_layers+1), "Requires "+(n_hidden_layers+1)+" activation functions, got: "+len(activation_functions)+"."
if n_hidden_layers == 0:
# Count the necessary number of weights for the input->output connection.
# input -> [] -> output
self.n_weights = ((n_inputs+1)*n_outputs)
else:
# Count the necessary number of weights summed over all the layers.
# input -> [n_hiddens -> n_hiddens] -> output
self.n_weights = (n_inputs+1)*n_hiddens+\
(n_hiddens**2+n_hiddens)*(n_hidden_layers-1)+\
n_hiddens*n_outputs+n_outputs
# Initialize the network with new randomized weights
self.set_weights( self.generate_weights() )
#end
def generate_weights(self, low=-0.1, high=0.1):
# Generate new random weights for all the connections in the network
if not False:
# Support NumPy
return [random.uniform(low,high) for _ in xrange(self.n_weights)]
else:
return np.random.uniform(low, high, size=(1,self.n_weights)).tolist()[0]
#end
def unpack(self, weight_list ):
# This method will create a list of weight matrices. Each list element
# corresponds to the connection between two layers.
if self.n_hidden_layers == 0:
return [ np.array(weight_list).reshape(self.n_inputs+1,self.n_outputs) ]
else:
weight_layers = [ np.array(weight_list[:(self.n_inputs+1)*self.n_hiddens]).reshape(self.n_inputs+1,self.n_hiddens) ]
weight_layers += [ np.array(weight_list[(self.n_inputs+1)*self.n_hiddens+(i*(self.n_hiddens**2+self.n_hiddens)):(self.n_inputs+1)*self.n_hiddens+((i+1)*(self.n_hiddens**2+self.n_hiddens))]).reshape(self.n_hiddens+1,self.n_hiddens) for i in xrange(self.n_hidden_layers-1) ]
weight_layers += [ np.array(weight_list[(self.n_inputs+1)*self.n_hiddens+((self.n_hidden_layers-1)*(self.n_hiddens**2+self.n_hiddens)):]).reshape(self.n_hiddens+1,self.n_outputs) ]
return weight_layers
#end
def set_weights(self, weight_list ):
# This is a helper method for setting the network weights to a previously defined list.
# This is useful for utilizing a previously optimized neural network weight set.
self.weights = self.unpack( weight_list )
#end
def get_weights(self, ):
# This will stack all the weights in the network on a list, which may be saved to the disk.
return [w for l in self.weights for w in l.flat]
#end
def backpropagation(self, trainingset, ERROR_LIMIT=1e-3, learning_rate=0.3, momentum_factor=0.9 ):
def addBias(A):
# Add 1 as bias.
return np.hstack(( np.ones((A.shape[0],1)), A ))
#end addBias
assert trainingset[0].features.shape[0] == self.n_inputs, "ERROR: input size varies from the defined input setting"
assert trainingset[0].targets.shape[0] == self.n_outputs, "ERROR: output size varies from the defined output setting"
training_data = np.array( [instance.features for instance in trainingset ] )
training_targets = np.array( [instance.targets for instance in trainingset ] )
MSE = ( ) # inf
neterror = None
momentum = collections.defaultdict( int )
epoch = 0
while MSE > ERROR_LIMIT:
epoch += 1
input_layers = self.update( training_data, trace=True )
out = input_layers[-1]
error = training_targets - out
delta = error
MSE = np.mean( np.power(error,2) )
loop = itertools.izip(
xrange(len(self.weights)-1, -1, -1),
reversed(self.weights),
reversed(input_layers[:-1]),
)
for i, weight_layer, input_signals in loop:
# Loop over the weight layers in reversed order to calculate the deltas
# Calculate weight change
dW = learning_rate * np.dot( addBias(input_signals).T, delta ) + momentum_factor * momentum[i]
if i!= 0:
"""Do not calculate the delta unnecessarily."""
# Skipping the bias weight during calculation.
weight_delta = np.dot( delta, weight_layer[1:,:].T )
# Calculate the delta for the subsequent layer
delta = np.multiply( weight_delta, self.activation_functions[i-1]( input_signals, derivative=True) )
# Store the momentum
momentum[i] = dW
# Update the weights
self.weights[ i ] += dW
if epoch%1000==0:
# Show the current training status
print "* current network error (MSE):", MSE
print "* Converged to error bound (%.4g) with MSE = %.4g." % ( ERROR_LIMIT, MSE )
print "* Trained for %d epochs." % epoch
# end backprop
def update(self, input_values, trace=False ):
# This is a forward operation in the network. This is how we calculate the network output
# from a set of input signals.
output = input_values
if trace: tracelist = [ output ]
for i, weight_layer in enumerate(self.weights):
# Loop over the network layers and calculate the output
output = np.dot( output, weight_layer[1:,:] ) + weight_layer[0:1,:] # implicit bias
output = self.activation_functions[i]( output )
if trace: tracelist.append( output )
if trace: return tracelist
return output
#end
def save_to_file(self, filename = "network.pkl" ):
import cPickle
"""
This save method pickles the parameters of the current network into a
binary file for persistant storage.
"""
with open( filename , 'wb') as file:
store_dict = {
"n_inputs" : self.n_inputs,
"n_outputs" : self.n_outputs,
"n_hiddens" : self.n_hiddens,
"n_hidden_layers" : self.n_hidden_layers,
"activation_functions" : self.activation_functions,
"n_weights" : self.n_weights,
"weights" : self.weights
}
cPickle.dump( store_dict, file, 2 )
#end
@staticmethod
def load_from_file( filename = "network.pkl" ):
"""
Load the complete configuration of a previously stored network.
"""
network = NeuralNet( 0, 0, 0, 0, [0] )
with open( filename , 'rb') as file:
import cPickle
store_dict = cPickle.load(file)
network.n_inputs = store_dict["n_inputs"]
network.n_outputs = store_dict["n_outputs"]
network.n_hiddens = store_dict["n_hiddens"]
network.n_hidden_layers = store_dict["n_hidden_layers"]
network.n_weights = store_dict["n_weights"]
network.weights = store_dict["weights"]
network.activation_functions = store_dict["activation_functions"]
return network
#end
#end class | {
"content_hash": "5191cff7b483a0a60aa00509bebb46ae",
"timestamp": "",
"source": "github",
"line_count": 193,
"max_line_length": 284,
"avg_line_length": 44.04663212435233,
"alnum_prop": 0.5400541112810258,
"repo_name": "vibhatha/python-neural-network",
"id": "4fb7b405bead42818b95ca4d4b3cbd4c1fdd040a",
"size": "8501",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backprop/neuralnet.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "12706"
}
],
"symlink_target": ""
} |
from copy import copy
# Characters treated as word boundaries by Autocompleter.
# BUGFIX: '/' appeared twice in the original list; the duplicate has been
# removed (membership semantics are unchanged).
DEFAULT_SPECIAL_CHARS = [
    ' ', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '=', '+',
    '{', '}', '[', ']', '\\', '|', ':', ';', "'", '"', '/', ',', '<', '.',
    '>', '?'
]
class Autocompleter(object):
    """Word-completion helper backed by a vocabulary set."""
    # Class-level defaults; __init__ copies both onto each instance.
    autocomplete_words = set()
    '''Set with autocomplete words'''
    special_chars = DEFAULT_SPECIAL_CHARS  # word-boundary characters
def __init__(self):
self.autocomplete_words = copy(self.autocomplete_words)
self.special_chars = copy(self.special_chars)
def add_word(self, word):
self.autocomplete_words.add(word)
def add_words_from_text(self, text):
text2 = text
for x in (' ', '!', '@', '$', '#', '%', '^', '&', '*', '(', ')',
'-', '=', '+', '{', '}', '[', ']', '\\', '|', '?',
';', ':', '<', '>', ',', '.', '/', '1', '2', '3',
'4', '5', '6', '7', '8', '9', '`', '~'):
text2 = text2.replace(x, '0')
text2 = text2.replace("'", '0')
text2 = text2.replace('"', '0')
text2 = text2.replace('"', '0')
words = text2.split('0')
for word in words:
word = word.strip()
if word:
word = word.lower()
if word not in self.autocomplete_words:
self.autocomplete_words.add(word)
def find_nearest_special(self, text, cursor_index):
text = text[:cursor_index]
rev_text = text[::-1]
nearest = -1
for x in self.special_chars:
b = rev_text.find(x)
if b != -1:
if nearest == -1:
nearest = b
elif nearest > b:
nearest = b
if nearest == -1:
nearest = 0
else:
nearest = len(text) - nearest
return nearest
def autocomplete(self, text, cursor_index):
found_words, insert_text = [], ''
if not text:
return found_words, insert_text
len_text = len(text)
insert_text = ''
# looks for a special characters before cursor,
# then sets word start slice number
start = self.find_nearest_special(text, cursor_index)
if start != -1:
word = text[start:cursor_index]
# Does nothing when word is empty
if word:
len_word = len(word)
# Looks for matching strings in autocomplete_words
# Appends all results to found list
for x in self.autocomplete_words:
if x[:len(word)].lower() == word.lower():
found_words.append(x)
len_found_words = len(found_words)
# If only one word found, insert text from it
if len_found_words:
if len_found_words == 1:
insert_text = found_words[0][len_word:]
if len_text == cursor_index:
insert_text = insert_text + ' '
else:
# If multiple words found, looks for and adds
# matching characters untill word_min_len index
# is reached, then stops and returns character string
# for inserting
found_lens = [len(x) for x in found_words]
word_min_len = min(found_lens)
len_word = len(word)
match_index = len_word
for char in found_words[0][match_index:word_min_len]:
are_matching = True
for x in found_words[1:word_min_len]:
if x[match_index].lower() != char.lower():
are_matching = False
break
match_index += 1
if are_matching:
insert_text += char
else:
break
if word_min_len - 1 < match_index:
break
return found_words, insert_text
| {
"content_hash": "712a71284876680986c22f17cace5097",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 74,
"avg_line_length": 37.53636363636364,
"alnum_prop": 0.4298861709857108,
"repo_name": "Bakterija/mmplayer",
"id": "fb3f1ccf6b18499671e9b80003a86506ae1c3517",
"size": "4129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmplayer/kivy_soil/utils/autocomplete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "450641"
}
],
"symlink_target": ""
} |
"""
Tools to handle reads sequenced with unique molecular identifiers (UMIs).
"""
from __future__ import print_function
import editdist
import gzip
import os
import re
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import Counter, defaultdict
from itertools import islice, izip
from pysam import index, Samfile
try:
from itertools import izip as zip
except ImportError:
pass
from ._version import __version__
IUPAC = {
"A": "A",
"T": "T",
"C": "C",
"G": "G",
"R": "GA",
"Y": "TC",
"M": "AC",
"K": "GT",
"S": "GC",
"W": "AT",
"H": "ACT",
"B": "GTC",
"V": "GCA",
"D": "GAT",
"N": "GATC"
}
UMI_REGEX = re.compile(r'UMI_([\w]*)')
gzopen = lambda f: gzip.open(f) if f.endswith(".gz") else open(f)
class UMINotFound(Exception):
    """Raised when a read name carries no ``UMI_`` annotation."""
class Fastq(object):
    """A single FASTQ record: name, sequence, and quality string.

    >>> fq = Fastq(["@illumina_naming_scheme", "ACTGACTG", "+", "KKKKKKKK"])
    >>> fq.name, fq.seq, fq.qual
    ('illumina_naming_scheme', 'ACTGACTG', 'KKKKKKKK')
    >>> fq
    Fastq(illumina_naming_scheme)
    >>> print(fq)
    @illumina_naming_scheme
    ACTGACTG
    +
    KKKKKKKK
    >>> fq = Fastq(["@fail", "ACTGACTG", "+", "KKK"]) # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    AssertionError: Seq and Qual vary in length
    """

    def __init__(self, args):
        # args holds the four FASTQ lines: header, sequence, '+', quality
        header, sequence, quality = args[0], args[1], args[3]
        self.name = header[1:]  # strip the leading '@'
        self.seq = sequence
        self.qual = quality
        assert len(self.seq) == len(self.qual), "Seq and Qual vary in length"

    def __repr__(self):
        return "Fastq(%s)" % self.name

    def __str__(self):
        return "@%s\n%s\n+\n%s" % (self.name, self.seq, self.qual)
def is_indexed(bam):
    """Ensure a ``.bai`` index exists for *bam*, creating one if missing.

    Exits the process with status 1 when indexing fails.
    """
    bai = "%s.bai" % bam
    if not os.path.exists(bai):
        # pysam's index() returns nothing even when it fails, so re-check
        index(bam)
    if not os.path.exists(bai):
        print("index not found for %s and indexing failed" % bam)
        sys.exit(1)
    return True
def umi_from_name(name):
    """Extract the UMI sequence embedded in a read name.

    Args:
        name (str): Name of the sequence

    Returns:
        str: UMI sequence

    Raises:
        UMINotFound: when the name carries no ``UMI_`` tag

    >>> umi_from_name("cluster_1017333:UMI_GCCGCA")
    'GCCGCA'
    >>> umi_from_name("TEST") # doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    UMINotFound: TEST
    """
    try:
        return UMI_REGEX.findall(name)[0].strip()
    except IndexError:
        raise UMINotFound(name)
def is_similar(query, targets, n):
    """Report whether *query* is within *n* edits of any sequence in *targets*.

    Args:
        query (str): query sequence
        targets (set): unique sequences
        n (int): allowable mismatches when comparing a query to a given sequence of the targets

    Returns:
        bool

    >>> import editdist
    >>> s = "ACTGA"
    >>> ts_1 = {"ACTGG"}
    >>> ts_2 = {"ACTCC", "ACTGG"}
    >>> ts_3 = {"ACTCC", "ACTTT"}
    >>> n = 1
    >>> is_similar(s, ts_1, n)
    True
    >>> is_similar(s, ts_2, n)
    True
    >>> is_similar(s, ts_3, n)
    False
    """
    # any() short-circuits on the first close-enough target and is False
    # for an empty target set, matching the original loop exactly.
    return any(editdist.distance(candidate, query) <= n for candidate in targets)
def process_bam(abam, bbam, mismatches=0):
    """Removes duplicate reads characterized by their UMI at any given start location.

    Reads whose UMI has already been seen at the same alignment start are
    dropped; surviving reads are written to *bbam*.  Per-position
    before/after counts are printed to stdout as BED3+ lines
    (chrom, start, start+1, reads_before, unique_umis_after).

    Args:
        abam (str): Input bam with potential duplicate UMIs
        bbam (str): Output bam after removing duplicate UMIs
        mismatches (Optional[int]): Allowable edit distance between UMIs
    """
    # NOTE(review): relies on the legacy pysam attribute names
    # (qname/pos/alen) -- confirm against the pysam version in use.
    is_indexed(abam)
    with Samfile(abam, 'rb') as in_bam, Samfile(bbam, 'wb', template=in_bam) as out_bam:
        for chrom in in_bam.references:
            print("processing chromosome", chrom, file=sys.stderr)
            # per-start-position set of UMIs already written out
            umi_idx = defaultdict(set)
            # per-start-position count of all mapped reads (pre-dedup)
            read_counts = Counter()
            for read in in_bam.fetch(chrom):
                if read.is_unmapped:
                    continue
                # get the iupac umi sequence
                try:
                    umi = umi_from_name(read.qname)
                except UMINotFound:
                    print("You may be processing alignments that haven't been annotated with UMIs!", file=sys.stderr)
                    raise
                # get actual read start
                # read.pos accounts for 5' soft clipping
                if read.is_reverse:
                    # read.alen alignment length accounting for 3' soft clipping
                    # UMIs are then compared to reads with the same start
                    read_start = read.pos + read.alen
                else:
                    read_start = read.pos
                # add count for this start; counts all reads
                read_counts[read_start] += 1
                # check if UMI seen
                if umi in umi_idx[read_start]:
                    continue
                # check if UMI is similar enough to another that has been seen
                if mismatches > 0 and is_similar(umi, umi_idx[read_start], mismatches):
                    # do not count; group similar UMIs into one
                    continue
                # keep track of unique UMIs - set eliminates duplicates
                umi_idx[read_start].add(umi)
                out_bam.write(read)
            # process before and after counts over chrom
            for start, before_count in sorted(read_counts.items()):
                print(chrom, start, start + 1, before_count, len(umi_idx[start]), sep="\t")
def readfq(filehandle):
    """Fastq iterator.

    Args:
        filehandle (file): open file handle

    Yields:
        Fastq
    """
    # drop blank lines and trailing CR/LF before grouping into records
    fqclean = (x.strip("\r\n") for x in filehandle if x.strip())
    while True:
        rd = list(islice(fqclean, 4))
        if not rd:
            # BUG FIX: was ``raise StopIteration`` -- inside a generator
            # that becomes a RuntimeError on Python 3.7+ (PEP 479);
            # ``return`` ends the generator cleanly on both 2 and 3.
            return
        assert all(rd) and len(rd) == 4
        yield Fastq(rd)
def valid_umi(iupac, umi):
    """Validate an observed UMI against an IUPAC pattern.

    Args:
        iupac (str): IUPAC sequence
        umi (str): observed sequence

    Returns:
        bool

    >>> valid_umi("NNNV", "ACGT")
    False
    >>> valid_umi("NNNV", "ACGG")
    True
    """
    for code, observed in zip(iupac, umi):
        # unknown IUPAC codes are treated as matching nothing,
        # mirroring the original KeyError -> False behaviour
        allowed = IUPAC.get(code, "")
        if observed not in allowed:
            return False
    return True
def clip_umi(record, n, end):
    """Remove the UMI from a read, trim its quality string accordingly,
    and append the UMI onto the read name.

    Args:
        record (Fastq): `Fastq` record
        n (int): Length of the UMI
        end (int): The end of the read on which the UMI resides (5 or 3)

    Returns:
        Fastq else str: The record or the failed UMI sequence

    >>> fq = Fastq(["@cluster_455 2","GGGGGAGCCACGAGGTGTGTTTTATTTTCATTATTC","+","C===>=B=@:<;4A;8=9?6EEC0?DDA72B@3EB4"])
    >>> r, umi = clip_umi(fq, 6, 5)
    >>> r
    Fastq(cluster_455:UMI_GGGGGA 2)
    >>> r.seq
    'GCCACGAGGTGTGTTTTATTTTCATTATTC'
    """
    if end == 5:
        # tuple assignment: all right-hand slices use the original strings
        umi, record.seq, record.qual = (record.seq[:n],
                                        record.seq[n:],
                                        record.qual[n:])
    else:
        umi, record.seq, record.qual = (record.seq[-n:],
                                        record.seq[:-n],
                                        record.qual[:-n])
    try:
        name, pair = record.name.split(" ", 1)
    except ValueError:
        # no pair suffix in the name
        record.name = "{name}:UMI_{umi}".format(name=record.name, umi=umi)
    else:
        record.name = "{name}:UMI_{umi} {pair}".format(name=name, umi=umi, pair=pair)
    return record, umi
def process_fastq(fastq, umi, end=5, invalid=None, verbose=False, top=10):
    """For every valid umi, trim while incorporating UMI into read name.

    Args:
        fastq (str): file path to unprocessed FASTQ file
        umi (str): IUPAC sequence of UMI
        end (Optional[int]): 5 or 3, which ever end you're UMI is located on
        invalid (Optional[str]): file path to save invalid, non-passing reads
        verbose (Optional[bool]): True prints basic stats on observed UMIs
        top (Optional[int]): Number of the the top invalid UMIs to print out
    """
    umi_stats = Counter()
    umi = umi.upper()
    u_leng = len(umi)
    # The original duplicated the whole read loop for the with/without
    # ``invalid`` cases; the branches are merged here and the optional
    # output file is closed deterministically via try/finally.
    invalid_fh = open(invalid, 'w') if invalid else None
    try:
        with gzopen(fastq) as fq:
            for read in readfq(fq):
                read, observed_umi = clip_umi(read, u_leng, end)
                if valid_umi(umi, observed_umi):
                    # passing reads go to stdout
                    print(read)
                else:
                    umi_stats.update([observed_umi])
                    if invalid_fh is not None:
                        print(read, file=invalid_fh)
    finally:
        if invalid_fh is not None:
            invalid_fh.close()
    if verbose:
        print("Invalid UMI Total:", sum(umi_stats.values()), file=sys.stderr)
        print("Unique UMIs Removed:", len(list(umi_stats)), file=sys.stderr)
        print("Top", top, "Invalid UMIs:", file=sys.stderr)
        for umi, val in umi_stats.most_common(top):
            print(umi, val, sep="\t", file=sys.stderr)
def main():
    """Command-line entry point: dispatches the ``trim`` and ``rmdup`` subcommands."""
    def _file_exists(parser, arg):
        # argparse type-validator: the path must exist and be a regular file
        if not os.path.exists(arg):
            parser.error("The file %s does not exist" % arg)
        if not os.path.isfile(arg):
            parser.error("Expected file, not folder (%s)" % arg)
        return arg

    p = ArgumentParser(description=__doc__)
    p.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__))
    subp = p.add_subparsers(help='commands', dest='command')
    # fastq processing
    fastq = subp.add_parser('trim', description=("Trims the UMI sequence from the read, incorporating the unique "
                                                 "sequence in the read name facilitating filtering of the alignments."),
                            formatter_class=ArgumentDefaultsHelpFormatter,
                            help="trim UMI and incorporate sequence into read name")
    fastq.add_argument('fastq', metavar='FASTQ', type=lambda x: _file_exists(p, x),
                       help='reads with untrimmed UMI')
    fastq.add_argument('umi', metavar='UMI',
                       help='IUPAC UMI sequence, e.g. NNNNNV')
    fastq.add_argument('--end', choices=[5, 3], default=5, type=int,
                       help="UMI location on the read")
    fastq.add_argument('--invalid', metavar='STR',
                       help='save invalid, non-passing reads to this file')
    fastq.add_argument('--verbose', action='store_true',
                       help="print UMI stats to stderr")
    fastq.add_argument('--top', metavar='INT', type=int, default=10,
                       help="when verbose, print this many of the top filtered UMI sequences")
    # bam processing
    bam = subp.add_parser('rmdup', description=("Removes duplicate reads, that were previously characterized by "
                                                "their UMI, at any given start location. Coverage differences before "
                                                "and after are written to STDOUT as BED3+."),
                          formatter_class=ArgumentDefaultsHelpFormatter,
                          help="remove duplicate UMI entries from all start positions")
    bam.add_argument('abam', metavar='INPUT_BAM', type=lambda x: _file_exists(p, x),
                     help='bam with UMI in read name')
    bam.add_argument('bbam', metavar='OUTPUT_BAM',
                     help='non-duplicate UMIs at any given start position')
    bam.add_argument('-m', '--mismatches', metavar='INT', default=0, type=int,
                     help="allowable mismatches when comparing UMIs at any given start location")
    args = p.parse_args()
    # dispatch to the selected subcommand
    if args.command == 'trim':
        process_fastq(args.fastq, args.umi, end=args.end, invalid=args.invalid,
                      verbose=args.verbose, top=args.top)
    elif args.command == 'rmdup':
        process_bam(args.abam, args.bbam, mismatches=args.mismatches)


if __name__ == "__main__":
    main()
| {
"content_hash": "3e4c1b2f9851e406c1adb9d0abd3608f",
"timestamp": "",
"source": "github",
"line_count": 381,
"max_line_length": 120,
"avg_line_length": 33.24409448818898,
"alnum_prop": 0.5647402494868151,
"repo_name": "brwnj/umitools",
"id": "9e952e94ce04f7403d652e6c387f67bfefac4824",
"size": "12706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "umitools/umitools.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21080"
}
],
"symlink_target": ""
} |
class EigenvalueNumberError(Exception):
    """Raised when the number of eigenvalues of K does not match the
    dimension of the Hilbert space."""

    def __init__(self, all_vals, unique_vals):
        self.all_vals, self.unique_vals = all_vals, unique_vals

    def __str__(self):
        # BUG FIX: the original built this message with backslash line
        # continuations *inside* the string literal (embedding the source
        # indentation in the output) and contained a doubled "the the".
        return ("Number of eigenvalues of K does not match "
                "dimension of the Hilbert space. \n All vals: "
                + repr(self.all_vals) + "\n 'Unique' vals: "
                + repr(self.unique_vals))
class UsageError(Exception):
    """Raised when the library is used incorrectly."""
class NZTooLargeError(Exception):
    """Thrown when NZ gets too large"""
| {
"content_hash": "d7b99b4fcbc4cc14ed391048ed3b265a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 28.105263157894736,
"alnum_prop": 0.601123595505618,
"repo_name": "sirmarcel/floq",
"id": "3b5e0fdceb078f3fbeba3eb907b9825a86733018",
"size": "534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "floq/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "226290"
}
],
"symlink_target": ""
} |
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.shortcuts import redirect, render
from django.views import View
from experiments_manager.consumers import send_message
from experiments_manager.helper import MessageStatus
from git_manager.helpers.git_helper import GitHelper
from git_manager.helpers.github_helper import GitHubHelper
from helpers.helper import get_package_or_experiment
from helpers.helper_mixins import ExperimentPackageTypeMixin
from .tasks import task_generate_docs
class DocView(View):
    """Redirects to the documentation page identified by ``page_slug``,
    resolved through the experiment/package's attached language helper
    (falling back to the index page)."""

    def get(self, request, object_id, object_type, page_slug=None):
        target = get_package_or_experiment(request, object_type, object_id)
        helper = target.language_helper()
        slug = page_slug if page_slug else 'index'
        return redirect(to=helper.get_document(slug))
class DocStatusView(ExperimentPackageTypeMixin, View):
    """Renders the documentation status page: whether doc generation is
    enabled plus the latest docs quality messages."""

    def get(self, request, object_id, object_type):
        django_object = get_package_or_experiment(request, object_type, object_id)
        context = {
            'object': django_object,
            'docs': django_object.docs,
            'object_type': object_type,
        }
        return render(request, 'docs_manager/docs_status.html', context)
@login_required
def toggle_docs_status(request, object_id, object_type):
    """Flip the docs ``enabled`` flag; on enabling, clone or pull the repo."""
    target = get_package_or_experiment(request, object_type, object_id)
    doc_settings = target.docs
    doc_settings.enabled = not doc_settings.enabled
    doc_settings.save()
    if doc_settings.enabled:
        gh_helper = GitHubHelper(request.user, target.git_repo.name)
        GitHelper(gh_helper).clone_or_pull_repository()
    return redirect(target.get_absolute_url())
@login_required
def docs_generate(request, object_id, object_type):
    """Start task to regenerate documentation"""
    target = get_package_or_experiment(request, object_type, object_id)
    # notify the owner over the websocket channel, then queue the task
    send_message(target.owner.user.username, MessageStatus.INFO,
                 'Task to regenerate documentation started.')
    task_generate_docs.delay(object_type, object_id)
    return JsonResponse({})
| {
"content_hash": "98b0ce2c9277dfb5c87e70903551a413",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 83,
"avg_line_length": 39.96825396825397,
"alnum_prop": 0.7235901509134234,
"repo_name": "MOOCworkbench/MOOCworkbench",
"id": "193ed8a72b323b46fccdf60d4944069db945b980",
"size": "2518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs_manager/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1942"
},
{
"name": "HTML",
"bytes": "129189"
},
{
"name": "Python",
"bytes": "423140"
},
{
"name": "Shell",
"bytes": "952"
}
],
"symlink_target": ""
} |
import os
from mi.logging import config
from mi.dataset.driver.moas.gl.adcpa.adcpa_driver_common import AdcpaDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
__author__ = "Jeff Roy"
@version("0.3.0")
def parse(unused, source_file_path, particle_data_handler):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
'velocity': 'VelocityGlider',
'engineering': 'GliderEngineering',
'config': 'GliderConfig',
'bottom_track': 'EarthBottom',
'bottom_track_config': 'BottomConfig',
}
}
driver = AdcpaDriver(source_file_path, particle_data_handler, parser_config)
return driver.process()
| {
"content_hash": "3b1c4d61b964ab04d54ccd65316ff9ef",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 80,
"avg_line_length": 28.923076923076923,
"alnum_prop": 0.6768617021276596,
"repo_name": "oceanobservatories/mi-instrument",
"id": "c2fc1a2789f055096ccecffb358f81a6dc5e328e",
"size": "809",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/moas/gl/adcpa/adcpa_m_glider_recovered_driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "4746"
},
{
"name": "Python",
"bytes": "10221924"
}
],
"symlink_target": ""
} |
from smartbotsol import StateMachine
import logging as logger
log = logger.getLogger(__name__)
class HandeledStateMachine(StateMachine):
    """
    State machine with Telegram handler functionality.

    For each new message, the handlers of the current state are searched;
    if none match, the machine moves to the fallback state, from which it
    later returns to the current state.
    """
    def __init__(self, init_state, filters, fallback, handlers_not_required=True):
        # fallback: state entered when no handler of the current state matches
        # handlers_not_required: when True, fire even if no handler matched
        self.fallback = fallback
        self.handlers_not_required = handlers_not_required
        super(HandeledStateMachine, self).__init__(init_state, filters)

    def collect_handlers(self, trigger, handlers):
        """Return the set of handler names whose filters match ``trigger.update``."""
        # return set(name for name, candidate in handlers.items() if candidate.check_update(trigger.update))
        result = set()
        classname = self.state.__class__.__name__
        for name, candidate in handlers.items():
            log.info('Check update for "{}" {}'.format(name,candidate.check_update(trigger.update)))
            if candidate.check_update(trigger.update):
                result.add(name)
        log.debug('Found {} Handlers: {}'.format(classname, list(result)))
        return result

    def fire(self, trigger):
        """Resolve handlers for ``trigger`` (entry points, current state,
        then fallback) and advance the machine accordingly."""
        log.debug('Resolve Handlers {}'.format(self.state.__class__.__name__))
        trigger.handler = set()
        classname = self.state.__class__.__name__
        # resolve entry point
        if classname == 'BootStrapState':
            log.debug('Check entry points for {}'.format(classname))
            for name, entry_point in self.state.handlers.items():
                if entry_point.check_update(trigger.update) or self.state.skip_start:
                    trigger.handler.add(name)
            log.debug('Found {} Handlers: {}'.format(classname, list(trigger.handler)))
        # search handler for update
        # NOTE(review): 'FallBackState' here vs 'FallbackState' further down --
        # the spelling differs, so both membership tests cannot match the same
        # class; confirm which spelling the real fallback state class uses.
        if classname not in ['BootStrapState', 'FallBackState'] and not trigger.handler:
            handlers = self.state.handlers
            trigger.handler = self.collect_handlers(trigger, handlers)
        # if handlers not exists go to FallbackState and search fallback
        if not trigger.handler:
            log.debug('Handlers not found')
            handlers = self.fallback.handlers
            self.fallback.parent = self.state  # need to know where we came from when we get into FallbackState
            self.to_state(self.fallback, trigger)
            log.debug('Check fallbacks: {}'.format(list(handlers.keys())))
            trigger.handler = self.collect_handlers(trigger, handlers)
        if trigger.handler or self.handlers_not_required:
            if self.state.__class__.__name__ in ['BootStrapState', 'FallbackState']:
                new_state = self.state._on_trigger(trigger)
                log.debug(new_state)
                self.to_state(new_state, trigger)
            else:
                super(HandeledStateMachine, self).fire(trigger)
| {
"content_hash": "708bddbbbd6d8b41ba5f8328865fd7a0",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 115,
"avg_line_length": 41.69444444444444,
"alnum_prop": 0.6269153897401732,
"repo_name": "dqunbp/smartbotsol",
"id": "ffab4e4ebd60ddf444a76364b4a4c8ab0e8585d2",
"size": "3003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smartbotsol/telegram/handeledfsm.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "28018"
}
],
"symlink_target": ""
} |
from configuration import *
from utils import make_request, APIError
class EverybitAPI():
    """Thin client for the Everybit REST API (account and video endpoints)."""

    def __init__(self):
        pass

    def get_account_info(self):
        """
        Get user's account info.
        """
        url = '%s/v1/account' % api_base_url
        return make_request(url)

    def get_videos(self, uuid=None):
        """
        Get videos for this user.

        If there is a uuid provided, we'll retrieve a single video's details.
        If there is no uuid, it will get all the videos in the user's account.
        """
        url = '%s/v1/videos' % api_base_url
        if uuid:
            url = '%s/%s' % (url, uuid)
        return make_request(url)

    def create_video(self, data=None):
        """
        Create a new video.

        Will throw an APIError if there is no json_data present.
        """
        if not data:
            raise APIError("You must provide a data to create a video.")
        url = '%s/v1/videos' % api_base_url
        return make_request(url, 'POST', data)

    def update_video(self, uuid=None, data=None):
        """
        Update an existing video.

        A uuid and some json data (dict) are required here.
        """
        if not uuid:
            raise APIError("You must provide a uuid to update a video.")
        if not data:
            raise APIError("You must provide a data to update a video.")
        url = '%s/v1/videos' % api_base_url
        url = '%s/%s' % (url, uuid)
        return make_request(url, 'PUT', data)

    def get_video_status(self, uuid=None):
        """
        Get the video's status.

        If the video is currently encoding, we can see how far along it is.
        If the video is done encoding, or there was an error, we can find that
        out here as well.
        """
        if not uuid:
            # BUG FIX: this message previously said "update a video",
            # copy-pasted from update_video().
            raise APIError("You must provide a uuid to get a video's status.")
        url = '%s/v1/videos/%s/status' % (api_base_url, uuid)
        return make_request(url, 'GET')
"content_hash": "a0321746aaf8d1e4751e99bc05f9de3e",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 27.84722222222222,
"alnum_prop": 0.5561097256857855,
"repo_name": "everybit/everybit-python",
"id": "341fbc915f11d038faf8fabaeb7c1eb84c7ea12f",
"size": "2005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "everybit/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7242"
}
],
"symlink_target": ""
} |
import unittest
from unittest.mock import patch
import pytest
from airflow.providers.google.cloud.transfers.presto_to_gcs import PrestoToGCSOperator
TASK_ID = "test-presto-to-gcs"
PRESTO_CONN_ID = "my-presto-conn"
GCP_CONN_ID = "my-gcp-conn"
IMPERSONATION_CHAIN = ["ACCOUNT_1", "ACCOUNT_2", "ACCOUNT_3"]
SQL = "SELECT * FROM memory.default.test_multiple_types"
BUCKET = "gs://test"
FILENAME = "test_{}.ndjson"
NDJSON_LINES = [
b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
b'{"some_num": 44, "some_str": "mock_row_content_3"}\n',
]
CSV_LINES = [
b"some_num,some_str\r\n",
b"42,mock_row_content_1\r\n",
b"43,mock_row_content_2\r\n",
b"44,mock_row_content_3\r\n",
]
SCHEMA_FILENAME = "schema_test.json"
SCHEMA_JSON = b'[{"name": "some_num", "type": "INT64"}, {"name": "some_str", "type": "STRING"}]'
@pytest.mark.integration("presto")
class TestPrestoToGCSOperator(unittest.TestCase):
    """Tests for PrestoToGCSOperator: initialization plus JSON/CSV export,
    file splitting, and schema-file upload, with PrestoHook and GCSHook
    fully mocked."""

    def test_init(self):
        """Test PrestoToGCSOperator instance is properly initialized."""
        op = PrestoToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=FILENAME,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        assert op.task_id == TASK_ID
        assert op.sql == SQL
        assert op.bucket == BUCKET
        assert op.filename == FILENAME
        assert op.impersonation_chain == IMPERSONATION_CHAIN

    @patch("airflow.providers.google.cloud.transfers.presto_to_gcs.PrestoHook")
    @patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
    def test_save_as_json(self, mock_gcs_hook, mock_presto_hook):
        # upload side effect verifies destination and uploaded file content
        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
            assert BUCKET == bucket
            assert FILENAME.format(0) == obj
            assert "application/json" == mime_type
            assert not gzip
            with open(tmp_filename, "rb") as file:
                assert b"".join(NDJSON_LINES) == file.read()

        mock_gcs_hook.return_value.upload.side_effect = _assert_upload

        # simulate a two-column result set with three rows
        mock_cursor = mock_presto_hook.return_value.get_conn.return_value.cursor
        mock_cursor.return_value.description = [
            ("some_num", "INTEGER", None, None, None, None, None),
            ("some_str", "VARCHAR", None, None, None, None, None),
        ]
        mock_cursor.return_value.fetchone.side_effect = [
            [42, "mock_row_content_1"],
            [43, "mock_row_content_2"],
            [44, "mock_row_content_3"],
            None,
        ]

        op = PrestoToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=FILENAME,
            presto_conn_id=PRESTO_CONN_ID,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        op.execute(None)

        mock_presto_hook.assert_called_once_with(presto_conn_id=PRESTO_CONN_ID)
        mock_gcs_hook.assert_called_once_with(
            delegate_to=None,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        mock_gcs_hook.return_value.upload.assert_called()

    @patch("airflow.providers.google.cloud.transfers.presto_to_gcs.PrestoHook")
    @patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
    def test_save_as_json_with_file_splitting(self, mock_gcs_hook, mock_presto_hook):
        """Test that ndjson is split by approx_max_file_size_bytes param."""
        expected_upload = {
            FILENAME.format(0): b"".join(NDJSON_LINES[:2]),
            FILENAME.format(1): NDJSON_LINES[2],
        }

        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
            assert BUCKET == bucket
            assert "application/json" == mime_type
            assert not gzip
            with open(tmp_filename, "rb") as file:
                assert expected_upload[obj] == file.read()

        mock_gcs_hook.return_value.upload.side_effect = _assert_upload

        mock_cursor = mock_presto_hook.return_value.get_conn.return_value.cursor
        mock_cursor.return_value.description = [
            ("some_num", "INTEGER", None, None, None, None, None),
            ("some_str", "VARCHAR(20)", None, None, None, None, None),
        ]
        mock_cursor.return_value.fetchone.side_effect = [
            [42, "mock_row_content_1"],
            [43, "mock_row_content_2"],
            [44, "mock_row_content_3"],
            None,
        ]

        op = PrestoToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=FILENAME,
            # split after the first two rows so the third lands in file 1
            approx_max_file_size_bytes=len(expected_upload[FILENAME.format(0)]),
        )
        op.execute(None)

        mock_gcs_hook.return_value.upload.assert_called()

    @patch("airflow.providers.google.cloud.transfers.presto_to_gcs.PrestoHook")
    @patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
    def test_save_as_json_with_schema_file(self, mock_gcs_hook, mock_presto_hook):
        """Test writing schema files."""

        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):  # pylint: disable=unused-argument
            if obj == SCHEMA_FILENAME:
                with open(tmp_filename, "rb") as file:
                    assert SCHEMA_JSON == file.read()

        mock_gcs_hook.return_value.upload.side_effect = _assert_upload

        mock_cursor = mock_presto_hook.return_value.get_conn.return_value.cursor
        mock_cursor.return_value.description = [
            ("some_num", "INTEGER", None, None, None, None, None),
            ("some_str", "VARCHAR", None, None, None, None, None),
        ]
        mock_cursor.return_value.fetchone.side_effect = [
            [42, "mock_row_content_1"],
            [43, "mock_row_content_2"],
            [44, "mock_row_content_3"],
            None,
        ]

        op = PrestoToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=FILENAME,
            schema_filename=SCHEMA_FILENAME,
            # NOTE(review): this "json" test passes export_format="csv" --
            # looks copy-pasted from the CSV variant; confirm intent.
            export_format="csv",
            presto_conn_id=PRESTO_CONN_ID,
            gcp_conn_id=GCP_CONN_ID,
        )
        op.execute(None)

        # once for the file and once for the schema
        assert 2 == mock_gcs_hook.return_value.upload.call_count

    @patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
    @patch("airflow.providers.google.cloud.transfers.presto_to_gcs.PrestoHook")
    def test_save_as_csv(self, mock_presto_hook, mock_gcs_hook):
        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
            assert BUCKET == bucket
            assert FILENAME.format(0) == obj
            assert "text/csv" == mime_type
            assert not gzip
            with open(tmp_filename, "rb") as file:
                assert b"".join(CSV_LINES) == file.read()

        mock_gcs_hook.return_value.upload.side_effect = _assert_upload

        mock_cursor = mock_presto_hook.return_value.get_conn.return_value.cursor
        mock_cursor.return_value.description = [
            ("some_num", "INTEGER", None, None, None, None, None),
            ("some_str", "VARCHAR", None, None, None, None, None),
        ]
        mock_cursor.return_value.fetchone.side_effect = [
            [42, "mock_row_content_1"],
            [43, "mock_row_content_2"],
            [44, "mock_row_content_3"],
            None,
        ]

        op = PrestoToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=FILENAME,
            export_format="csv",
            presto_conn_id=PRESTO_CONN_ID,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )
        op.execute(None)

        mock_gcs_hook.return_value.upload.assert_called()

        mock_presto_hook.assert_called_once_with(presto_conn_id=PRESTO_CONN_ID)
        mock_gcs_hook.assert_called_once_with(
            delegate_to=None,
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=IMPERSONATION_CHAIN,
        )

    @patch("airflow.providers.google.cloud.transfers.presto_to_gcs.PrestoHook")
    @patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
    def test_save_as_csv_with_file_splitting(self, mock_gcs_hook, mock_presto_hook):
        """Test that csv is split by approx_max_file_size_bytes param."""
        # each split carries the header row, so file 1 repeats CSV_LINES[0]
        expected_upload = {
            FILENAME.format(0): b"".join(CSV_LINES[:3]),
            FILENAME.format(1): b"".join([CSV_LINES[0], CSV_LINES[3]]),
        }

        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
            assert BUCKET == bucket
            assert "text/csv" == mime_type
            assert not gzip
            with open(tmp_filename, "rb") as file:
                assert expected_upload[obj] == file.read()

        mock_gcs_hook.return_value.upload.side_effect = _assert_upload

        mock_cursor = mock_presto_hook.return_value.get_conn.return_value.cursor
        mock_cursor.return_value.description = [
            ("some_num", "INTEGER", None, None, None, None, None),
            ("some_str", "VARCHAR(20)", None, None, None, None, None),
        ]
        mock_cursor.return_value.fetchone.side_effect = [
            [42, "mock_row_content_1"],
            [43, "mock_row_content_2"],
            [44, "mock_row_content_3"],
            None,
        ]

        op = PrestoToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=FILENAME,
            approx_max_file_size_bytes=len(expected_upload[FILENAME.format(0)]),
            export_format="csv",
        )
        op.execute(None)

        mock_gcs_hook.return_value.upload.assert_called()

    @patch("airflow.providers.google.cloud.transfers.presto_to_gcs.PrestoHook")
    @patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
    def test_save_as_csv_with_schema_file(self, mock_gcs_hook, mock_presto_hook):
        """Test writing schema files."""

        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):  # pylint: disable=unused-argument
            if obj == SCHEMA_FILENAME:
                with open(tmp_filename, "rb") as file:
                    assert SCHEMA_JSON == file.read()

        mock_gcs_hook.return_value.upload.side_effect = _assert_upload

        mock_cursor = mock_presto_hook.return_value.get_conn.return_value.cursor
        mock_cursor.return_value.description = [
            ("some_num", "INTEGER", None, None, None, None, None),
            ("some_str", "VARCHAR", None, None, None, None, None),
        ]
        mock_cursor.return_value.fetchone.side_effect = [
            [42, "mock_row_content_1"],
            [43, "mock_row_content_2"],
            [44, "mock_row_content_3"],
            None,
        ]

        op = PrestoToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=FILENAME,
            schema_filename=SCHEMA_FILENAME,
            export_format="csv",
        )
        op.execute(None)

        # once for the file and once for the schema
        assert 2 == mock_gcs_hook.return_value.upload.call_count
| {
"content_hash": "4e47512e1428f3add18719dad0e7cf39",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 106,
"avg_line_length": 36.11464968152866,
"alnum_prop": 0.5828924162257496,
"repo_name": "sekikn/incubator-airflow",
"id": "3eb9f63025b02892808662286446db6680d11ff7",
"size": "12127",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/providers/google/cloud/transfers/test_presto_to_gcs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
} |
# Tests must never hit a real cache backend, so wire the no-op dummy cache.
CACHES = {
    'default': {'BACKEND': 'django.core.cache.backends.dummy.DummyCache'},
}

# django will complain if we don't include this
SECRET_KEY = 'dummysecret'
| {
"content_hash": "7e1b0ef648af95be1fe5face8237649d",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 64,
"avg_line_length": 22.125,
"alnum_prop": 0.6497175141242938,
"repo_name": "ssaw/django-inlinify",
"id": "2350cc8b949a2c090d45e48deb5f45114742e891",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_inlinify/test_settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1351"
},
{
"name": "HTML",
"bytes": "10265"
},
{
"name": "Python",
"bytes": "33166"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Set Entity's plural display name to "entities" via Meta options."""

    dependencies = [
        ('census', '0005_auto_20170622_1416'),
    ]

    operations = [
        # Only updates the model's Meta options (verbose_name_plural).
        migrations.AlterModelOptions(
            name='entity',
            options={'verbose_name_plural': 'entities'},
        ),
    ]
| {
"content_hash": "8494b3e22c45cd4c4e311b9c9e352784",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 56,
"avg_line_length": 20.529411764705884,
"alnum_prop": 0.5988538681948424,
"repo_name": "mujinyun2009/shakespeare-census",
"id": "f1e155bc20ec8c9feb4e0e82fe312579d212f26c",
"size": "422",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "census/migrations/0006_auto_20170622_1752.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6667"
},
{
"name": "HTML",
"bytes": "36778"
},
{
"name": "Python",
"bytes": "32418"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
} |
import tornado.web
import sqlite3
from model import Model
import config
class MainHandler(tornado.web.RequestHandler):
    """Single-page recipe app endpoint.

    GET renders the UI and resets the search state; POST is a JSON action
    dispatcher selected by the 'azione' (action) argument.  Search state
    (keyword, category, page index) is kept in cookies.
    """

    def initialize(self):
        # Reuse the application-wide sqlite connection (set in __main__).
        self.db = self.application.db

    def get(self):
        # Initializing cookie
        # Reset the search state on every full page load.
        self.set_cookie('keyword', '')
        self.set_cookie('categoria', '')
        self.set_cookie('search_index', '1')
        self.render("index.html")

    def post(self):
        """Dispatch on 'azione' and write a JSON-encoded response."""
        azione = self.get_argument('azione', default='')
        _model = Model(self.db)
        response_dict = dict()
        # NOTE(review): these cookies are set by get(); a POST issued before
        # any GET would make int(None) raise -- confirm the client always
        # loads the page first.
        keyword_cookie = self.get_cookie('keyword')
        categoria_cookie = self.get_cookie('categoria')
        search_index_cookie = int(self.get_cookie('search_index'))
        if azione == 'caricaContenuti':
            # Initial content load: recipe page plus category/section lookups.
            ricette = dict()
            ricette['list'], ricette['search_total'], ricette['search_index'] = _model.get_ricette(keyword_cookie, categoria_cookie, search_index_cookie)
            categorie = _model.get_categorie()
            sezioni = _model.get_sezioni()
            response_dict['ricette'] = ricette
            response_dict['categorie'] = categorie
            response_dict['sezioni'] = sezioni
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'searchRicette':
            # Persist the new search parameters in cookies, then search.
            keyword_cookie = self.get_argument('keyword')
            categoria_cookie = self.get_argument('categoria')
            search_index_cookie = int(self.get_argument('searchIndex'))
            self.set_cookie('keyword', keyword_cookie)
            self.set_cookie('categoria', str(categoria_cookie))
            self.set_cookie('search_index', str(search_index_cookie))
            ricette = dict()
            ricette['list'], ricette['search_total'], ricette['search_index'] = _model.get_ricette(keyword_cookie, categoria_cookie, search_index_cookie)
            response_dict['ricette'] = ricette
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'editRicetta':
            # Fetch a single recipe for the edit form.
            id_ricetta = int(self.get_argument('id_ricetta'))
            response_dict['ricetta'] = _model.get_ricetta(id_ricetta)
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'salvaRicetta':
            _ric = tornado.escape.json_decode(self.get_argument('ricetta'))
            response_dict = _model.salva_ricetta(_ric)
            # On success (stato == 0), return the recipes so the client reloads the list.
            if response_dict['stato'] == 0:
                ricette = dict()
                ricette['list'], ricette['search_total'], ricette['search_index'] = _model.get_ricette(keyword_cookie, categoria_cookie, search_index_cookie)
                response_dict['ricette'] = ricette
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'deleteRicetta':
            id_ricetta = int(self.get_argument('id_ricetta'))
            response_dict = _model.delete_ricetta(id_ricetta)
            # On success, return the recipes for reload; deletion may shift the
            # page index, so the cookie is refreshed from the query result.
            if response_dict['stato'] == 0:
                ricette = dict()
                ricette['list'], ricette['search_total'], ricette['search_index'] = _model.get_ricette(keyword_cookie, categoria_cookie, search_index_cookie)
                self.set_cookie('search_index', str(ricette['search_index']))
                response_dict['ricette'] = ricette
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'salvaCategoria':
            _cat = tornado.escape.json_decode(self.get_argument('categoria'))
            response_dict = _model.salva_categoria(_cat)
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'deleteCategoria':
            id_categoria = int(self.get_argument('id_categoria'))
            response_dict = _model.delete_categoria(id_categoria)
            # On success, return the categories so the client can reload them.
            if response_dict['stato'] == 0: response_dict['categorie'] = _model.get_categorie()
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'salvaSezione':
            _sez = tornado.escape.json_decode(self.get_argument('sezione'))
            response_dict = _model.salva_sezione(_sez)
            self.write(tornado.escape.json_encode(response_dict))
        elif azione == 'deleteSezione':
            id_sezione = int(self.get_argument('id_sezione'))
            response_dict = _model.delete_sezione(id_sezione)
            # On success, return the sections so the client can reload them.
            if response_dict['stato'] == 0: response_dict['sezioni'] = _model.get_sezioni()
            self.write(tornado.escape.json_encode(response_dict))
""" Defining Application """
application = tornado.web.Application([
(r"/static/(.*)", tornado.web.StaticFileHandler, dict(path=config.static_path)),
(r"/(favicon.ico)", tornado.web.StaticFileHandler, dict(path=config.static_path)),
(r"/", MainHandler),
],
autoescape=None,
debug=True,
)
""" MAIN """
if __name__ == "__main__":
application.listen(config.port)
application.db = sqlite3.connect(config.db)
tornado.ioloop.IOLoop.instance().start()
tornado.autoreload.wait()
| {
"content_hash": "230f0fa13aee697c0717d34761693eb9",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 157,
"avg_line_length": 46.70175438596491,
"alnum_prop": 0.6012396694214877,
"repo_name": "mr--dev/salepepe",
"id": "520cb8f57303113adc13a086e4e25af5d611210c",
"size": "5324",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1322"
},
{
"name": "JavaScript",
"bytes": "12440"
},
{
"name": "Python",
"bytes": "14308"
}
],
"symlink_target": ""
} |
"""MSYS specifics for Msys terminal IO and for running shell scripts
Configured for MSYS 1.0.17.
exports msys_raw_input, MsysException, Msys
Uses optional environment variable MSYS_ROOT_DIRECTORY. Set this to the full
path of the MSYS home directory, 1.0 subdirectory in path optional.
"""
from msysio import raw_input_ as msys_raw_input, print_ as msys_print
from msysio import is_msys
import sys
import os
import time
import subprocess
import re
import glob
try:
import _winreg
except ImportError:
import winreg as _winreg
# For Python 2.x/3.x compatibility
def geterror():
    """Return the exception instance currently being handled (Py2/Py3 safe)."""
    _, exc_value, _ = sys.exc_info()
    return exc_value
def encode_script(s):
    """Return the shell script *s* in the form a Popen stdin pipe expects:
    ascii-encoded bytes on Python 3, the original byte string on Python 2."""
    if sys.version_info > (3,):
        return s.encode('ascii')
    return s
FSTAB_REGEX = (r'^[ \t]*'
r'(?P<path>([a-zA-Z]:)?[^\s*^?:%]+)'
r'[ \t]+/mingw(\s|$)'
)
def has_drive(path):
    """Return True when *path* begins with an MSYS drive prefix like ``/c/``."""
    return re.match(r'/[a-zA-Z]/', path) is not None
class MsysException(Exception):
    """Raised when an MSYS/MinGW path cannot be located or retrieved."""
    pass
def find_msys_version_subdir(msys_dir):
    """Return the full MSYS root directory path.

    If msys_dir path lacks the version subdirectory, e.g. 1.0, then the
    path is searched for one. The user will be prompted to choose if more
    than one version is found.

    Raises MsysException if no version subdirectory exists.
    """
    # Already ends in a version component like "\\1.0" or "/1.0"?
    regex = r'[\\/][1-9][.][0-9]$'
    if re.search(regex, msys_dir) is not None:
        return msys_dir
    roots = glob.glob(os.path.join(msys_dir, '[1-9].[0-9]'))
    roots.sort()
    roots.reverse()  # newest version first
    if not roots:
        raise MsysException("No msys versions found.\n")
    if len(roots) == 1:
        return roots[0]
    msys_print("Select an Msys version:")
    for i, path in enumerate(roots):
        msys_print("  %d = %s" % (i+1, os.path.split(path)[1]))
    # BUG FIX: the prompt previously contained a bare "%d" because the
    # format argument was never supplied; show the real upper bound.
    choice = msys_raw_input("Select 1-%d (1 = default):" % len(roots))
    if not choice:
        return roots[0]
    return roots[int(choice)-1]
def input_msys_dir():
    """Prompt for the MSYS directory path until a usable one is entered.

    Returns the full versioned root path.  Raises MsysException when the
    user aborts by entering nothing.
    """
    prompt = ("Enter the MSYS directory path,\n"
              "(or press [Enter] to quit):")
    while True:
        entered = msys_raw_input(prompt).strip()
        if not entered:
            raise MsysException("Input aborted by user")
        try:
            return find_msys_version_subdir(os.path.abspath(entered))
        except MsysException:
            msys_print(geterror())
def find_msys_registry():
    """Return the MSYS 1.0 directory path stored in the Windows registry.

    Consults the uninstaller entry left by the MSYS installer, first in
    HKEY_CURRENT_USER and then HKEY_LOCAL_MACHINE.  The return value is an
    encoded ascii str.  Raises LookupError when neither hive has the entry.
    """
    #!! Leave until known if new MSYS package manager makes a registry entry.
    subkey = ('Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall'
              '\\{AC2C1BDB-1E91-4F94-B99C-E716FE2E9C75}_is1')
    for hive in (_winreg.HKEY_CURRENT_USER, _winreg.HKEY_LOCAL_MACHINE):
        try:
            key = _winreg.OpenKey(hive, subkey)
            try:
                mingw_path = _winreg.QueryValueEx(key, 'Inno Setup: App Path')[0].encode()
            finally:
                key.Close()
        except WindowsError:
            continue  # try the next hive
        return os.path.join(mingw_path, 'msys', '1.0')
    raise LookupError("MSYS not found in the registry")
def as_shell(msys_root):
    """Return the path of the MSYS shell program under *msys_root*."""
    shell_parts = (msys_root, 'bin', 'sh.exe')
    return os.path.join(*shell_parts)
def check_for_shell(msys_directory=None):
    """Check various locations for MSYS shell or root directory.

    Search order: the explicit msys_directory argument, the
    MSYS_ROOT_DIRECTORY environment variable, the current MSYS console
    (WD + MSYSCON), the Windows registry, and finally an interactive
    prompt.  Returns the shell program path.
    May raise MsysException.
    """
    if msys_directory is not None:
        try:
            dir_path = find_msys_version_subdir(msys_directory)
        except MsysException:
            pass  # fall through to the other search locations
        else:
            return as_shell(dir_path)
    try:
        shell = os.environ['MSYS_ROOT_DIRECTORY']
    except KeyError:
        pass
    else:
        # Append the version subdirectory when the variable omitted it.
        if not re.search(r'[\\/]1\.[0-9]', shell):
            shell = os.path.join(shell, '1.0')
        return os.path.join(shell, 'bin', 'sh.exe')
    if is_msys():
        # Inside an MSYS console, WD + MSYSCON name the running shell binary.
        try:
            return os.environ['WD'] + os.environ['MSYSCON']
        except KeyError:
            pass
    try:
        dir_path = find_msys_registry()
    except LookupError:
        pass
    else:
        return as_shell(dir_path)
    # Last resort: ask the user interactively.
    return as_shell(input_msys_dir())
def find_msys_shell(msys_directory=None):
    """Return the MSYS shell program path.

    MsysException is raised if the shell program is not found.  The user
    is prompted as a last resort if no directory is found or there are
    multiple choices.
    """
    shell = check_for_shell(msys_directory)
    # Keep prompting until the candidate path points at an existing file.
    while 1:
        shell = os.path.abspath(shell.replace('/', os.sep))
        if os.path.isfile(shell):
            break
        msys_print("Directory %s has no MSYS shell." % shell)
        shell = as_shell(input_msys_dir())
    return shell
def find_mingw_root(msys_directory):
    """Return the Windows equivalent of /mingw.

    The mapping is looked up in the MSYS fstab file.  Raises MsysException
    when the fstab file cannot be read, the /mingw mount is absent, or the
    mapped path is not a directory.
    """
    # Look it up in the fstabs file.
    fstab_path = os.path.join(msys_directory, 'etc', 'fstab')
    try:
        # BUG FIX: the file handle was previously never closed; close it
        # deterministically with a context manager.
        with open(fstab_path, 'r') as fstab:
            contents = fstab.read()
    except IOError:
        raise MsysException("Unable to open MSYS fstab file %s" % fstab_path)
    match = re.search(FSTAB_REGEX, contents, re.MULTILINE)
    if match is None:
        raise MsysException(
            "The required MinGW path is not in the MSYS fstab file")
    dir_path = os.path.abspath(match.groupdict()['path'])
    if not os.path.isdir(dir_path):
        raise MsysException("%s is not a directory" % dir_path)
    return dir_path
class Msys(object):
    """Return a new Msys environment; May raise MsysException

    Msys([msys_directory, [require_mingw]])

    msys_directory: A string giving the path of the MSYS directory.

    Either or both keyword arguments can be omitted. If msys_directory
    is not provided then the environment variable SHELL and the Windows
    registry are checked. Finally the user is prompted for the directory
    path. If require_mingw is True, the default, the mingw directory path
    is retrieved from the MSYS fstab file. An MsysException is raised if
    the required paths are not found.
    """

    # Evaluated once at class creation: whether we run inside an MSYS console.
    _is_msys = is_msys()

    def __init__(self, msys_directory=None, require_mingw=None):
        """New environment

        May raise MsysException"""
        if require_mingw is None:
            require_mingw = True
        # Private copy so environment edits don't leak back into os.environ.
        self._environ = os.environ.copy()
        self._shell = find_msys_shell(msys_directory)
        # shell is <root>/bin/sh.exe; strip two components for the root.
        self._msys_root = os.path.split(os.path.split(self.shell)[0])[0].lower()
        try:
            self._mingw_root = find_mingw_root(self.msys_root)
        except MsysException:
            if require_mingw:
                raise
            self._mingw_root = None
        else:
            self.environ['MINGW_ROOT_DIRECTORY'] = self._mingw_root

    environ = property(lambda self: self._environ,
                       doc="Environment variables")
    shell = property(lambda self: self._shell,
                     doc="MSYS shell program path")
    msys_root = property(lambda self: self._msys_root,
                         doc="MSYS root directory path")
    mingw_root = property(lambda self: self._mingw_root,
                          doc="MinGW root directory path")
    is_msys = property(lambda self: self._is_msys,
                       doc="True if the execution environment is MSYS")

    def windows_to_msys(self, path):
        """Return an MSYS translation of a Windows path
        """
        path = os.path.abspath(path)
        msys_root = self.msys_root
        mingw_root = self.mingw_root
        path_lower = path.lower()
        # Paths inside the MSYS/MinGW trees map to /usr and /mingw mounts.
        if path_lower.startswith(msys_root.lower()):
            return '/usr' + path[len(msys_root):].replace(os.sep, '/')
        if mingw_root is not None and path_lower.startswith(mingw_root.lower()):
            return '/mingw' + path[len(mingw_root):].replace(os.sep, '/')
        # Anything else becomes /<drive letter>/<tail>.
        drive, tail = os.path.splitdrive(path)
        drive_mount_point = drive[0].lower()
        drive_subpath = tail.replace(os.sep, '/')
        return '/%s%s' % (drive_mount_point, drive_subpath)

    def msys_to_windows(self, path):
        """Return a Windows translation of an MSYS path

        The Unix path separator is used as it survives the distutils setup
        file read process. Raises a ValueError if the path cannot be
        translated.
        """
        msys_root = self.msys_root
        mingw_root = self.mingw_root
        if path.startswith('/usr'):
            path = msys_root + path[4:]
        elif path.startswith('/mingw'):
            if mingw_root is None:
                raise ValueError('Unable to map the MinGW directory')
            path = mingw_root + path[6:]
        elif has_drive(path):
            # e.g. "/c/foo" -> "c:/foo"
            path = path[1] + ":" + path[2:]
        elif path == '/':
            path = msys_root
        elif path.startswith('/'):
            path = msys_root + path
        return path.replace(os.sep, '/')

    def run_shell_script(self, script):
        """Run the MSYS shell script and return the shell return code

        script is a string representing the contents of the script.
        """
        cmd = [self.shell]
        if not self._is_msys:
            # Outside an MSYS console a login shell sets up the MSYS environment.
            cmd.append('--login')
        previous_cwd = os.getcwd()
        try:
            process = subprocess.Popen(cmd,
                                       stdin=subprocess.PIPE,
                                       env=self.environ)
            process.communicate(encode_script(script))
            return process.returncode
        finally:
            time.sleep(2)  # Allow shell subprocesses to terminate.
            # Restore the working directory in case the script changed it.
            os.chdir(previous_cwd)

    def run_shell_command(self, command):
        """Run the MSYS shell command and return stdout output as a string

        command is a list of strings giving the command and its arguments.
        The first list entry must be the MSYS path name of a bash shell
        script file.
        """
        args = [self.shell]
        if not self._is_msys:
            args.append('--login')
        args.extend(command)
        previous_cwd = os.getcwd()
        try:
            return subprocess.Popen(args,
                                    stdout=subprocess.PIPE,
                                    env=self.environ).communicate()[0]
        finally:
            time.sleep(3)  # Allow shell subprocesses to terminate.
            # Restore the working directory in case the command changed it.
            os.chdir(previous_cwd)
__all__ = ['Msys', 'msys_raw_input', 'msys_print', 'MsysException']
| {
"content_hash": "010081cde6bdfe16c2275fff99f43626",
"timestamp": "",
"source": "github",
"line_count": 335,
"max_line_length": 90,
"avg_line_length": 32.77313432835821,
"alnum_prop": 0.5835686310228618,
"repo_name": "hanvo/MusicCloud",
"id": "5e9f9db76dccb07618c9a3c7fe5f8dd278c8c643",
"size": "11043",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Crawler/Install Files/pygame/msys.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2593783"
},
{
"name": "C++",
"bytes": "27120"
},
{
"name": "CSS",
"bytes": "18398"
},
{
"name": "D",
"bytes": "4176145"
},
{
"name": "Java",
"bytes": "654098"
},
{
"name": "JavaScript",
"bytes": "3992"
},
{
"name": "Objective-C",
"bytes": "530579"
},
{
"name": "PHP",
"bytes": "5512"
},
{
"name": "Python",
"bytes": "6888570"
},
{
"name": "Shell",
"bytes": "4583"
}
],
"symlink_target": ""
} |
"""
Example Airflow DAG that demonstrates interactions with Google Cloud Transfer.
This DAG relies on the following OS environment variables
* GCP_PROJECT_ID - Google Cloud Project to use for the Google Cloud Transfer Service.
* GCP_TRANSFER_FIRST_TARGET_BUCKET - Google Cloud Storage bucket to which files are copied from AWS.
It is also a source bucket in next step
* GCP_TRANSFER_SECOND_TARGET_BUCKET - Google Cloud Storage bucket bucket to which files are copied
"""
import os
from datetime import datetime, timedelta
from airflow import models
from airflow.providers.google.cloud.hooks.cloud_storage_transfer_service import (
ALREADY_EXISTING_IN_SINK,
BUCKET_NAME,
DESCRIPTION,
FILTER_JOB_NAMES,
FILTER_PROJECT_ID,
GCS_DATA_SINK,
GCS_DATA_SOURCE,
PROJECT_ID,
SCHEDULE,
SCHEDULE_END_DATE,
SCHEDULE_START_DATE,
START_TIME_OF_DAY,
STATUS,
TRANSFER_JOB,
TRANSFER_JOB_FIELD_MASK,
TRANSFER_OPTIONS,
TRANSFER_SPEC,
GcpTransferJobsStatus,
GcpTransferOperationStatus,
)
from airflow.providers.google.cloud.operators.cloud_storage_transfer_service import (
CloudDataTransferServiceCreateJobOperator,
CloudDataTransferServiceDeleteJobOperator,
CloudDataTransferServiceGetOperationOperator,
CloudDataTransferServiceListOperationsOperator,
CloudDataTransferServiceUpdateJobOperator,
)
from airflow.providers.google.cloud.sensors.cloud_storage_transfer_service import (
CloudDataTransferServiceJobStatusSensor,
)
from airflow.utils.dates import days_ago
GCP_PROJECT_ID = os.environ.get("GCP_PROJECT_ID", "example-project")
GCP_TRANSFER_FIRST_TARGET_BUCKET = os.environ.get(
"GCP_TRANSFER_FIRST_TARGET_BUCKET", "gcp-transfer-first-target"
)
GCP_TRANSFER_SECOND_TARGET_BUCKET = os.environ.get(
"GCP_TRANSFER_SECOND_TARGET_BUCKET", "gcp-transfer-second-target"
)
# [START howto_operator_gcp_transfer_create_job_body_gcp]
gcs_to_gcs_transfer_body = {
DESCRIPTION: "description",
STATUS: GcpTransferJobsStatus.ENABLED,
PROJECT_ID: GCP_PROJECT_ID,
SCHEDULE: {
SCHEDULE_START_DATE: datetime(2015, 1, 1).date(),
SCHEDULE_END_DATE: datetime(2030, 1, 1).date(),
START_TIME_OF_DAY: (datetime.utcnow() + timedelta(seconds=120)).time(),
},
TRANSFER_SPEC: {
GCS_DATA_SOURCE: {BUCKET_NAME: GCP_TRANSFER_FIRST_TARGET_BUCKET},
GCS_DATA_SINK: {BUCKET_NAME: GCP_TRANSFER_SECOND_TARGET_BUCKET},
TRANSFER_OPTIONS: {ALREADY_EXISTING_IN_SINK: True},
},
}
# [END howto_operator_gcp_transfer_create_job_body_gcp]
# [START howto_operator_gcp_transfer_update_job_body]
update_body = {
PROJECT_ID: GCP_PROJECT_ID,
TRANSFER_JOB: {DESCRIPTION: "description_updated"},
TRANSFER_JOB_FIELD_MASK: "description",
}
# [END howto_operator_gcp_transfer_update_job_body]
with models.DAG(
    "example_gcp_transfer",
    schedule_interval=None,  # Override to match your needs
    start_date=days_ago(1),
    tags=["example"],
) as dag:
    # Create the GCS->GCS transfer job described by gcs_to_gcs_transfer_body.
    create_transfer = CloudDataTransferServiceCreateJobOperator(
        task_id="create_transfer", body=gcs_to_gcs_transfer_body
    )
    # [START howto_operator_gcp_transfer_update_job]
    update_transfer = CloudDataTransferServiceUpdateJobOperator(
        task_id="update_transfer",
        job_name="{{task_instance.xcom_pull('create_transfer')['name']}}",
        body=update_body,
    )
    # [END howto_operator_gcp_transfer_update_job]
    # Block until the created job reports an operation with SUCCESS status.
    wait_for_transfer = CloudDataTransferServiceJobStatusSensor(
        task_id="wait_for_transfer",
        job_name="{{task_instance.xcom_pull('create_transfer')['name']}}",
        project_id=GCP_PROJECT_ID,
        expected_statuses={GcpTransferOperationStatus.SUCCESS},
    )
    # List the operations spawned by the job, then fetch the first one.
    list_operations = CloudDataTransferServiceListOperationsOperator(
        task_id="list_operations",
        request_filter={
            FILTER_PROJECT_ID: GCP_PROJECT_ID,
            FILTER_JOB_NAMES: ["{{task_instance.xcom_pull('create_transfer')['name']}}"],
        },
    )
    get_operation = CloudDataTransferServiceGetOperationOperator(
        task_id="get_operation",
        operation_name="{{task_instance.xcom_pull('list_operations')[0]['name']}}",
    )
    # Clean up: remove the transfer job once its operation has been inspected.
    delete_transfer = CloudDataTransferServiceDeleteJobOperator(
        task_id="delete_transfer_from_gcp_job",
        job_name="{{task_instance.xcom_pull('create_transfer')['name']}}",
        project_id=GCP_PROJECT_ID,
    )

    create_transfer >> wait_for_transfer >> update_transfer >> list_operations >> get_operation
    get_operation >> delete_transfer
| {
"content_hash": "3c7c5001e0149e965eba74641c8c55d5",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 100,
"avg_line_length": 35.12307692307692,
"alnum_prop": 0.709592641261498,
"repo_name": "mrkm4ntr/incubator-airflow",
"id": "c4cfa2e31efc3ceb2eec3dfab13dfbb42542f5b4",
"size": "5354",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "airflow/providers/google/cloud/example_dags/example_cloud_storage_transfer_service_gcp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22581"
},
{
"name": "Dockerfile",
"bytes": "31475"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "221101"
},
{
"name": "JavaScript",
"bytes": "32643"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "14407542"
},
{
"name": "Shell",
"bytes": "541811"
}
],
"symlink_target": ""
} |
"""A re-implementation of the MS DirectoryService samples related to services.
* Adds and removes an ActiveDirectory "Service Connection Point",
including managing the security on the object.
* Creates and registers Service Principal Names.
* Changes the username for a domain user.
Some of these functions are likely to become move to a module - but there
is also a little command-line-interface to try these functions out.
For example:
scp.py --account-name=domain\\user --service-class=PythonScpTest \\
--keyword=foo --keyword=bar --binding-string=bind_info \\
ScpCreate SpnCreate SpnRegister
would:
* Attempt to delete a Service Connection Point for the service class
'PythonScpTest'
* Attempt to create a Service Connection Point for that class, with 2
keywords and a binding string of 'bind_info'
* Create a Service Principal Name for the service and register it
to undo those changes, you could execute:
scp.py --account-name=domain\\user --service-class=PythonScpTest \\
SpnCreate SpnUnregister ScpDelete
which will:
* Create a SPN
* Unregister that SPN from the Active Directory.
* Delete the Service Connection Point
Executing with --test will create and remove one of everything.
"""
from win32com.adsi.adsicon import *
from win32com.adsi import adsi
import win32api, win32con, winerror
from win32com.client import Dispatch
import ntsecuritycon as dscon
import win32security
import optparse, textwrap
import traceback
verbose = 1
g_createdSCP = None
g_createdSPNs = []
g_createdSPNLast = None
import logging
logger = logging # use logging module global methods for now.
# still a bit confused about log(n, ...) vs logger.info/debug()
# Returns distinguished name of SCP.
def ScpCreate(
    service_binding_info,
    service_class_name,   # Service class string to store in SCP.
    account_name = None,  # Logon account that needs access to SCP.
    container_name = None,
    keywords = None,
    object_class = "serviceConnectionPoint",
    dns_name_type = "A",
    dn = None,
    dns_name = None,
    ):
    """Create a Service Connection Point under a computer object.

    Returns the created SCP wrapped as an IDispatch object.
    container_name defaults to service_class_name; dn defaults to the
    local computer's object; dns_name defaults to the local computer's
    DNS name.  The account_name account (or, when omitted, the host's
    computer account) is granted access to the SCP's properties via
    AllowAccessToScpProperties.
    """
    container_name = container_name or service_class_name
    if not dns_name:
        # Get the DNS name of the local computer
        dns_name = win32api.GetComputerNameEx(win32con.ComputerNameDnsFullyQualified)
    # Get the distinguished name of the computer object for the local computer
    if dn is None:
        dn = win32api.GetComputerObjectName(win32con.NameFullyQualifiedDN)
    # Compose the ADSpath and bind to the computer object for the local computer
    comp = adsi.ADsGetObject("LDAP://" + dn, adsi.IID_IDirectoryObject)
    # Publish the SCP as a child of the computer object
    keywords = keywords or []
    # Fill in the attribute values to be stored in the SCP.
    attrs = [
        ("cn", ADS_ATTR_UPDATE, ADSTYPE_CASE_IGNORE_STRING, (container_name,)),
        ("objectClass", ADS_ATTR_UPDATE, ADSTYPE_CASE_IGNORE_STRING, (object_class,)),
        ("keywords", ADS_ATTR_UPDATE, ADSTYPE_CASE_IGNORE_STRING, keywords),
        ("serviceDnsName", ADS_ATTR_UPDATE, ADSTYPE_CASE_IGNORE_STRING, (dns_name,)),
        ("serviceDnsNameType", ADS_ATTR_UPDATE, ADSTYPE_CASE_IGNORE_STRING, (dns_name_type,)),
        ("serviceClassName", ADS_ATTR_UPDATE, ADSTYPE_CASE_IGNORE_STRING, (service_class_name,)),
        ("serviceBindingInformation", ADS_ATTR_UPDATE, ADSTYPE_CASE_IGNORE_STRING, (service_binding_info,)),
    ]
    new = comp.CreateDSObject("cn=" + container_name, attrs)
    logger.info("New connection point is at %s", container_name)
    # Wrap in a usable IDispatch object.
    new = Dispatch(new)
    # And allow access to the SCP for the specified account name
    AllowAccessToScpProperties(account_name, new)
    return new
def ScpDelete(container_name, dn = None):
    """Delete the Service Connection Point child of a computer object.

    container_name: the cn of the SCP to remove.
    dn: distinguished name of the parent computer object; defaults to the
        local computer's object.
    """
    if dn is None:
        dn = win32api.GetComputerObjectName(win32con.NameFullyQualifiedDN)
    logger.debug("Removing connection point '%s' from %s", container_name, dn)
    # Bind to the parent computer object, then remove the named child.
    computer = adsi.ADsGetObject("LDAP://" + dn, adsi.IID_IDirectoryObject)
    computer.DeleteDSObject("cn=" + container_name)
    logger.info("Deleted service connection point '%s'", container_name)
# This function is described in detail in the MSDN article titled
# "Enabling Service Account to Access SCP Properties"
# From that article:
# The following sample code sets a pair of ACEs on a service connection point
# (SCP) object. The ACEs grant read/write access to the user or computer account
# under which the service instance will be running. Your service installation
# program calls this code to ensure that the service will be allowed to update
# its properties at run time. If you don't set ACEs like these, your service
# will get access-denied errors if it tries to modify the SCP's properties.
#
# The code uses the IADsSecurityDescriptor, IADsAccessControlList, and
# IADsAccessControlEntry interfaces to do the following:
# * Get the SCP object's security descriptor.
# * Set ACEs in the DACL of the security descriptor.
# * Set the security descriptor back on the SCP object.
def AllowAccessToScpProperties(
    accountSAM, #Service account to allow access.
    scpObject, # The IADs SCP object.
    schemaIDGUIDs = # Attributes to allow write-access to.
        ("{28630eb8-41d5-11d1-a9c1-0000f80367c1}", # serviceDNSName
         "{b7b1311c-b82e-11d0-afee-0000f80367c1}", # serviceBindingInformation
        )
    ):
    """Grant *accountSAM* read/write access to selected SCP attributes.

    Adds one allow-ACE per schemaIDGUID to the SCP's DACL so the service
    can update those properties at run time, then writes the modified
    security descriptor back to the directory.
    """
    # If no service account is specified, service runs under LocalSystem.
    # So allow access to the computer account of the service's host.
    if accountSAM:
        trustee = accountSAM
    else:
        # Get the SAM account name of the computer object for the server.
        trustee = win32api.GetComputerObjectName(win32con.NameSamCompatible)
    # Get the nTSecurityDescriptor attribute
    attribute = "nTSecurityDescriptor"
    sd = getattr(scpObject, attribute)
    acl = sd.DiscretionaryAcl
    for sguid in schemaIDGUIDs:
        ace = Dispatch(adsi.CLSID_AccessControlEntry)
        # Set the properties of the ACE.
        # Allow read and write access to the property.
        ace.AccessMask = ADS_RIGHT_DS_READ_PROP | ADS_RIGHT_DS_WRITE_PROP
        # Set the trustee, which is either the service account or the
        # host computer account.
        ace.Trustee = trustee
        # Set the ACE type.
        ace.AceType = ADS_ACETYPE_ACCESS_ALLOWED_OBJECT
        # Set AceFlags to zero because ACE is not inheritable.
        ace.AceFlags = 0
        # Set Flags to indicate an ACE that protects a specified object.
        ace.Flags = ADS_FLAG_OBJECT_TYPE_PRESENT
        # Set ObjectType to the schemaIDGUID of the attribute.
        ace.ObjectType = sguid
        # Add the ACEs to the DACL.
        acl.AddAce(ace)
    # Write the modified DACL back to the security descriptor.
    sd.DiscretionaryAcl = acl
    # Write the ntSecurityDescriptor property to the property cache.
    setattr(scpObject, attribute, sd)
    # SetInfo updates the SCP object in the directory.
    scpObject.SetInfo()
    logger.info("Set security on object for account '%s'" % (trustee,))
# Service Principal Names functions from the same sample.
# The example calls the DsWriteAccountSpn function, which stores the SPNs in
# Microsoft Active Directory under the servicePrincipalName attribute of the
# account object specified by the serviceAcctDN parameter. The account object
# corresponds to the logon account specified in the CreateService call for this
# service instance. If the logon account is a domain user account,
# serviceAcctDN must be the distinguished name of the account object in
# Active Directory for that user account. If the service's logon account is the
# LocalSystem account, serviceAcctDN must be the distinguished name of the
# computer account object for the host computer on which the service is
# installed. win32api.TranslateNames and win32security.DsCrackNames can
# be used to convert a domain\account format name to a distinguished name.
def SpnRegister(
    serviceAcctDN, # DN of the service's logon account
    spns, # List of SPNs to register
    operation, # Add, replace, or delete SPNs
    ):
    """Write *spns* to the account object's servicePrincipalName attribute.

    serviceAcctDN: distinguished name of the service's logon account, or
        empty/None to use the local computer account (LocalSystem services).
    spns: sequence of SPN strings - a single bare string is rejected.
    operation: one of the win32security DS_SPN_* add/replace/delete codes.
    """
    # BUG FIX: the original check was ``type(spns) not in [str, str]`` -- a
    # Python-2 leftover of ``[str, unicode]`` with a duplicated entry.
    assert not isinstance(spns, str) and hasattr(spns, "__iter__"), \
        "spns must be a sequence of strings (got %r)" % spns
    # Bind to a domain controller.
    # Get the domain for the current user.
    samName = win32api.GetUserNameEx(win32api.NameSamCompatible)
    samName = samName.split('\\', 1)[0]
    if not serviceAcctDN:
        # Get the SAM account name of the computer object for the server.
        serviceAcctDN = win32api.GetComputerObjectName(win32con.NameFullyQualifiedDN)
    logger.debug("SpnRegister using DN '%s'", serviceAcctDN)
    # Get the name of a domain controller in that domain.
    info = win32security.DsGetDcName(
        domainName=samName,
        flags=dscon.DS_IS_FLAT_NAME |
              dscon.DS_RETURN_DNS_NAME |
              dscon.DS_DIRECTORY_SERVICE_REQUIRED)
    # Bind to the domain controller.
    handle = win32security.DsBind(info['DomainControllerName'])
    # Write the SPNs to the service account or computer account.
    # BUG FIX: the log call was missing its format argument.
    logger.debug("DsWriteAccountSpn with spns %s", spns)
    win32security.DsWriteAccountSpn(
        handle, # handle to the directory
        operation, # Add or remove SPN from account's existing SPNs
        serviceAcctDN, # DN of service account or computer account
        spns) # names
    # Unbind the DS in any case (but Python would do it anyway)
    handle.Close()
def UserChangePassword(username_dn, new_password):
    """Reset the password of the account identified by *username_dn*."""
    # Bind to the account object through its distinguished name, then set
    # the new password on it.
    account = adsi.ADsGetObject("LDAP://" + username_dn, adsi.IID_IADsUser)
    account.SetPassword(new_password)
# functions related to the command-line interface
def log(level, msg, *args):
    """Print *msg* %-formatted with *args* when the global verbosity level
    is at least *level*."""
    if verbose < level:
        return
    print(msg % args)
class _NoDefault: pass
def _get_option(po, opt_name, default = _NoDefault):
parser, options = po
ret = getattr(options, opt_name, default)
if not ret and default is _NoDefault:
parser.error("The '%s' option must be specified for this operation" % opt_name)
if not ret:
ret = default
return ret
def _option_error(po, why):
parser = po[0]
parser.error(why)
def do_ScpCreate(po):
    """Create a Service Connection Point"""
    global g_createdSCP
    # Gather the options the SCP needs; binding string and service class
    # are mandatory, the rest are optional.
    binding = _get_option(po, "binding_string")
    service_class = _get_option(po, "service_class")
    account = _get_option(po, "account_name_sam", None)
    keywords = _get_option(po, "keywords", None)
    scp = ScpCreate(binding, service_class, account, keywords=keywords)
    # Remember the SCP so later Spn* commands can reference it.
    g_createdSCP = scp
    return scp.distinguishedName
def do_ScpDelete(po):
    """Delete a Service Connection Point"""
    sc = _get_option(po, "service_class")
    try:
        ScpDelete(sc)
    except adsi.error as details:
        # Bug fix: exception instances are not indexable in Python 3; the
        # HRESULT is the first element of .args.
        if details.args[0] != winerror.ERROR_DS_OBJ_NOT_FOUND:
            raise
        # Deleting something that doesn't exist is treated as success.
        log(2, "ScpDelete ignoring ERROR_DS_OBJ_NOT_FOUND for service-class '%s'",
            sc)
    return sc
def do_SpnCreate(po):
    """Create a Service Principal Name"""
    # The SCP's distinguished name acts as the 'service name' for the SPN,
    # so ScpCreate must already have run.
    if g_createdSCP is None:
        _option_error(po, "ScpCreate must have been specified before SpnCreate")
    # Create a Service Principal Name"
    service_class = _get_option(po, "service_class")
    port = _get_option(po, "port", 0)
    spns = win32security.DsGetSpn(dscon.DS_SPN_SERVICE, service_class,
                                  g_createdSCP.distinguishedName, port,
                                  None, None)
    spn = spns[0]
    log(2, "Created SPN: %s", spn)
    # Track the SPN for the SpnRegister/SpnUnregister commands.
    global g_createdSPNLast
    g_createdSPNLast = spn
    g_createdSPNs.append(spn)
    return spn
def do_SpnRegister(po):
    """Register a previously created Service Principal Name"""
    if not g_createdSPNLast:
        _option_error(po, "SpnCreate must appear before SpnRegister")
    account_dn = _get_option(po, "account_name_dn", None)
    SpnRegister(account_dn, (g_createdSPNLast,), dscon.DS_SPN_ADD_SPN_OP)
    return g_createdSPNLast
def do_SpnUnregister(po):
    """Unregister a previously created Service Principal Name"""
    if not g_createdSPNLast:
        _option_error(po, "SpnCreate must appear before SpnUnregister")
    account_dn = _get_option(po, "account_name_dn", None)
    SpnRegister(account_dn, (g_createdSPNLast,), dscon.DS_SPN_DELETE_SPN_OP)
    return g_createdSPNLast
def do_UserChangePassword(po):
    """Change the password for a specified user"""
    dn = _get_option(po, "account_name_dn")
    password = _get_option(po, "password")
    UserChangePassword(dn, password)
    return "Password changed OK"
# Dispatch table mapping command names to handler functions.  main() matches
# command-line arguments against these names case-insensitively and also uses
# each handler's docstring to build the --help text.
handlers = (
    ('ScpCreate', do_ScpCreate),
    ('ScpDelete', do_ScpDelete),
    ('SpnCreate', do_SpnCreate),
    ('SpnRegister', do_SpnRegister),
    ('SpnUnregister', do_SpnUnregister),
    ('UserChangePassword', do_UserChangePassword),
)
class HelpFormatter(optparse.IndentedHelpFormatter):
    """Help formatter that emits the description verbatim.

    optparse normally re-wraps the description; this preserves the
    pre-formatted per-command help text built in main().
    """
    def format_description(self, description):
        return description
def main():
    """Parse the command line, validate the options, and execute each named
    command in order via the `handlers` dispatch table."""
    global verbose
    _handlers_dict = {}

    # Build the per-command help text and the case-insensitive lookup table.
    arg_descs = []
    for arg, func in handlers:
        this_desc = "\n".join(textwrap.wrap(func.__doc__,
                              subsequent_indent = " " * 8))
        arg_descs.append(" %s: %s" % (arg, this_desc))
        _handlers_dict[arg.lower()] = func

    description = __doc__ + "\ncommands:\n" + "\n".join(arg_descs) + "\n"

    parser = optparse.OptionParser(usage = "%prog [options] command ...",
                                   description=description,
                                   formatter=HelpFormatter())

    parser.add_option("-v", action="count",
                      dest="verbose", default=1,
                      help="increase the verbosity of status messages")

    parser.add_option("-q", "--quiet", action="store_true",
                      help="Don't print any status messages")

    # Bug fix: this statement previously ended with a stray trailing comma,
    # silently creating a useless one-element tuple.
    parser.add_option("-t", "--test", action="store_true",
                      help="Execute a mini-test suite, providing defaults for most options and args")

    parser.add_option("", "--show-tracebacks", action="store_true",
                      help="Show the tracebacks for any exceptions")

    parser.add_option("", "--service-class",
                      help="The service class name to use")

    parser.add_option("", "--port", default=0,
                      help="The port number to associate with the SPN")

    parser.add_option("", "--binding-string",
                      help="The binding string to use for SCP creation")

    parser.add_option("", "--account-name",
                      help="The account name to use (default is LocalSystem)")

    parser.add_option("", "--password",
                      help="The password to set.")

    parser.add_option("", "--keyword", action="append", dest="keywords",
                      help="""A keyword to add to the SCP. May be specified
multiple times""")

    parser.add_option("", "--log-level",
                      help="""The log-level to use - may be a number or a logging
module constant""", default=str(logging.WARNING))

    options, args = parser.parse_args()
    po = (parser, options)
    # --port must be an integer.
    try:
        options.port = int(options.port)
    except (TypeError, ValueError):
        parser.error("--port must be numeric")

    # --log-level accepts either a number or a logging-module constant name.
    try:
        log_level = int(options.log_level)
    except (TypeError, ValueError):
        try:
            log_level = int(getattr(logging, options.log_level.upper()))
        except (ValueError, TypeError, AttributeError):
            parser.error("Invalid --log-level value")
    try:
        sl = logger.setLevel
        # logger is a real logger
    except AttributeError:
        # logger is logging module
        sl = logging.getLogger().setLevel
    sl(log_level)

    # Check -q/-v.  Bug fix: `verbose` defaults to 1, so "-q" on its own used
    # to be rejected; only treat -v as explicitly given when the count was
    # incremented above the default.
    if options.quiet and options.verbose > 1:
        parser.error("Can't specify --quiet and --verbose")
    if options.quiet:
        options.verbose -= 1
    verbose = options.verbose

    # --test replaces the args with a canned command sequence and supplies
    # defaults for the options those commands need.
    if options.test:
        if args:
            parser.error("Can't specify args with --test")
        args = "ScpDelete ScpCreate SpnCreate SpnRegister SpnUnregister ScpDelete"
        log(1, "--test - pretending args are:\n %s", args)
        args = args.split()
        if not options.service_class:
            options.service_class = "PythonScpTest"
            log(2, "--test: --service-class=%s", options.service_class)
        if not options.keywords:
            options.keywords = "Python Powered".split()
            log(2, "--test: --keyword=%s", options.keywords)
        if not options.binding_string:
            options.binding_string = "test binding string"
            log(2, "--test: --binding-string=%s", options.binding_string)
    # Validate all commands before executing any of them.
    if not args:
        parser.error("No command specified (use --help for valid commands)")
    for arg in args:
        if arg.lower() not in _handlers_dict:
            parser.error("Invalid command '%s' (use --help for valid commands)" % arg)

    # Pre-compute both name forms of --account-name for the handlers.
    if options.account_name:
        log(2, "Translating account name '%s'", options.account_name)
        options.account_name_sam = win32security.TranslateName(options.account_name,
                                                               win32api.NameUnknown,
                                                               win32api.NameSamCompatible)
        log(2, "NameSamCompatible is '%s'", options.account_name_sam)
        options.account_name_dn = win32security.TranslateName(options.account_name,
                                                              win32api.NameUnknown,
                                                              win32api.NameFullyQualifiedDN)
        # Bug fix: log message previously read "NameFullyQualifiedDNis".
        log(2, "NameFullyQualifiedDN is '%s'", options.account_name_dn)

    # Execute the commands in the order given, reporting (but not aborting on)
    # per-command ADSI/win32 failures.
    for arg in args:
        handler = _handlers_dict[arg.lower()] # already been validated
        if handler is None:
            # Defensive; unreachable after the validation loop above.
            parser.error("Invalid command '%s'" % arg)
        err_msg = None
        try:
            try:
                log(2, "Executing '%s'...", arg)
                result = handler(po)
                log(1, "%s: %s", arg, result)
            except:
                if options.show_tracebacks:
                    print("--show-tracebacks specified - dumping exception")
                    traceback.print_exc()
                raise
        except adsi.error as details:
            # ADSI errors carry (hresult, description, exception-info, argerr).
            (hr, desc, exc, argerr) = details.args
            if exc:
                extra_desc = exc[2]
            else:
                extra_desc = ""
            err_msg = desc
            if extra_desc:
                err_msg += "\n\t" + extra_desc
        except win32api.error as details:
            (hr, func, msg) = details.args
            err_msg = msg
        if err_msg:
            log(1, "Command '%s' failed: %s", arg, err_msg)
if __name__=='__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Let Ctrl+C terminate cleanly instead of printing a traceback.
        print("*** Interrupted")
| {
"content_hash": "59b564f7ebb960ed662bede8a1415b22",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 108,
"avg_line_length": 39.697154471544714,
"alnum_prop": 0.6368849521273873,
"repo_name": "Microsoft/PTVS",
"id": "7fd4ad379e4f76a10d2bd58e4d085533414aaadf",
"size": "19531",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/win32comext/adsi/demos/scp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
from django.db import models
# Create your models here.
class order(models.Model):
    """A single ordered item (product plus brand), keyed by item_id."""
    # NOTE(review): class name is lower-case; renaming would break existing
    # migrations and imports, so it is left as-is.
    item_id = models.IntegerField(default=0)
    product = models.CharField(max_length=200)
    brandName = models.CharField(max_length=200)
class useraccount(models.Model):
    """Login credentials for a user (username is an e-mail address)."""
    username = models.EmailField(max_length=200)
    # NOTE(review): the password appears to be stored in plain text --
    # consider hashing (e.g. Django's auth framework).  Also, max_length on
    # a TextField is not enforced at the database level.
    password = models.TextField(max_length=16)
| {
"content_hash": "0d1dadc0a31b514834c7668e0e1864e1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 48,
"avg_line_length": 29.666666666666668,
"alnum_prop": 0.7415730337078652,
"repo_name": "Enyruas/Kitux",
"id": "0713ecb14059eae511d190d73a21e4aab76e3760",
"size": "356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sellrobots/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "149511"
},
{
"name": "JavaScript",
"bytes": "61962"
},
{
"name": "Python",
"bytes": "11513"
}
],
"symlink_target": ""
} |
"""
===========
Print Graph
===========
Example subclass of the Graph class.
"""
# Author: Aric Hagberg (hagberg@lanl.gov)
# Copyright (C) 2004-2019 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
__docformat__ = "restructuredtext en"
from copy import deepcopy
import matplotlib.pyplot as plt
import networkx as nx
from networkx import Graph
class PrintGraph(Graph):
    """
    Example subclass of the Graph class.

    Prints activity log to file or standard output.
    """
    def __init__(self, data=None, name='', file=None, **attr):
        # file: path to write the activity log to; defaults to stdout.
        Graph.__init__(self, data=data, name=name, **attr)
        if file is None:
            import sys
            self.fh = sys.stdout
        else:
            self.fh = open(file, 'w')

    def add_node(self, n, attr_dict=None, **attr):
        # NOTE(review): networkx 2.x removed the attr_dict parameter from
        # Graph.add_node, so attr_dict is forwarded here as a regular keyword
        # argument -- confirm this is intended for the installed nx version.
        Graph.add_node(self, n, attr_dict=attr_dict, **attr)
        self.fh.write("Add node: %s\n" % n)

    def add_nodes_from(self, nodes, **attr):
        # Delegate to add_node so every addition is logged.
        for n in nodes:
            self.add_node(n, **attr)

    def remove_node(self, n):
        Graph.remove_node(self, n)
        self.fh.write("Remove node: %s\n" % n)

    def remove_nodes_from(self, nodes):
        # Delegate to remove_node so every removal is logged.
        for n in nodes:
            self.remove_node(n)

    def add_edge(self, u, v, attr_dict=None, **attr):
        # See NOTE on add_node about the attr_dict pass-through.
        Graph.add_edge(self, u, v, attr_dict=attr_dict, **attr)
        self.fh.write("Add edge: %s-%s\n" % (u, v))

    def add_edges_from(self, ebunch, attr_dict=None, **attr):
        # Each item of ebunch may carry extra data beyond (u, v); only the
        # endpoints are used here.
        for e in ebunch:
            u, v = e[0:2]
            self.add_edge(u, v, attr_dict=attr_dict, **attr)

    def remove_edge(self, u, v):
        Graph.remove_edge(self, u, v)
        self.fh.write("Remove edge: %s-%s\n" % (u, v))

    def remove_edges_from(self, ebunch):
        for e in ebunch:
            u, v = e[0:2]
            self.remove_edge(u, v)

    def clear(self):
        Graph.clear(self)
        self.fh.write("Clear graph\n")
if __name__ == '__main__':
    G = PrintGraph()
    G.add_node('foo')
    # Strings are iterables of characters: this adds nodes 'b', 'a', 'r'.
    G.add_nodes_from('bar', weight=8)
    G.remove_node('b')
    G.remove_nodes_from('ar')
    print("Nodes in G: ", G.nodes(data=True))
    G.add_edge(0, 1, weight=10)
    print("Edges in G: ", G.edges(data=True))
    G.remove_edge(0, 1)
    G.add_edges_from(zip(range(0, 3), range(1, 4)), weight=10)
    print("Edges in G: ", G.edges(data=True))
    G.remove_edges_from(zip(range(0, 3), range(1, 4)))
    print("Edges in G: ", G.edges(data=True))

    # Second demo: a path on 0..9 joined to a star on 9..12, then drawn.
    G = PrintGraph()
    nx.add_path(G, range(10))
    nx.add_star(G, range(9, 13))
    nx.draw(G)
    plt.show()
| {
"content_hash": "715194c7bd673c2f8c1ecf86c121ec95",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 63,
"avg_line_length": 27.040816326530614,
"alnum_prop": 0.5637735849056604,
"repo_name": "sserrot/champion_relationships",
"id": "a9ad473cc7ff76047614fac3165e8297b388bf44",
"size": "2650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/share/doc/networkx-2.4/examples/subclass/plot_printgraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
import os
import shutil
import pdb
import torch
import torch.nn as nn
import torch.nn.functional as F
from auto_LiRPA.utils import logger
class LSTMCore(nn.Module):
    """A single forward LSTM whose mean-pooled hidden states feed a linear
    classifier.

    Each input example is expected as (batch, length, input_size) where
    input_size = args.input_size // args.num_slices.
    """
    def __init__(self, args):
        super(LSTMCore, self).__init__()
        self.input_size = args.input_size // args.num_slices
        self.hidden_size = args.hidden_size
        self.num_classes = args.num_classes
        self.device = args.device
        # Submodule construction order is kept stable so parameter
        # initialization is reproducible under a fixed RNG seed.
        self.cell_f = nn.LSTMCell(self.input_size, self.hidden_size)
        self.linear = nn.Linear(self.hidden_size, self.num_classes)

    def forward(self, X):
        """Return class logits of shape (batch, num_classes) for input X."""
        batch, num_steps = X.shape[0], X.shape[1]
        hidden = torch.zeros(batch, self.hidden_size, device=X.device)
        cell = torch.zeros(batch, self.hidden_size, device=X.device)
        pooled = torch.zeros(batch, self.hidden_size, device=X.device)
        # Unroll the LSTM over time, accumulating hidden states for pooling.
        for step in range(num_steps):
            hidden, cell = self.cell_f(X[:, step], (hidden, cell))
            pooled = pooled + hidden
        return self.linear(pooled / float(num_steps))
class LSTM(nn.Module):
    """Wrapper around LSTMCore handling checkpointing, optimizer creation
    and batch assembly."""

    def __init__(self, args):
        super(LSTM, self).__init__()
        self.args = args
        self.device = args.device
        self.lr = args.lr
        self.num_slices = args.num_slices
        self.dir = args.dir
        # Make sure the checkpoint directory exists.
        if not os.path.exists(self.dir):
            os.makedirs(self.dir)
        self.checkpoint = 0
        self.model = LSTMCore(args)
        # args.load, when set, is a state dict to restore.
        if args.load:
            self.model.load_state_dict(args.load)
            logger.info(f"Model loaded: {args.load}")
        else:
            logger.info("Model initialized")
        self.model = self.model.to(self.device)
        self.core = self.model

    def save(self, epoch):
        """Persist core weights under dir/ckpt-<epoch> and record the epoch."""
        output_dir = os.path.join(self.dir, "ckpt-%d" % epoch)
        # Replace any stale checkpoint directory for this epoch.
        if os.path.exists(output_dir):
            shutil.rmtree(output_dir)
        os.mkdir(output_dir)
        torch.save(self.core.state_dict(), os.path.join(output_dir, "model"))
        with open(os.path.join(self.dir, "checkpoint"), "w") as file:
            file.write(str(epoch))
        logger.info("LSTM saved: %s" % output_dir)

    def build_optimizer(self):
        """Return an Adam optimizer over all core parameters, no weight decay."""
        params = [p for _, p in self.core.named_parameters()]
        return torch.optim.Adam([{"params": params, "weight_decay": 0.}],
                                lr=self.lr)

    def get_input(self, batch):
        """Stack a batch of (tensor, label) examples into (X, y) on device.

        Each example tensor is reshaped to (1, num_slices, -1) before
        concatenation along the batch dimension.
        """
        X = torch.cat([ex[0].reshape(1, self.num_slices, -1) for ex in batch])
        y = torch.tensor([ex[1] for ex in batch], dtype=torch.long)
        return X.to(self.device), y.to(self.device)

    def train(self):
        self.core.train()

    def eval(self):
        self.core.eval()
"content_hash": "316784a60e5b6c8ba4729118506bae1a",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 88,
"avg_line_length": 33.592592592592595,
"alnum_prop": 0.5780962881293642,
"repo_name": "KaidiXu/auto_LiRPA",
"id": "6e985b46e999ff1e20b3a082631ba8b0f8e26b8e",
"size": "2721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sequence/lstm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "981"
},
{
"name": "Cuda",
"bytes": "1234"
},
{
"name": "Python",
"bytes": "699644"
}
],
"symlink_target": ""
} |
"""
Serializers
"""
import logging
from rest_framework import serializers
import todolistapp.models as models
'''
from rest_framework.renderers import JSONRenderer
import todolistapp.models as models;import todolistapp.serializers as serializers
JSONRenderer().render(serializers.ListSerializer(models.List.objects.all()[0]).data)
'''
# Get an instance of a logger
logger = logging.getLogger('todoapplist')
class ItemSerializer(serializers.ModelSerializer):
    """Serializes a TodoItem's id and title; id is read-only."""
    class Meta:
        model = models.TodoItem
        fields = ('id','title')
        read_only_fields = ('id',)
class ListSerializer(serializers.ModelSerializer):
    """Serializes a TodoList with its nested TodoItems.

    Bug fix: the declared nested field is named 'todoitem_set', but
    Meta.fields previously listed 'todo_set'; DRF refuses to build a
    serializer whose declared field is missing from 'fields'.
    """
    todoitem_set = ItemSerializer(many=True)

    class Meta:
        model = models.TodoList
        fields = ('id', 'title', 'todoitem_set')
        read_only_fields = ('id',)
| {
"content_hash": "72a63f342ccee1b2cce51409f06f4fca",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 84,
"avg_line_length": 26.766666666666666,
"alnum_prop": 0.7123287671232876,
"repo_name": "mannyrivera2010/todolist-py3-drf",
"id": "f20c90b76bc39afe349475f83d7e6afd1f408c29",
"size": "803",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todolistapp/api/todolist/serializers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16734"
},
{
"name": "Shell",
"bytes": "1146"
}
],
"symlink_target": ""
} |
import os.path
import unittest
from flashproxy.keys import PIN_GOOGLE_CA_CERT, PIN_GOOGLE_PUBKEY_SHA1, check_certificate_pin, temp_cert
class TempCertTest(unittest.TestCase):
    """Tests for the temp_cert context manager."""
    # Fix: removed the unused `fn = None` locals from both test methods.

    def test_temp_cert_success(self):
        """The cert file exists inside the context and is removed afterwards."""
        with temp_cert(PIN_GOOGLE_CA_CERT) as ca_filename:
            self.assertTrue(os.path.exists(ca_filename))
            with open(ca_filename) as f:
                lines = f.readlines()
            self.assertIn("-----BEGIN CERTIFICATE-----\n", lines)
        self.assertFalse(os.path.exists(ca_filename))

    def test_temp_cert_raise(self):
        """The cert file is removed even when the managed block raises."""
        try:
            with temp_cert(PIN_GOOGLE_CA_CERT) as ca_filename:
                raise ValueError()
            # Only reached if temp_cert swallows the exception.
            self.fail()
        except ValueError:
            self.assertFalse(os.path.exists(ca_filename))
| {
"content_hash": "3fda201fd58d70a4bf755f388bf24c7a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 104,
"avg_line_length": 34.5,
"alnum_prop": 0.6111111111111112,
"repo_name": "arlolra/flashproxy",
"id": "0adff32d8d10d3e36f61f7b17983c6b2df1a33a8",
"size": "828",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "flashproxy/test/test_keys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "240"
},
{
"name": "Go",
"bytes": "1864"
},
{
"name": "HTML",
"bytes": "6120"
},
{
"name": "JavaScript",
"bytes": "51282"
},
{
"name": "Makefile",
"bytes": "3866"
},
{
"name": "Python",
"bytes": "179423"
},
{
"name": "R",
"bytes": "544"
},
{
"name": "Scheme",
"bytes": "1192"
},
{
"name": "Shell",
"bytes": "27640"
}
],
"symlink_target": ""
} |
"""
You're given a matrix represented by a two-dimensional array,
and two positive integers r and c representing the row number and column number of the wanted reshaped matrix, respectively.
The reshaped matrix need to be filled with all the elements of the original matrix in the same row-traversing order as they were.
If the 'reshape' operation with given parameters is possible and legal, output the new reshaped matrix;
Otherwise, output the original matrix.
Example 1: Example 2:
Input: Input:
nums = nums =
[[1,2], [[1,2],
[3,4]] [3,4]]
r = 1, c = 4 r = 2, c = 4
Output: Output:
[[1,2,3,4]] [[1,2],
[3,4]]
Note:
The height and width of the given matrix is in range [1, 100].
The given r and c are all positive.
"""
class Solution(object):
    def matrixReshape(self, nums, r, c):
        """Reshape nums into an r x c matrix in row-major order.

        :type nums: List[List[int]]
        :type r: int
        :type c: int
        :rtype: List[List[int]]  (nums unchanged when the reshape is illegal)
        """
        rows, cols = len(nums), len(nums[0])
        # A reshape is only legal when the element counts match.
        if rows * cols != r * c:
            return nums
        flat = [value for row in nums for value in row]
        return [flat[i * c:(i + 1) * c] for i in range(r)]
if __name__ == "__main__":
    # Quick smoke test: flatten a 2x2 matrix into 1x4.
    sample = Solution()
    print(sample.matrixReshape(nums=[[1, 2], [3, 4]], r=1, c=4))
| {
"content_hash": "1612c6f1c8b9035ff26911d08e63127c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 129,
"avg_line_length": 33.450980392156865,
"alnum_prop": 0.47245017584994137,
"repo_name": "Vonzpf/LeetCode",
"id": "6ce295a51a08570c8fd7676d8ad9789e7f695f6a",
"size": "1744",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ReshapeTheMatrix.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "25807"
},
{
"name": "Python",
"bytes": "43800"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
DUAS_ENABLE_DB_WRITE = getattr(settings, 'DUAS_ENABLE_DB_WRITE', False)
DUAS_DB_ROUTE_PREFIX = getattr(settings, 'DUAS_DB_ROUTE_PREFIX',
'unity_asset_server')
class UnityAssetServerRouter(object):
    """
    Router for unity asset server data base
    """
    def db_for_read(self, model, **hints):
        """Route reads of duashttp models to the asset-server database."""
        if model._meta.app_label != 'duashttp':
            return None
        return DUAS_DB_ROUTE_PREFIX

    def db_for_write(self, model, **hints):
        """Route writes of duashttp models, guarded by DUAS_ENABLE_DB_WRITE."""
        if model._meta.app_label != 'duashttp':
            return None
        if not DUAS_ENABLE_DB_WRITE:
            raise ImproperlyConfigured(
                "Set `DUAS_ENABLE_DB_WRITE` to True in your settings to enable "
                "write operations on unity asset server database"
            )
        return DUAS_DB_ROUTE_PREFIX

    def allow_relation(self, obj1, obj2, **hints):
        """Allow relations whenever either object belongs to duashttp."""
        if 'duashttp' in (obj1._meta.app_label, obj2._meta.app_label):
            return True
        return None

    def allow_migrate(self, db, model):
        """Keep duashttp models exclusively on the asset-server database."""
        is_duas = model._meta.app_label == 'duashttp'
        if db == DUAS_DB_ROUTE_PREFIX:
            return is_duas
        if is_duas:
            return False
        return None
| {
"content_hash": "2f85d1e29def527240962bb90641abd7",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 84,
"avg_line_length": 32.320754716981135,
"alnum_prop": 0.5674255691768827,
"repo_name": "tarvitz/django-unity-asset-server-http-client",
"id": "e1e19c23859e3e51973bb0f041c4b27f87ce827d",
"size": "1729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "duashttp/router.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15766"
}
],
"symlink_target": ""
} |
import subprocess, shutil, sys, os
def gen_image(scriptname, filename, fid='', view=True):
    """Render *filename* with yosys using scripts/<scriptname>, copy the
    resulting image into output/circuitimages/, and optionally open it.

    fid: suffix appended to the output basename (e.g. '_opt').
    """
    basename = os.path.splitext(os.path.basename(filename))[0]
    # Security/robustness fix: pass the command as an argument list
    # (shell=False) so filenames with spaces or shell metacharacters cannot
    # break or inject into the command line.
    subprocess.call(['yosys', filename, os.path.join('scripts', scriptname)])
    out_path = 'output/circuitimages/%s.png' % (basename + fid)
    # NOTE(review): yosys writes its show output to this hard-coded per-user
    # path -- confirm it matches the local yosys configuration.
    shutil.copy('/home/lucas/.yosys_show.png', out_path)
    if view:
        subprocess.call(['xdg-open', out_path])
def show(files):
    """Generate and display both the plain and the optimized circuit image
    for every given source file."""
    for path in files:
        gen_image('show.ys', path, view=True)
        gen_image('optshow.ys', path, '_opt', view=True)
if __name__ == "__main__":
    # CLI: each command-line argument is a source file to visualize.
    show(sys.argv[1:])
| {
"content_hash": "010042f2017256cbdbbf2ee59926db97",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 94,
"avg_line_length": 39.05882352941177,
"alnum_prop": 0.6325301204819277,
"repo_name": "LSaldyt/qnp",
"id": "7f176153e9a651d0169c5e7f8ab487aeb222c65e",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/show.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Coq",
"bytes": "29785"
},
{
"name": "Makefile",
"bytes": "116"
},
{
"name": "Prolog",
"bytes": "1062"
},
{
"name": "Python",
"bytes": "25676"
},
{
"name": "TeX",
"bytes": "22586"
},
{
"name": "Verilog",
"bytes": "507264"
}
],
"symlink_target": ""
} |
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies file copies with a trailing slash in the destination directory.
"""
import TestGyp

test = TestGyp.TestGyp()

test.run_gyp('copies-slash.gyp', chdir='src')

test.relocate('src', 'relocate/src')
test.build('copies-slash.gyp', chdir='relocate/src')

# Both output directories must receive identical copies of the same three
# files (the second uses a trailing slash in the destination).
expected = (
    ('directory/file3', 'file3 contents\n'),
    ('directory/file4', 'file4 contents\n'),
    ('directory/subdir/file5', 'file5 contents\n'),
)
for out_dir in ('copies-out-slash', 'copies-out-slash-2'):
    for rel_path, contents in expected:
        test.built_file_must_match('%s/%s' % (out_dir, rel_path), contents,
                                   chdir='relocate/src')

test.pass_test()
| {
"content_hash": "5f3c5c64fedd0dc149dbcda172ead398",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 72,
"avg_line_length": 38.1578947368421,
"alnum_prop": 0.5606896551724138,
"repo_name": "Jet-Streaming/gyp",
"id": "ec1d3cd4182c4f0bb0519d29d0735e706dca1422",
"size": "1473",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/copies/gyptest-slash.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1194"
},
{
"name": "Batchfile",
"bytes": "1133"
},
{
"name": "C",
"bytes": "38674"
},
{
"name": "C++",
"bytes": "41140"
},
{
"name": "Objective-C",
"bytes": "10353"
},
{
"name": "Objective-C++",
"bytes": "1958"
},
{
"name": "Python",
"bytes": "3290293"
},
{
"name": "Shell",
"bytes": "12644"
},
{
"name": "Swift",
"bytes": "124"
}
],
"symlink_target": ""
} |
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _, get_language
from cms.models.pagemodel import Page
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_htmlsitemap import models
import re
class HtmlSitemapPlugin(CMSPluginBase):
    """HTML Sitemap CMS plugin."""
    name = _('HTML Sitemap')
    model = models.HtmlSitemap
    render_template = 'cmsplugin_htmlsitemap/sitemap.html'

    def render(self, context, instance, placeholder):
        """Collect the pages to list, applying the instance's filters."""
        # Start from the published, public pages of the current site, in
        # tree (menu) order.
        site = Site.objects.get_current()
        pages = Page.objects.public().published(site=site).order_by('tree_id', 'lft')
        # Restrict to the configured depth range.
        pages = pages.filter(level__gte=instance.level_min,
                             level__lte=instance.level_max)
        # None means "don't care"; True/False filters on menu visibility.
        if not instance.in_navigation is None:
            pages = pages.filter(in_navigation=instance.in_navigation)
        if instance.match_language:
            pages = pages.filter(title_set__language=get_language())
        if instance.match_created_by:
            pages = pages.filter(created_by=instance.match_created_by)
        if instance.match_title:
            pages = pages.filter(title_set__title__contains=instance.match_title)
        if instance.match_url:
            # Regex filtering cannot be done in the ORM here; note that this
            # evaluates the queryset and yields a plain list.
            pat = re.compile(instance.match_url, re.IGNORECASE)
            pages = [ p for p in pages if pat.search(p.get_absolute_url()) ]
        context.update({
            'instance':instance,
            'pages':pages,
        })
        return context

plugin_pool.register_plugin(HtmlSitemapPlugin)
| {
"content_hash": "7c4f0b9298bc75b32d56ea26667c506d",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 85,
"avg_line_length": 39.35,
"alnum_prop": 0.6639135959339263,
"repo_name": "raphaa/cmsplugin-htmlsitemap",
"id": "26dcb19b08e317d2bf1a711abe5d481632e3f12e",
"size": "1598",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cmsplugin_htmlsitemap/cms_plugins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "350"
},
{
"name": "Python",
"bytes": "12576"
}
],
"symlink_target": ""
} |
__license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <lucas@theis.io>'
__docformat__ = 'epytext'
from scipy.special import erf, erfinv
from scipy.stats import norm
from scipy.optimize import bisect
from numpy import mean, sqrt, asarray, max, min, any
from transforms import Transform
import pdb
class UnivariateGaussianization(Transform):
    """Map univariate data through the model CDF followed by the inverse
    Gaussian CDF, so that model-distributed data becomes standard normal."""

    def __init__(self, mog):
        # mog: mixture-of-Gaussians model; assumed to expose cdf(),
        # loglikelihood() and a `means` attribute -- TODO confirm API.
        self.mog = mog

    def apply(self, data):
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)

        # apply model CDF
        data = self.mog.cdf(data)

        # apply inverse Gaussian CDF
        result = erfinv(data * 2. - 1.)
        # Clip so erfinv's infinite tails stay finite before scaling.
        result[result > 6.] = 6.
        result[result < -6.] = -6.

        return result * sqrt(2.)

    def inverse(self, data, max_iter=100):
        """Numerically invert apply() by bisecting the model CDF.

        max_iter: maximum bisection iterations per data point.
        """
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)

        # apply Gaussian CDF
        data = norm.cdf(data)

        # apply inverse model CDF
        # Initial bracketing interval around the mixture mean; widened below
        # as needed so each root is guaranteed to lie inside it.
        val_max = mean(self.mog.means) + 1.
        val_min = mean(self.mog.means) - 1.

        for t in range(data.shape[1]):
            # make sure root lies between val_min and val_max
            while float(self.mog.cdf(val_min)) > data[0, t]:
                val_min -= 1.
            while float(self.mog.cdf(val_max)) < data[0, t]:
                val_max += 1.

            # find root numerically
            data[0, t] = bisect(
                f=lambda x: float(self.mog.cdf(x)) - data[0, t],
                a=val_min,
                b=val_max,
                maxiter=max_iter,
                disp=False)

        return data

    def logjacobian(self, data):
        """Log absolute determinant of the Jacobian of apply() at data,
        via the change-of-variables identity."""
        # make sure data has right shape
        data = asarray(data).reshape(1, -1)

        data_ug = self.apply(data)

        return self.mog.loglikelihood(data) - norm.logpdf(data_ug)
| {
"content_hash": "6e182a019de8412443ef736d1302c9dd",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 23.681159420289855,
"alnum_prop": 0.6505507955936353,
"repo_name": "lucastheis/isa",
"id": "85eb0ed34aec6c919cee82f5578985a62cf4bd41",
"size": "1634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "code/transforms/univariategaussianization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "225674"
}
],
"symlink_target": ""
} |
import math
import random
import ps6_visualize
import pylab
# === Provided classes
class Position(object):
    """
    A Position represents a location in a two-dimensional room.
    """
    def __init__(self, x, y):
        """Initialize a position at coordinates (x, y)."""
        self.x = x
        self.y = y

    def getX(self):
        return self.x

    def getY(self):
        return self.y

    def getNewPosition(self, angle, speed):
        """Return the Position reached after one clock-tick starting here,
        moving at the given angle and speed.

        Does NOT test whether the returned position fits inside the room.

        angle: float, degrees, 0 <= angle < 360
        speed: positive float
        """
        radians = math.radians(angle)
        # The angle is measured clockwise from the positive y axis, so the
        # x-displacement uses sin and the y-displacement uses cos.
        return Position(self.x + speed * math.sin(radians),
                        self.y + speed * math.cos(radians))
# === Problems 1
class RectangularRoom(object):
    """
    A RectangularRoom represents a rectangular region containing clean or dirty
    tiles.

    A room has a width and a height and contains (width * height) tiles. At any
    particular time, each of these tiles is either clean or dirty.
    """
    def __init__(self, width, height):
        """
        Initializes a rectangular room with the specified width and height.

        Initially, no tiles in the room have been cleaned.

        width: an integer > 0
        height: an integer > 0
        """
        self.width = width
        self.height = height
        # Set of (m, n) coordinates of tiles that have been cleaned.
        self.cleanedTiles = set()

    def cleanTileAtPosition(self, pos):
        """
        Mark the tile under the position POS as cleaned.

        Assumes that POS represents a valid position inside this room.

        pos: a Position
        """
        # The tile containing a position is identified by truncating the
        # coordinates to integers.
        self.cleanedTiles.add((int(pos.getX()), int(pos.getY())))

    def isTileCleaned(self, m, n):
        """
        Return True if the tile (m, n) has been cleaned.

        Assumes that (m, n) represents a valid tile inside the room.

        m: an integer
        n: an integer
        returns: True if (m, n) is cleaned, False otherwise
        """
        return (m, n) in self.cleanedTiles

    def getNumTiles(self):
        """
        Return the total number of tiles in the room.

        returns: an integer
        """
        return self.width * self.height

    def getNumCleanedTiles(self):
        """
        Return the total number of clean tiles in the room.

        returns: an integer
        """
        return len(self.cleanedTiles)

    def getRandomPosition(self):
        """
        Return a random position inside the room.

        returns: a Position object.
        """
        return Position(random.random() * self.width,
                        random.random() * self.height)

    def isPositionInRoom(self, pos):
        """
        Return True if pos is inside the room.

        pos: a Position object.
        returns: True if pos is in the room, False otherwise.
        """
        return (0 <= pos.getX() < self.width and
                0 <= pos.getY() < self.height)
class Robot(object):
    """
    Represents a robot cleaning a particular room.

    At all times the robot has a particular position and direction in the room.
    The robot also has a fixed speed.

    Subclasses of Robot should provide movement strategies by implementing
    updatePositionAndClean(), which simulates a single time-step.
    """
    def __init__(self, room, speed):
        """
        Initializes a Robot with the given speed in the specified room. The
        robot initially has a random direction and a random position in the
        room. The robot cleans the tile it is on.

        room:  a RectangularRoom object.
        speed: a float (speed > 0)
        """
        self.room = room
        self.speed = speed
        self.position = room.getRandomPosition()
        # Integer angle in degrees, 0 <= direction < 360.
        self.direction = random.randrange(360)
        # The robot cleans its starting tile.
        room.cleanTileAtPosition(self.position)

    def getRobotPosition(self):
        """
        Return the position of the robot.

        returns: a Position object giving the robot's position.
        """
        return self.position

    def getRobotDirection(self):
        """
        Return the direction of the robot.

        returns: an integer d giving the direction of the robot as an angle in
        degrees, 0 <= d < 360.
        """
        return self.direction

    def setRobotPosition(self, position):
        """
        Set the position of the robot to POSITION.

        position: a Position object.
        """
        self.position = position

    def setRobotDirection(self, direction):
        """
        Set the direction of the robot to DIRECTION.

        direction: integer representing an angle in degrees
        """
        self.direction = direction

    def updatePositionAndClean(self):
        """
        Simulate the passage of a single time-step.

        Move the robot to a new position and mark the tile it is on as having
        been cleaned.
        """
        # Movement strategy is subclass-specific (see class docstring).
        raise NotImplementedError
# === Problem 2
class StandardRobot(Robot):
    """
    A StandardRobot is a Robot with the standard movement strategy.
    At each time-step, a StandardRobot attempts to move in its current direction; when
    it hits a wall, it chooses a new direction randomly.
    """
    def updatePositionAndClean(self):
        """
        Simulate the passage of a single time-step.
        Move the robot to a new position and mark the tile it is on as having
        been cleaned.
        """
        import random
        # NOTE(review): relies on self.room / self.speed being set by
        # Robot.__init__, and on Position.getNewPosition (defined earlier
        # in this module).
        candidate = self.getRobotPosition().getNewPosition(
            self.getRobotDirection(), self.speed)
        if self.room.isPositionInRoom(candidate):
            # Move and clean the tile we land on.
            self.setRobotPosition(candidate)
            self.room.cleanTileAtPosition(candidate)
        else:
            # Hit a wall: pick a fresh random direction, move next step.
            self.setRobotDirection(random.randrange(360))
# === Problem 3
def runSimulation(num_robots, speed, width, height, min_coverage, num_trials,
                  robot_type):
    """
    Runs NUM_TRIALS trials of the simulation and returns the mean number of
    time-steps needed to clean the fraction MIN_COVERAGE of the room.
    The simulation is run with NUM_ROBOTS robots of type ROBOT_TYPE, each with
    speed SPEED, in a room of dimensions WIDTH x HEIGHT.
    num_robots: an int (num_robots > 0)
    speed: a float (speed > 0)
    width: an int (width > 0)
    height: an int (height > 0)
    min_coverage: a float (0 <= min_coverage <= 1.0)
    num_trials: an int (num_trials > 0)
    robot_type: class of robot to be instantiated (e.g. Robot or
                RandomWalkRobot)
    returns: a float, the mean number of time-steps over all trials.
    """
    total_steps = 0
    for _ in range(num_trials):
        # Fresh room and robots for every trial.
        room = RectangularRoom(width, height)
        robots = [robot_type(room, speed) for _ in range(num_robots)]
        steps = 0
        # Step all robots once per time-step until coverage is reached.
        # (If min_coverage is 0 the loop body never runs and steps stays 0.)
        while (float(room.getNumCleanedTiles()) / room.getNumTiles()
               < min_coverage):
            for robot in robots:
                robot.updatePositionAndClean()
            steps += 1
        total_steps += steps
    # float() keeps the mean exact under Python 2 integer division as well.
    return total_steps / float(num_trials)
# === Problem 4
#
# 1) How long does it take to clean 80% of a 20×20 room with each of 1-10 robots?
#
# 2) How long does it take two robots to clean 80% of rooms with dimensions
# 20×20, 25×16, 40×10, 50×8, 80×5, and 100×4?
def showPlot1():
    """
    Produces a plot showing dependence of cleaning time on number of robots.
    """
    # TODO: run runSimulation for 1-10 robots in a 20x20 room at 80%
    # coverage and plot mean time-steps vs. robot count. Requires a plotting
    # library (presumably pylab, per the course skeleton) -- confirm it is
    # imported at the top of this file before implementing.
    raise NotImplementedError
def showPlot2():
    """
    Produces a plot showing dependence of cleaning time on room shape.
    """
    # TODO: run runSimulation with two robots at 80% coverage for rooms of
    # equal area but different aspect ratios (20x20, 25x16, 40x10, 50x8,
    # 80x5, 100x4) and plot mean time-steps vs. aspect ratio. Requires a
    # plotting library -- confirm the module-level import before use.
    raise NotImplementedError
# === Problem 5
class RandomWalkRobot(Robot):
    """
    A RandomWalkRobot is a robot with the "random walk" movement strategy: it
    chooses a new direction at random after each time-step.
    """
    # BUG FIX: the skeleton had a bare `raise NotImplementedError` directly in
    # the class body, which executes at class-definition time and makes the
    # whole module fail to import. The stub must live inside a method.
    def updatePositionAndClean(self):
        """
        Simulate the passage of a single time-step: pick a new random
        direction, then move and clean if the step stays inside the room.
        """
        import random
        # A new random direction is drawn every time-step (unlike
        # StandardRobot, which only turns when it hits a wall).
        self.setRobotDirection(random.randrange(360))
        # NOTE(review): relies on self.room / self.speed being set by
        # Robot.__init__ once it is implemented.
        candidate = self.getRobotPosition().getNewPosition(
            self.getRobotDirection(), self.speed)
        if self.room.isPositionInRoom(candidate):
            self.setRobotPosition(candidate)
            self.room.cleanTileAtPosition(candidate)
# === Problem 6
# For the parameters tested below (cleaning 80% of a 20x20 square room),
# RandomWalkRobots take approximately twice as long to clean the same room as
# StandardRobots do.
def showPlot3():
    """
    Produces a plot comparing the two robot strategies.
    """
    # TODO: compare StandardRobot and RandomWalkRobot cleaning times for the
    # same room (per the comment above: RandomWalkRobots take roughly twice
    # as long on an 80%-coverage 20x20 room). Requires a plotting library --
    # confirm the module-level import before use.
    raise NotImplementedError
| {
"content_hash": "10cb348d6a4348e72cd47de4db0e4a93",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 86,
"avg_line_length": 28.117424242424242,
"alnum_prop": 0.6289909739997306,
"repo_name": "marioluan/mit-opencourseware-cs",
"id": "065f97fff4ccc9e181b3c89cc72d2059bf246ea3",
"size": "7492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "600/unit-2/sampling-and-monte-carlo-simulation/problem-set/ps6/ps6.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "334769"
}
],
"symlink_target": ""
} |
import os
import sys
import subprocess
import time
import datetime
from Constantine import utils
import requests
def run(args):
    """Generate a weekly events poster PDF from a Google Calendar.

    Reads settings.json, fetches next week's events from the Google Calendar
    API, renders them through the bundled LaTeX template with XeLaTeX, and
    copies the resulting PDF to the path given as the first CLI argument.

    args: the argv-style argument list (kept for interface compatibility;
          NOTE(review): the function currently reads sys.argv directly).
    Exits with status 1 on bad usage, missing files, API or LaTeX failure.
    """
    GOOGLE_CALENDAR_API = "https://www.googleapis.com/calendar/v3/calendars/"
    DAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
    XELATEX_TIMEOUT = 15  # seconds to wait for xelatex before giving up
    if (not 2 <= len(sys.argv) <= 5) or (sys.argv[1] in ['-h', '--help']):
        print("Usage: python main.py /path/to/output.pdf [options]")
        print("Options:")
        print("--date=YYYY-MM-DD : Provide a date as the second argument to fetch the calendar for the following week of that date (not for the week the date is in!) e.g. 2017-02-01 | Default: Today's date.")
        print("--text=/path/to/special/text.txt : Provide a custom path to the special text file to be included in the poster. | Default: special_text.txt under the script directory.")
        print("--config=/path/to/settings.json : The path to settings.json for configuring Constantine. | Default: the settings.json under the script directory, which may be strange if you installed Constantine with pip.")
        print("See Docs/settings-example.json for an example.")
        sys.exit(1)
    # Process date param.
    set_datetime = datetime.datetime.today()
    # get a datetime that contains today's date, but no time.
    set_date = datetime.datetime(year=set_datetime.year, month=set_datetime.month, day=set_datetime.day)
    text_file_path = utils.get_project_full_path() + "/special_text.txt"
    config_file_path = None
    if len(sys.argv) > 2:
        for argv in sys.argv[2:]:
            # Date.
            if (not argv.startswith('--')) or argv.startswith('--date='):
                # Support old options format (a bare date argument).
                if argv.startswith('--date='):
                    set_date = argv[7:]
                else:
                    set_date = argv
                if not utils.check_date_string(set_date):
                    print("Error: the date you have set is invalid, it must be in YYYY-MM-DD form such as 2017-02-01.")
                    sys.exit(1)
                else:
                    set_date = datetime.datetime.strptime(set_date, "%Y-%m-%d")
            # Special text.
            if argv.startswith('--text='):
                text_file_path = argv[7:]
                if text_file_path.startswith('~'):
                    text_file_path = os.path.expanduser('~') + '/' + text_file_path[1:]
            # Settings file.
            if argv.startswith('--config='):
                config_file_path = argv[9:]
                if config_file_path.startswith('~'):
                    # BUG FIX: this branch previously expanded
                    # text_file_path instead of config_file_path.
                    config_file_path = os.path.expanduser('~') + '/' + config_file_path[1:]
    output_dir = sys.argv[1].rsplit('/', 1)[0]
    output_file = sys.argv[1]
    if '/' not in output_file:  # Current directory.
        output_dir = './'
        output_file = './' + output_file
    if not os.path.isdir(output_dir):
        print("The directory " + output_dir + " does not exist, exiting.")
        sys.exit(1)
    print("Reading settings.json...")
    settings = utils.read_config(config_file_path)
    # Find out which week is the next week we are working on.
    next_monday = set_date + datetime.timedelta(days=(7 - set_date.weekday()))
    monday_after = next_monday + datetime.timedelta(days=7)
    start_date = next_monday.strftime("%Y-%m-%d") + "T00:00:00Z"
    end_date = monday_after.strftime("%Y-%m-%d") + "T00:00:00Z"
    # slight hack, add six days so that we are at the end of the week when
    # searching for the term start date, since it might not be a monday
    term_start = datetime.datetime.strptime(utils.get_closest_date_time(next_monday+datetime.timedelta(days=6), settings['term_start_dates']), "%Y-%m-%d")
    week_number = int((next_monday - term_start).days / 7 + 1)
    # Fetch data.
    request_url = GOOGLE_CALENDAR_API + settings['calendar_id'] + "/events"
    request_params = {'key': settings['google_api_key'], 'orderBy': 'startTime', 'singleEvents': 'true', 'timeMin': start_date, 'timeMax': end_date}
    print("Reading calendar events for week starting {}...".format(next_monday.strftime("%Y-%m-%d")))
    api_response = requests.get(request_url, params=request_params)
    if api_response.status_code != 200:
        # FIX: use implicit string concatenation instead of a backslash
        # continuation inside the literal, which baked source indentation
        # into the printed message.
        print("Error fetching calendar data from Google, check your network connection and API key. "
              "If you have a valid API key, it may be missing Google Calendar Access.")
        sys.exit(1)
    events = api_response.json()
    # Organise data: one list of events per weekday index.
    events_organised = {DAYS.index(i): [] for i in DAYS}
    for event in events['items']:
        if any([(i not in event) for i in ['start', 'summary', 'location']]):
            continue  # Just in case a broken event returned in json.
        event_date = event['start']['dateTime']
        datetime_split = event_date.split('T')
        event_datetime = datetime.datetime.strptime(datetime_split[0], "%Y-%m-%d")
        event_start = datetime_split[1][:5]  # "HH:MM"
        event_day = int((event_datetime - next_monday).days)
        event_item = {}
        event_item['what'] = utils.tex_escape(event['summary'])
        event_item['when'] = utils.tex_escape(event_start)
        event_item['where'] = utils.tex_escape(event['location'])
        events_organised[event_day].append(event_item)
    # Read from and format the templates.
    print("Generating LaTeX file from template...")
    with open(utils.get_project_full_path() + "/latex_template.txt", 'r') as template_file:
        latex_template = template_file.read()
    try:
        with open(text_file_path, 'r') as special_file:
            special_text_lines = special_file.readlines()
    except FileNotFoundError:
        print("Error: special text file " + text_file_path + " cannot be found, the PDF cannot be generated. Exiting.")
        sys.exit(1)
    latex_formatting = utils.settings_to_formatting(settings)
    latex_formatting['week_number'] = str(week_number)
    event_content = ""
    for day, day_events in events_organised.items():
        if len(day_events) < 1:
            continue
        day_tex = "\\begin{minipage}{0.5\\textwidth}{\\fontsize{30}{40}\\selectfont %s}\\\\\\vspace{0.2cm}\\begin{addmargin}[1em]{0em}" % (DAYS[day])
        for day_event in day_events:
            day_tex += "{\\fontsize{24}{34}\\selectfont \\textcolor{emphasistext}{ %s }}\\\\\\vspace{0.05cm}\\\\{\\fontsize{20}{30}\\selectfont %s at %s} \\\\" % (day_event['what'], day_event['where'], day_event['when'])
            if day_events.index(day_event) < len(day_events) - 1:
                day_tex += "\\vspace{0.3cm}\\\\"
        day_tex += "\\end{addmargin}\\end{minipage}\\vspace{0.75cm}\n"
        event_content += day_tex
    if len(event_content) > 0:
        event_content += "\\\\"  # Only add empty line when there is an event to prevent no line to end error.
    latex_formatting['event_content'] = event_content
    special_content = ''
    if len(special_text_lines) > 0:
        special_content = "\\begin{minipage}{0.45\\textwidth}{\\fontsize{30}{40}\\selectfont %s }\\\\\\begin{addmargin}[1em]{0em}" % (utils.tex_escape(special_text_lines[0]))
        for line in special_text_lines[1:]:
            special_content += "{\\fontsize{16}{20}\\selectfont %s \\par}" % (utils.tex_escape(line))
        special_content += "\\end{addmargin}\\end{minipage}\\\\"
    latex_formatting['special_text'] = special_content
    latex_template = latex_template % latex_formatting
    # Write PDF, finish.
    print("Writing completed LaTeX to PDF...")
    latex_target_path = utils.get_project_full_path() + "/tex/Constantine.tex"
    with open(latex_target_path, 'w+') as latex_file:
        latex_file.write(latex_template)
    print("Generating PDF...")
    working_dir = utils.get_project_full_path() + "/tex"
    p = subprocess.Popen(['xelatex', latex_target_path], stdout=subprocess.PIPE, cwd=working_dir)
    success = False
    try:
        output = p.communicate(timeout=XELATEX_TIMEOUT)
        result_code = p.returncode
        if result_code == 0:
            success = True
            print("Success! PDF saved at: " + latex_target_path[:-4] + ".pdf")
        else:
            print("Failure! Check " + latex_target_path[:-4] + ".log for details.")
    except subprocess.TimeoutExpired:
        # BUG FIX: kill the stuck xelatex process and reap it, per the
        # subprocess documentation, instead of leaving it running.
        p.kill()
        p.communicate()
        print("Failure! Something has made XeLaTeX to wait. Check " + latex_target_path[:-4] + ".log for details.")
    if not success:
        sys.exit(1)
    print("Copying PDF to " + output_file)
    p = subprocess.Popen(['cp', latex_target_path[:-4] + ".pdf", output_file], stdout=subprocess.PIPE, cwd=os.getcwd())
    # BUG FIX: returncode is None until the process is waited on; the old
    # code read it immediately and never checked the copy's result.
    p.communicate()
    if p.returncode != 0:
        print("Failure! Could not copy the PDF to: " + output_file)
        sys.exit(1)
    print("PDF should have been copied to: " + output_file)
# Script entry point: pass the raw argv through (run() currently reads
# sys.argv itself; the parameter is kept for interface compatibility).
if __name__ == '__main__':
    run(sys.argv)
| {
"content_hash": "de3064a7a5cea15da1e778c174e93797",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 222,
"avg_line_length": 47.17741935483871,
"alnum_prop": 0.6125356125356125,
"repo_name": "icydoge/Constantine",
"id": "2bb3100e032e11797dc8b230f3a0960b900791ce",
"size": "8844",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Constantine/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18609"
},
{
"name": "TeX",
"bytes": "5794"
}
],
"symlink_target": ""
} |
"""Show the contents of a FIFF file.
You can do for example:
$ mne show_fiff test_raw.fif
"""
# Authors : Eric Larson, PhD
import codecs
import sys
import mne
def run():
    """Run the ``mne show_fiff`` command: print the contents of a FIFF file.

    Expects exactly one positional argument (the FIFF file path); prints the
    parser help and exits with status 1 otherwise.
    """
    parser = mne.commands.utils.get_optparser(
        __file__, usage='mne show_fiff <file>')
    options, args = parser.parse_args()
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)
    # This works around an annoying bug on Windows for show_fiff, see:
    # https://pythonhosted.org/kitchen/unicode-frustrations.html
    # FIX: use sys.version_info instead of parsing the version string --
    # it is the canonical, comparison-safe way to detect Python 2.
    if sys.version_info[0] < 3:
        UTF8Writer = codecs.getwriter('utf8')
        sys.stdout = UTF8Writer(sys.stdout)
    print(mne.io.show_fiff(args[0]))
# Module-level flag instead of a bare __main__ guard -- presumably kept so
# mne's command-line tests can inspect it; confirm before simplifying.
is_main = (__name__ == '__main__')
if is_main:
    run()
| {
"content_hash": "07ecf8c6d5f8a08060f46a78f64051ea",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 23,
"alnum_prop": 0.6205533596837944,
"repo_name": "nicproulx/mne-python",
"id": "caad09bc10ee73e304011d044dacb0421afcf76c",
"size": "781",
"binary": false,
"copies": "2",
"ref": "refs/heads/placeholder",
"path": "mne/commands/mne_show_fiff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3723"
},
{
"name": "Python",
"bytes": "5866703"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
} |
"""This should at some point be a library with functions to import and
reconstruct Bruker MRI data.
2014, Joerg Doepfert
"""
import numpy as np
# ***********************************************************
# class definition
# ***********************************************************
class BrukerData:
    """Class to store and process data of a Bruker MRI Experiment"""
    def __init__(self, path="", ExpNum=0, B0=9.4):
        # Parameter dictionaries parsed from the experiment's text files
        # (see ReadParamFile below).
        self.method = {}
        self.acqp = {}
        self.reco = {}
        # Raw complex FID samples and the arrays derived from them during
        # k-space reordering and reconstruction.
        self.raw_fid = np.array([])
        self.proc_data = np.array([])
        self.k_data = np.array([])
        self.reco_data = np.array([])
        self.reco_data_norm = np.array([]) # normalized reco
        self.B0 = B0 # only needed for UFZ method
        self.GyroRatio = 0 # only needed for UFZ method
        self.ConvFreqsFactor = 0 # reference to convert Hz <--> ppm
        # Filesystem location of the experiment on disk.
        self.path = path
        self.ExpNum = ExpNum
    def GenerateKspace(self):
        """Reorder the data in raw_fid to a valid k-space."""
        # Dispatch on the pulse-sequence name stored in the method file.
        if self.method == {}:
            raise NameError('No experiment loaded')
        elif self.method["Method"] == 'jd_UFZ_RAREst':
            self.k_data = self._GenKspace_UFZ_RARE()
        elif (self.method["Method"] == 'FLASH' or
              self.method["Method"] == 'mic_flash'):
            self.k_data = self._GenKspace_FLASH()
        else:
            raise NameError("Unknown method")
    def ReconstructKspace(self, **kwargs):
        """Transform the kspace data to image space. If it does not yet exist,
        generate it from the raw fid. Keyword arguments [**kwargs] can
        be supplied for some methods:
        All methods:
        - KspaceCutoffIdx: list lines to be set to zero in
          kspace prior to FT reconstruction
        jd_UFZ_RARExx:
        - NEchoes: Number of Echoes to be averaged. If NEchoes="opt",
          then the optimum number of echoes is calculated. If
          NEchoes=0, then all echoes are averaged.
        """
        # Generate k_data prior to reconstruction, if it does not yet
        # exist
        if self.k_data.size == 0:
            self.GenerateKspace()
            self._ReconstructKspace_(**kwargs)
        else:
            self._ReconstructKspace_(**kwargs)
        return self.reco_data
    def _ReconstructKspace_(self, **kwargs):
        """Select which function to use for the reco, depending on the
        method."""
        if self.method["Method"] == 'jd_UFZ_RAREst':
            self._Reco_UFZ_RARE(**kwargs)
        elif (self.method["Method"] == 'FLASH' or
              self.method["Method"] == 'mic_flash'):
            self. _Reco_FLASH(**kwargs)
        else:
            raise NameError("Unknown method")
    # ***********************************************************
    # method specific reordering and reco functions start here
    # ***********************************************************
    def _GenKspace_FLASH(self):
        """Reorder the flat FID into a (read, phase, image) k-space stack."""
        complexValues = self.raw_fid
        # NOTE(review): NScans is computed but not used below -- the trailing
        # image dimension is inferred with -1 instead; confirm before removal.
        NScans = (self.acqp["NI"] # no. of images
                  * self.acqp["NAE"] # no. of experiments
                  * self.acqp["NA"] # no. of averages
                  * self.acqp["NR"]) # no. of repetitions
        Matrix = self.method["PVM_Matrix"]
        # Fortran order matches the acquisition ordering of the samples.
        kSpace = np.reshape(complexValues, (-1,Matrix[0]),
                            order="F")
        kSpace = np.reshape(kSpace, (-1, Matrix[0], Matrix[1]))
        kSpace = np.transpose(kSpace, (1,2,0))
        return kSpace
    def _Reco_FLASH(self, **kwargs):
        """Magnitude 2D-FFT reconstruction of each image in the stack."""
        k_data = self.k_data
        reco_data = np.zeros(k_data.shape)
        for i in range(0,self.k_data.shape[2]):
            reco_data[:,:,i] = abs(fft_image(self.k_data[:,:,i]))
        self.reco_data = reco_data
    def _GenKspace_UFZ_RARE(self):
        """Reorder the FID into (sat. frequency, echo, scan) k-space."""
        complexValues = self.raw_fid
        # Bruker pads each acquisition block to a multiple of 128 samples;
        # strip that padding first.
        complexValues = RemoveVoidEntries(complexValues,
                                          self.acqp["ACQ_size"][0])
        NEchoes = self.method["CEST_Number_Echoes"]
        NPoints = self.method["CEST_Number_SatFreqs"]
        NScans = self.method["PVM_NRepetitions"]
        return np.reshape(complexValues, (NPoints, NEchoes, NScans),
                          order="F")
    def _Reco_UFZ_RARE(self, **kwargs):
        """Average echoes per scan, zero selected k-space lines, then FFT."""
        # use pop to set default values
        KspaceCutoffIdx = kwargs.pop("KspaceCutoffIdx", [])
        NEchoes = kwargs.pop("NEchoes", "opt")
        NScans = self.method["PVM_NRepetitions"]
        NPoints = self.method["CEST_Number_SatFreqs"]
        # NOTE(review): np.int was removed in NumPy 1.24; consider plain int.
        NRecoEchoes = np.ones(NScans, dtype=np.int)
        # Determine how many echoes should be averaged
        if NEchoes == "opt": # calc opt num of echoes to be averaged
            # choose to look at real, imag, or abs part of kspace
            Data = self.k_data.real
            # find the indizes of maximum kspace signal
            MaxIndizes = []
            MaxIndizes.append(np.argmax(Data[:, 0, 0]))
            # Second index is the larger of the two neighbours of the max.
            MaxIndizes.append(MaxIndizes[0] + 1
                              - 2*(Data[MaxIndizes[0]-1, 0, 0]
                                   > Data[MaxIndizes[0]+1, 0, 0]))
            # calc max of kspace echoes based on these indizes
            MaxEchoSignals = np.sum(Data[MaxIndizes, :, :], axis=0)
            # now calc opt num of echoes for each scan
            for i in range(0, NScans):
                NRecoEchoes[i] = CalcOptNEchoes(MaxEchoSignals[:, i])
            # make sure that off and on scan have same amount of
            # NRecoEchoes, i.e. echoes to be averaged
            if self.method["CEST_AcqMode"] == "On_and_Off_Scan":
                NRecoEchoes[1::2] = NRecoEchoes[0::2]
        elif NEchoes == 0: # take all echoes
            NRecoEchoes = NRecoEchoes*self.method["CEST_Number_Echoes"]
        else: # take number given by user
            NRecoEchoes = NRecoEchoes*NEchoes
        # average the echoes
        KspaceAveraged = np.zeros((NPoints, NScans), dtype=complex)
        for i in range(0, NScans):
            RecoEchoes = range(0, NRecoEchoes[i])
            KspaceAveraged[:, i] = np.mean(
                self.k_data[:, RecoEchoes, i], axis=1)
            KspaceAveraged[KspaceCutoffIdx, i] = 0
        # save reco as FFT of the averaged kspace data
        self.reco_data, _ = FFT_center(KspaceAveraged)
        # normalize the data if possible
        if self.method["CEST_AcqMode"] == "On_and_Off_Scan":
            self.reco_data_norm = np.divide(abs(self.reco_data[:,1::2]),
                                            abs(self.reco_data[:,0::2]))
# ***********************************************************
# Functions
# ***********************************************************
def ReadExperiment(path, ExpNum):
    """Read in a Bruker MRI Experiment. Returns raw data, processed
    data, and method and acqp parameters in a dictionary.
    """
    data = BrukerData(path, ExpNum)
    base = path + str(ExpNum)
    # Parameter files.
    data.method = ReadParamFile(base + "/method")
    data.acqp = ReadParamFile(base + "/acqp")
    data.reco = ReadParamFile(base + "/pdata/1/reco")
    # Processed (reconstructed) data.
    data.proc_data = ReadProcessedData(base + "/pdata/1/2dseq",
                                       data.reco,
                                       data.acqp)
    # Interleave real/imaginary samples into a complex FID.
    samples = ReadRawData(base + "/fid")
    data.raw_fid = samples[0::2] + 1j * samples[1::2]
    # Gyromagnetic ratio (rad/Ts) and the Hz <--> ppm conversion factor.
    data.GyroRatio = data.acqp["SFO1"] * 2 * np.pi / data.B0 * 10**6
    data.ConvFreqsFactor = 1 / (data.GyroRatio * data.B0 / 10**6 / 2 / np.pi)
    data.path = path
    data.ExpNum = ExpNum
    return data
def CalcOptNEchoes(s):
    """Find out how many echoes in an echo train [s] have to be
    included into an averaging operation, such that the signal to
    noise (SNR) of the resulting averaged signal is maximized.
    Based on the formula shown in the supporting information of
    the [Doepfert et al. ChemPhysChem, 15(2), 261-264, 2014]

    s: 1D array of per-echo signal maxima (descending echo train).
    Returns the number of echoes to average (>= 1 for non-empty input).
    """
    # Guard against empty input (the original indexed s_sum[0] and crashed).
    if len(s) == 0:
        return 0
    # Running sum of the first n echoes; s_sum[n-1] = s[0] + ... + s[n-1].
    s_sum = np.zeros(len(s))
    s_sum[0] = s[0]
    count = 1
    for n in range(2, len(s) + 1):
        s_sum[n-1] = s[n-1] + s_sum[n-2]
        # Including echo n only helps SNR while its signal exceeds this
        # threshold (derived from the sqrt(n) noise scaling of the mean).
        threshold = s_sum[n-2] * (np.sqrt(float(n) / (float(n) - 1)) - 1)
        if s[n-1] < threshold:
            break
        count += 1
    # NOTE: the original also computed an unused per-iteration SNR value;
    # that dead code has been removed without changing the result.
    return count
def FFT_center(Kspace, sampling_rate=1, ax=0):
    """Calculate FFT of a time domain signal and shift the spectrum
    so that the center frequency is in the center. Additionally
    return the frequency axis, provided the right sampling frequency
    is given.
    If the data is 2D, then the FFT is performed succesively along an
    axis [ax].
    """
    # FFT along the requested axis, then rotate the zero frequency to
    # the middle of that axis.
    spectrum = np.fft.fftshift(np.fft.fft(Kspace, axis=ax), axes=ax)
    n_samples = spectrum.shape[ax]
    freq_axis = np.fft.fftshift(
        np.fft.fftfreq(n_samples, 1 / float(sampling_rate)))
    return spectrum, freq_axis
def fft_image(Kspace):
    """Return the 2D FFT of a k-space array with zero frequency centered."""
    shifted = np.fft.fftshift(np.fft.fft2(Kspace))
    return shifted
def RemoveVoidEntries(datavector, acqsize0):
    """Strip the zero padding Bruker appends to each acquisition block.

    Each block of acqsize0/2 complex samples is padded up to a multiple of
    128 entries; this removes the padding indices from the flat vector.

    datavector: 1D numpy array of (complex) samples including padding.
    acqsize0:   ACQ_size[0]; acqsize0 // 2 is the number of valid entries
                per block.
    Returns a numpy array with the padding entries deleted.
    """
    # BUG FIX: the original used true division ('/') for both quantities
    # below, which yields floats on Python 3 and crashes range(); floor
    # division is correct on both Python 2 and 3.
    blocksize = int(np.ceil(float(acqsize0) / 2 / 128) * 128)
    valid_per_block = acqsize0 // 2
    DelIdx = []
    for i in range(len(datavector) // blocksize):
        # Flat list of indices (np.delete handles a flat index list on
        # all supported NumPy versions, unlike a list of range objects).
        DelIdx.extend(range(i * blocksize + valid_per_block,
                            (i + 1) * blocksize))
    return np.delete(datavector, DelIdx)
def ReadRawData(filepath):
    """Read a Bruker 'fid' file as a flat array of int32 samples.

    filepath: path to the fid file.
    Returns a 1D numpy int32 array.
    """
    # BUG FIX: the file is binary, so it must be opened in 'rb' mode;
    # text mode breaks np.fromfile on Python 3 and can corrupt data on
    # Windows under Python 2.
    with open(filepath, "rb") as f:
        return np.fromfile(f, dtype=np.int32)
def ReadProcessedData(filepath, reco, acqp):
    """Read a Bruker '2dseq' file of int16 images and reorient them.

    filepath: path to the 2dseq file.
    reco:     reco parameter dict; RECO_size gives the in-plane dimensions.
    acqp:     acqp parameter dict (unused; kept for interface compatibility).
    Returns a float array of shape (RECO_size[1], RECO_size[0], n_images),
    with each image rotated by 90 degrees.
    """
    # BUG FIX: binary data must be read in 'rb' mode; text mode breaks
    # np.fromfile on Python 3.
    with open(filepath, "rb") as f:
        data = np.fromfile(f, dtype=np.int16)
    # Fortran order matches the on-disk sample ordering; the trailing
    # dimension collects however many images the file contains.
    data = data.reshape(reco["RECO_size"][0],
                        reco["RECO_size"][1], -1, order="F")
    if data.ndim == 3:
        data_length = data.shape[2]
    else:
        data_length = 1
    # Rotate each image 90 degrees so it displays in the usual orientation.
    data_reshaped = np.zeros([data.shape[1], data.shape[0], data_length])
    for i in range(0, data_length):
        data_reshaped[:, :, i] = np.rot90(data[:, :, i])
    return data_reshaped
def ReadParamFile(filepath):
    """
    Read a Bruker MRI experiment's method or acqp file to a
    dictionary.
    """
    param_dict = {}
    with open(filepath, "r") as f:
        # iter(f.readline, '') stops at EOF, like the original
        # while-True / break-on-empty loop.
        for line in iter(f.readline, ''):
            # Only lines of the form '##$name=value' define parameters.
            if not line.startswith('##$'):
                continue
            (param_name, current_line) = line[3:].split('=')
            if current_line[0:2] == "( " and current_line[-3:-1] == " )":
                # '( n, m )' announces an array whose values follow on
                # subsequent lines.
                value = ParseArray(f, current_line)
            elif current_line[0] == "(" and current_line[-3:-1] != " )":
                # Struct/list literal; may continue over several lines
                # until the closing parenthesis.
                while current_line[-2] != ")":
                    current_line = current_line[0:-1] + f.readline()
                value = [ParseSingleValue(x)
                         for x in current_line[1:-2].split(', ')]
            else:
                # Plain scalar or string value.
                value = ParseSingleValue(current_line)
            param_dict[param_name] = value
    return param_dict
def ParseArray(current_file, line):
    """Parse an array-valued parameter whose size header is in *line*
    and whose values follow on the next line(s) of *current_file*."""
    # Size header looks like '( 2, 3 )' -> dimensions [2, 3].
    dims = [int(tok) for tok in line[1:-2].replace(" ", "").split(",")]
    arraysize = np.array(dims)
    vallist = current_file.readline().split()
    # A non-numeric first token means the "array" is really a string.
    try:
        float(vallist[0])
    except ValueError:
        return " ".join(vallist)
    # Values may span several lines; keep reading until all are present.
    while len(vallist) != np.prod(arraysize):
        vallist += current_file.readline().split()
    # Prefer ints; fall back to floats if any token is not an integer.
    try:
        vallist = [int(x) for x in vallist]
    except ValueError:
        vallist = [float(x) for x in vallist]
    if len(vallist) > 1:
        return np.reshape(np.array(vallist), arraysize)
    # A single value is returned as a plain number.
    return vallist[0]
def ParseSingleValue(val):
    """Convert a raw parameter token to int, then float, else return it
    as a string with the trailing newline removed."""
    for converter in (int, float):
        try:
            return converter(val)
        except ValueError:
            continue
    return val.rstrip('\n')
# ***********************************************************
# -----------------------------------------------------------
# ***********************************************************
# Library module: nothing to do when executed directly.
if __name__ == '__main__':
    pass
| {
"content_hash": "253409c18ee9394c0974995f2c9760e8",
"timestamp": "",
"source": "github",
"line_count": 405,
"max_line_length": 79,
"avg_line_length": 32.41481481481482,
"alnum_prop": 0.5377818403412553,
"repo_name": "jdoepfert/brukerMRI",
"id": "4e8d80cbedd2287851b4d76071f1fb9a47b85596",
"size": "13156",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "BrukerMRI.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7410"
},
{
"name": "Perl",
"bytes": "26967"
},
{
"name": "Python",
"bytes": "13806"
},
{
"name": "R",
"bytes": "310"
},
{
"name": "Shell",
"bytes": "30229"
}
],
"symlink_target": ""
} |
import calendar
import datetime
import logging
import time
from google.appengine import runtime
from google.appengine.api import datastore_errors
from google.appengine.api import quota
from google.appengine.api import taskqueue
from google.appengine.ext import db
import config
import const
import delete
import model
import utils
# Rough per-request CPU budget in megacycles (App Engine quota units).
CPU_MEGACYCLES_PER_REQUEST = 1000
# Grace period after expiry before a record's contents are wiped for good.
EXPIRED_TTL = datetime.timedelta(delete.EXPIRED_TTL_DAYS, 0, 0)
# Maximum number of entities fetched per counting batch.
FETCH_LIMIT = 100
class ScanForExpired(utils.BaseHandler):
    """Common logic for scanning the Person table looking for things to delete.
    The common logic handles iterating through the query, updating the expiry
    date and wiping/deleting as needed. It sets the is_expired flag on all
    records whose expiry_date has passed. Records that expired more than
    EXPIRED_TTL in the past will also have their data fields, notes, and
    photos permanently deleted.
    Subclasses set the query and task_name."""
    repo_required = False
    def task_name(self):
        """Subclasses should implement this."""
        pass
    def query(self):
        """Subclasses should implement this."""
        pass
    def schedule_next_task(self, cursor):
        """Schedule the next task to carry on with this query.
        """
        self.add_task_for_repo(self.repo, self.task_name(), self.ACTION,
                               cursor=cursor, queue_name='expiry')
    def get(self):
        if self.repo:
            query = self.query()
            # Resume from where a previous task left off, if a cursor was
            # passed along.
            if self.params.cursor:
                query.with_cursor(self.params.cursor)
            cursor = self.params.cursor
            try:
                for person in query:
                    # query.cursor() returns a cursor which returns the entity
                    # next to this "person" as the first result.
                    next_cursor = query.cursor()
                    was_expired = person.is_expired
                    person.put_expiry_flags()
                    # Records past the grace period are wiped permanently;
                    # freshly-expired ones go through the normal deletion path.
                    if (utils.get_utcnow() - person.get_effective_expiry_date()
                        > EXPIRED_TTL):
                        person.wipe_contents()
                    else:
                        # treat this as a regular deletion.
                        if person.is_expired and not was_expired:
                            delete.delete_person(self, person)
                    # Only advance the saved cursor after the record is fully
                    # handled, so a rescheduled task re-processes nothing.
                    cursor = next_cursor
            except runtime.DeadlineExceededError:
                self.schedule_next_task(cursor)
            except datastore_errors.Timeout:
                # This exception is sometimes raised, maybe when the query
                # object live too long?
                self.schedule_next_task(cursor)
        else:
            # No repo given: fan out one task per repository.
            for repo in model.Repo.list():
                self.add_task_for_repo(repo, self.task_name(), self.ACTION)
class DeleteExpired(ScanForExpired):
    """Scans for person records whose expiry date has already passed."""
    ACTION = 'tasks/delete_expired'
    def query(self):
        return model.Person.past_due_records(self.repo)
    def task_name(self):
        return 'delete-expired'
class DeleteOld(ScanForExpired):
    """Scans for person records whose old source dates make them expirable."""
    ACTION = 'tasks/delete_old'
    def query(self):
        return model.Person.potentially_expired_records(self.repo)
    def task_name(self):
        return 'delete-old'
class CleanUpInTestMode(utils.BaseHandler):
    """If the repository is in "test mode", this task deletes all entries older
    than DELETION_AGE_SECONDS (defined below), regardless of their actual
    expiration specification.
    We delete entries quickly so that most of the test data does not persist in
    real mode, and to reduce the effect of spam.
    """
    repo_required = False
    ACTION = 'tasks/clean_up_in_test_mode'
    # Entries older than this age in seconds are deleted in test mode.
    #
    # If you are maintaining a single repository and switching it between test
    # mode (for drills) and real mode (for real crises), you should be sure to
    # switch to real mode within DELETION_AGE_SECONDS after a real crisis
    # occurs, because:
    #   - When the crisis happens, the users may be confused and enter real
    #     information on the repository, even though it's still in test mode.
    #     (All pages show "test mode" message, but some users may be still
    #     confused.)
    #   - If we fail to make the switch in DELETION_AGE_SECONDS, such real
    #     entries are deleted.
    #   - If we make the switch in DELETION_AGE_SECONDS, such entries are not
    #     deleted, and handled as a part of real mode data.
    DELETION_AGE_SECONDS = 24 * 3600
    def __init__(self, request, response, env):
        utils.BaseHandler.__init__(self, request, response, env)
        # Optional hook notified before each deletion (used by tests).
        self.__listener = None
    def task_name(self):
        return 'clean-up-in-test-mode'
    def schedule_next_task(self, cursor, utcnow):
        """Schedule the next task to carry on with this query.
        """
        # utcnow is serialized as a Unix timestamp so the follow-up task can
        # rebuild the exact same query filter (see get() below).
        self.add_task_for_repo(
            self.repo,
            self.task_name(),
            self.ACTION,
            utcnow=str(calendar.timegm(utcnow.utctimetuple())),
            cursor=cursor,
            queue_name='clean_up_in_test_mode')
    def in_test_mode(self, repo):
        """Returns True if the repository is in test mode."""
        return config.get('test_mode', repo=repo)
    def get(self):
        if self.repo:
            # To reuse the cursor from the previous task, we need to apply
            # exactly the same filter. So we use utcnow previously used
            # instead of the current time.
            utcnow = self.params.utcnow or utils.get_utcnow()
            max_entry_date = (
                utcnow -
                datetime.timedelta(
                    seconds=CleanUpInTestMode.DELETION_AGE_SECONDS))
            query = model.Person.all_in_repo(self.repo)
            query.filter('entry_date <=', max_entry_date)
            if self.params.cursor:
                query.with_cursor(self.params.cursor)
            cursor = self.params.cursor
            # Uses query.get() instead of "for person in query".
            # If we use for-loop, query.cursor() points to an unexpected
            # position.
            person = query.get()
            # When the repository is no longer in test mode, aborts the
            # deletion.
            try:
                while person and self.in_test_mode(self.repo):
                    if self.__listener:
                        self.__listener.before_deletion(person.key())
                    person.delete_related_entities(delete_self=True)
                    cursor = query.cursor()
                    person = query.get()
            except runtime.DeadlineExceededError:
                self.schedule_next_task(cursor, utcnow)
            except datastore_errors.Timeout:
                # This exception is sometimes raised, maybe when the query
                # object live too long?
                self.schedule_next_task(cursor, utcnow)
        else:
            # No repo given: fan out one task per test-mode repository.
            for repo in model.Repo.list():
                if self.in_test_mode(repo):
                    self.add_task_for_repo(repo, self.task_name(), self.ACTION)
    def set_listener(self, listener):
        # Test hook: listener.before_deletion(key) is called before each
        # person entity is deleted.
        self.__listener = listener
def run_count(make_query, update_counter, counter):
    """Scans the entities matching a query up to FETCH_LIMIT.
    Returns False if we finished counting all entries."""
    # Build the query fresh and resume after the last counted key, if any.
    query = make_query()
    if counter.last_key:
        query = query.filter('__key__ >', db.Key(counter.last_key))
    batch = query.order('__key__').fetch(FETCH_LIMIT)
    # An empty batch means the scan is complete; reset the bookmark.
    if not batch:
        counter.last_key = ''
        return False
    # Feed every entity in the batch to the counting callback.
    for entity in batch:
        update_counter(counter, entity)
    # Remember where we left off for the next batch.
    counter.last_key = str(batch[-1].key())
    return True
class CountBase(utils.BaseHandler):
    """A base handler for counting tasks. Making a request to this handler
    without a specified repo will start tasks for all repositories in parallel.
    Each subclass of this class handles one scan through the datastore."""
    repo_required = False  # can run without a repo
    SCAN_NAME = ''  # Each subclass should choose a unique scan_name.
    ACTION = ''  # Each subclass should set the action path that it handles.
    def get(self):
        if self.repo:  # Do some counting.
            try:
                counter = model.Counter.get_unfinished_or_create(
                    self.repo, self.SCAN_NAME)
                entities_remaining = True
                while entities_remaining:
                    # Batch the db updates. (xrange: this is Python 2 /
                    # App Engine first-generation runtime code.)
                    for _ in xrange(100):
                        entities_remaining = run_count(
                            self.make_query, self.update_counter, counter)
                        if not entities_remaining:
                            break
                    # And put the updates at once.
                    counter.put()
            except runtime.DeadlineExceededError:
                # Continue counting in another task.
                self.add_task_for_repo(self.repo, self.SCAN_NAME, self.ACTION)
        else:  # Launch counting tasks for all repositories.
            for repo in model.Repo.list():
                self.add_task_for_repo(repo, self.SCAN_NAME, self.ACTION)
    def make_query(self):
        """Subclasses should implement this. This will be called to get the
        datastore query; it should always return the same query."""
    def update_counter(self, counter, entity):
        """Subclasses should implement this. This will be called once for
        each entity that matches the query; it should call increment() on
        the counter object for whatever accumulators it wants to increment."""
class CountPerson(CountBase):
    """Counting scan over Person records: tallies one accumulator per
    attribute of interest."""
    SCAN_NAME = 'person'
    ACTION = 'tasks/count/person'

    def make_query(self):
        """All Person entities in the current repository."""
        query = model.Person.all()
        return query.filter('repo =', self.repo)

    def update_counter(self, counter, person):
        """Increment one accumulator per tracked attribute of *person*."""
        if person.latest_found is None:
            found = ''
        else:
            found = 'TRUE' if person.latest_found else 'FALSE'

        counter.increment('all')
        counter.increment('original_domain=' + (person.original_domain or ''))
        counter.increment('sex=' + (person.sex or ''))
        counter.increment('home_country=' + (person.home_country or ''))
        counter.increment('photo=' + ('present' if person.photo_url else ''))
        counter.increment('num_notes=%d' % len(person.get_notes()))
        counter.increment('status=' + (person.latest_status or ''))
        counter.increment('found=' + found)
        if person.author_email:  # author e-mail address present?
            counter.increment('author_email')
        if person.author_phone:  # author phone number present?
            counter.increment('author_phone')
        counter.increment(
            'linked_persons=%d' % len(person.get_linked_persons()))
class CountNote(CountBase):
    """Counting scan over Note records: tallies one accumulator per
    attribute of interest."""
    SCAN_NAME = 'note'
    ACTION = 'tasks/count/note'

    def make_query(self):
        """All Note entities in the current repository."""
        query = model.Note.all()
        return query.filter('repo =', self.repo)

    def update_counter(self, counter, note):
        """Increment one accumulator per tracked attribute of *note*."""
        if note.author_made_contact is None:
            author_made_contact = ''
        else:
            author_made_contact = 'TRUE' if note.author_made_contact else 'FALSE'

        counter.increment('all')
        counter.increment('status=' + (note.status or ''))
        counter.increment('original_domain=' + (note.original_domain or ''))
        counter.increment('author_made_contact=' + author_made_contact)
        if note.last_known_location:  # last known location specified?
            counter.increment('last_known_location')
        if note.author_email:  # author e-mail address present?
            counter.increment('author_email')
        if note.author_phone:  # author phone number present?
            counter.increment('author_phone')
        if note.linked_person_record_id:  # linked to another person?
            counter.increment('linked_person')
class AddReviewedProperty(CountBase):
    """Sets 'reviewed' to False on all notes that have no 'reviewed' property.
    This task is for migrating datastores that were created before the
    'reviewed' property existed; 'reviewed' has to be set to False so that
    the Notes will be indexed."""
    SCAN_NAME = 'unreview-note'
    ACTION = 'tasks/count/unreview_note'

    def make_query(self):
        """All Note entities in the current repository."""
        query = model.Note.all()
        return query.filter('repo =', self.repo)

    def update_counter(self, counter, note):
        """Backfill a falsy/missing 'reviewed' flag with an explicit False."""
        if note.reviewed:
            return
        note.reviewed = False
        note.put()
class UpdateDeadStatus(CountBase):
    """This task looks for Person records with the status 'believed_dead',
    checks for the last non-hidden Note, and updates the status if necessary.
    This is designed specifically to address bogus 'believed_dead' notes that
    are flagged as spam.  (This is a cleanup task, not a counting task.)"""
    SCAN_NAME = 'update-dead-status'
    ACTION = 'tasks/count/update_dead_status'

    def make_query(self):
        """Person entities in this repo currently marked believed_dead."""
        query = model.Person.all().filter('repo =', self.repo)
        return query.filter('latest_status =', 'believed_dead')

    def update_counter(self, counter, person):
        # Recomputes latest_status from the last non-hidden note.
        person.update_latest_status()
class UpdateStatus(CountBase):
    """This task scans Person records, looks for the last non-hidden Note, and
    updates latest_status.  (This is a cleanup task, not a counting task.)"""
    SCAN_NAME = 'update-status'
    ACTION = 'tasks/count/update_status'

    def make_query(self):
        """All Person entities in the current repository."""
        query = model.Person.all()
        return query.filter('repo =', self.repo)

    def update_counter(self, counter, person):
        # Recomputes latest_status from the last non-hidden note.
        person.update_latest_status()
class Reindex(CountBase):
    """A handler for re-indexing Persons."""
    SCAN_NAME = 'reindex'
    ACTION = 'tasks/count/reindex'

    def make_query(self):
        """All Person entities in the current repository."""
        query = model.Person.all()
        return query.filter('repo =', self.repo)

    def update_counter(self, counter, person):
        # Rebuild both index generations, then persist the entity.
        person.update_index(['old', 'new'])
        person.put()
class NotifyManyUnreviewedNotes(utils.BaseHandler):
    """This task sends email notification when the number of unreviewed notes
    exceeds threshold.
    """
    repo_required = False  # can run without a repo
    ACTION = 'tasks/notify_many_unreviewed_notes'

    def task_name(self):
        """Queue name under which this job is scheduled."""
        return 'notify-bad-review-status'

    def get(self):
        """Check one repo's unreviewed-note count, or fan out per repo."""
        if not self.repo:
            # No repo given: schedule one task per repository.
            for repo in model.Repo.list():
                self.add_task_for_repo(
                    repo, self.task_name(), self.ACTION)
            return
        try:
            unreviewed = model.Note.get_unreviewed_notes_count(self.repo)
            self._maybe_notify(unreviewed)
        except runtime.DeadlineExceededError:
            # Out of time: retry in a fresh task.
            logging.info("DeadlineExceededError occurs")
            self.add_task_for_repo(
                self.repo, self.task_name(), self.ACTION)
        except datastore_errors.Timeout:
            # Datastore hiccup: retry in a fresh task.
            logging.info("Timeout occurs in datastore")
            self.add_task_for_repo(
                self.repo, self.task_name(), self.ACTION)

    def _subject(self):
        """Subject line of the notification email."""
        return "Please review your notes in %(repo_name)s" % {
            "repo_name": self.env.repo,
        }

    def _body(self, count_of_unreviewed_notes):
        """Body of the notification email."""
        return "%(repo_name)s has %(num_unreviewed)s unreviewed notes." % {
            "repo_name": self.env.repo,
            "num_unreviewed": count_of_unreviewed_notes,
        }

    def _maybe_notify(self, count_of_unreviewed_notes):
        """Send the notification email iff the threshold is exceeded."""
        # TODO(yaboo@): Response should be modified
        if not self._should_notify(count_of_unreviewed_notes):
            return
        self.send_mail(self.config.get('notification_email'),
                       subject=self._subject(),
                       body=self._body(count_of_unreviewed_notes))

    def _should_notify(self, count_of_unreviewed_notes):
        """True when a recipient is configured and the count is above it."""
        if not self.config.get('notification_email'):
            return False
        threshold = self.config.get('unreviewed_notes_threshold')
        return count_of_unreviewed_notes > threshold
| {
"content_hash": "af55009294a5e633548aa9468cda029a",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 80,
"avg_line_length": 38.653301886792455,
"alnum_prop": 0.6094941729208615,
"repo_name": "AwesomeTurtle/personfinder",
"id": "cc04b38c4e6593ea670085db680ff6d7d427d363",
"size": "16987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "593"
},
{
"name": "JavaScript",
"bytes": "4402"
},
{
"name": "Python",
"bytes": "1167128"
},
{
"name": "Shell",
"bytes": "1217"
}
],
"symlink_target": ""
} |
import logging
import re
from streamlink.exceptions import FatalPluginError, NoStreamsError, PluginError
from streamlink.plugin import Plugin, PluginArgument, PluginArguments, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
    r"https?://sketch\.pixiv\.net/@?(?P<user>[^/]+)"
))
class Pixiv(Plugin):
    """Streamlink plugin for live streams hosted on sketch.pixiv.net."""

    # Matches the hidden post_key input field on pixiv's login form.
    _post_key_re = re.compile(
        r"""name=["']post_key["']\svalue=["'](?P<data>[^"']+)["']""")

    # Schema for a single broadcaster entry (owner or performer); the
    # hls_movie key is absent when that user has no live HLS stream yet.
    _user_dict_schema = validate.Schema(
        {
            "user": {
                "unique_name": validate.text,
                "name": validate.text
            },
            validate.optional("hls_movie"): {
                "url": validate.text
            }
        }
    )
    # Schema for one live entry: the stream owner plus any co-hosts.
    _user_schema = validate.Schema(
        {
            "owner": _user_dict_schema,
            "performers": [
                validate.any(_user_dict_schema, None)
            ]
        }
    )
    # Schema for the lives.json response, unwrapped down to the list of lives.
    _data_lives_schema = validate.Schema(
        {
            "data": {
                "lives": [_user_schema]
            }
        },
        validate.get("data"),
        validate.get("lives")
    )

    api_lives = "https://sketch.pixiv.net/api/lives.json"
    login_url_get = "https://accounts.pixiv.net/login"
    login_url_post = "https://accounts.pixiv.net/api/login"

    arguments = PluginArguments(
        PluginArgument(
            "sessionid",
            requires=["devicetoken"],
            sensitive=True,
            metavar="SESSIONID",
            help="""
        The pixiv.net sessionid that's used in pixivs PHPSESSID cookie.
        can be used instead of the username/password login process.
        """
        ),
        PluginArgument(
            "devicetoken",
            sensitive=True,
            metavar="DEVICETOKEN",
            help="""
        The pixiv.net device token that's used in pixivs device_token cookie.
        can be used instead of the username/password login process.
        """
        ),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
        Purge cached Pixiv credentials to initiate a new session
        and reauthenticate.
        """),
        PluginArgument(
            "performer",
            metavar="USER",
            help="""
        Select a co-host stream instead of the owner stream.
        """)
    )

    def __init__(self, url):
        """Initialize the plugin and detect cached authentication cookies."""
        super().__init__(url)
        # Consider the session authenticated when both pixiv cookies are
        # already present in the cached cookie jar.
        self._authed = (self.session.http.cookies.get("PHPSESSID")
                        and self.session.http.cookies.get("device_token"))
        self.session.http.headers.update({"Referer": self.url})

    def _login_using_session_id_and_device_token(self, session_id, device_token):
        """Authenticate by injecting user-supplied cookies, then cache them."""
        # Initial GET so the session picks up any server-side cookies first.
        self.session.http.get(self.login_url_get)
        self.session.http.cookies.set('PHPSESSID', session_id, domain='.pixiv.net', path='/')
        self.session.http.cookies.set('device_token', device_token, domain='.pixiv.net', path='/')
        self.save_cookies()
        log.info("Successfully set sessionId and deviceToken")

    def hls_stream(self, hls_url):
        """Yield (quality, stream) pairs parsed from the HLS variant playlist."""
        log.debug("URL={0}".format(hls_url))
        yield from HLSStream.parse_variant_playlist(self.session, hls_url).items()

    def get_streamer_data(self):
        """Return the lives.json entry whose owner matches the URL's user.

        Raises NoStreamsError when that user has no current live entry."""
        headers = {
            "X-Requested-With": "https://sketch.pixiv.net/lives",
        }
        res = self.session.http.get(self.api_lives, headers=headers)
        data = self.session.http.json(res, schema=self._data_lives_schema)
        log.debug("Found {0} streams".format(len(data)))

        for item in data:
            if item["owner"]["user"]["unique_name"] == self.match.group("user"):
                return item

        raise NoStreamsError(self.url)

    def _get_streams(self):
        """Resolve and return the owner or selected co-host HLS streams."""
        login_session_id = self.get_option("sessionid")
        login_device_token = self.get_option("devicetoken")

        if self.options.get("purge_credentials"):
            self.clear_cookies()
            self._authed = False
            log.info("All credentials were successfully removed.")

        if self._authed:
            log.debug("Attempting to authenticate using cached cookies")
        elif not self._authed and login_session_id and login_device_token:
            self._login_using_session_id_and_device_token(login_session_id, login_device_token)

        streamer_data = self.get_streamer_data()
        performers = streamer_data.get("performers")
        log.trace("{0!r}".format(streamer_data))
        if performers:
            # create a list of all available performers
            co_hosts = []
            for p in performers:
                co_hosts += [(p["user"]["unique_name"], p["user"]["name"])]

            log.info("Available hosts: {0}".format(", ".join(
                ["{0} ({1})".format(k, v) for k, v in co_hosts])))

            # control if the host from --pixiv-performer is valid,
            # if not let the User select a different host
            if (self.get_option("performer")
                    and not self.get_option("performer") in [v[0] for v in co_hosts]):

                # print the owner as 0
                log.info("0 - {0} ({1})".format(
                    streamer_data["owner"]["user"]["unique_name"],
                    streamer_data["owner"]["user"]["name"]))
                # print all other performer
                for i, item in enumerate(co_hosts, start=1):
                    log.info("{0} - {1} ({2})".format(i, item[0], item[1]))

                try:
                    number = int(self.input_ask(
                        "Enter the number you'd like to watch").split(" ")[0])
                    if number == 0:
                        # default stream
                        self.set_option("performer", None)
                    else:
                        # other co-hosts
                        self.set_option("performer", co_hosts[number - 1][0])
                except FatalPluginError:
                    raise PluginError("Selected performer is invalid.")
                except (IndexError, ValueError, TypeError):
                    raise PluginError("Input is invalid")

        # ignore the owner stream, if a performer is selected
        # or use it when there are no other performers
        if not self.get_option("performer") or not performers:
            return self.hls_stream(streamer_data["owner"]["hls_movie"]["url"])

        # play a co-host stream
        if performers and self.get_option("performer"):
            for p in performers:
                if p["user"]["unique_name"] == self.get_option("performer"):
                    # if someone goes online at the same time as Streamlink
                    # was used, the hls URL might not be in the JSON data
                    hls_movie = p.get("hls_movie")
                    if hls_movie:
                        return self.hls_stream(hls_movie["url"])
# Entry point looked up by Streamlink's plugin loader.
__plugin__ = Pixiv
| {
"content_hash": "be4589bad501a7dac221d716c0ac3e14",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 98,
"avg_line_length": 36.6282722513089,
"alnum_prop": 0.5437392795883362,
"repo_name": "melmorabity/streamlink",
"id": "30bf6bdc0adc2684043c447fad6d6cc49cfb92b1",
"size": "6996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/pixiv.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1537432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
import argparse
import os
import sys
from annotate_source import Annotator
from build_release import Builder
from generate_bom import BomGenerator
from refresh_source import Refresher
from spinnaker.run import check_run_quick
def __annotate_component(annotator, component):
    """Annotate the component's source but don't include it in the BOM.

    Points *annotator* at the component's path, tags its HEAD, and prunes
    unwanted tags.

    Returns:
      [VersionBump]: Version bump complete with commit hash.
    """
    annotator.path = component
    annotator.parse_git_tree()
    bump = annotator.tag_head()
    annotator.delete_unwanted_tags()
    return bump
def __record_halyard_nightly_version(version_bump, options):
    """Record the version and commit hash at which Halyard was built in a bucket.

    Assumes that gsutil is installed on the machine this script is run from.

    This function uses `gsutil rsync` to read the GCS file, changes it in-place,
    and then uses `gsutil rsync` to write the file again. `rsync` is eventually
    consistent, so running this script (or manually manipulating the GCS file)
    concurrently could likely result in file corruption. Don't parallelize this.
    """
    bucket_uri = options.hal_nightly_bucket_uri
    build_number = options.build_number
    mirror_dir = os.path.basename(bucket_uri)
    if not os.path.exists(mirror_dir):
        os.mkdir(mirror_dir)
    # Mirror the bucket locally (-r recursive, -d delete extraneous files).
    check_run_quick('gsutil rsync -r -d {remote_uri} {local_bucket}'
                    .format(remote_uri=bucket_uri, local_bucket=mirror_dir))

    hal_version = version_bump.version_str.replace('version-', '')
    full_hal_version = '{version}-{build}'.format(version=hal_version, build=build_number)
    entry_line = ('{full_hal_version}: {commit}'
                  .format(full_hal_version=full_hal_version,
                          commit=version_bump.commit_hash))
    # Append the new "<version>-<build>: <commit>" line to the log file.
    entry_path = '{0}/nightly-version-commits.yml'.format(mirror_dir)
    with open(entry_path, 'a') as nightly_file:
        nightly_file.write('{0}\n'.format(entry_line))

    # Now sync the local dir with the bucket again after the update.
    check_run_quick('gsutil rsync -r -d {local_bucket} {remote_uri}'
                    .format(remote_uri=bucket_uri, local_bucket=mirror_dir))
    # Opening with 'w' stomps the old file.
    with open(options.output_built_halyard_version, 'w') as version_file:
        version_file.write('{}'.format(full_hal_version))
def init_argument_parser(parser):
    """Register every command-line flag this build script uses on *parser*."""
    parser.add_argument(
        '--hal_nightly_bucket_uri', default='',
        help='The URI of the bucket to record the version and commit at which we built Halyard.')
    parser.add_argument(
        '--output_built_halyard_version', default='',
        help='A file path to record the last built Halyard version in.')
    # Don't need to init args for Annotator since BomGenerator extends it.
    BomGenerator.init_argument_parser(parser)
    Builder.init_argument_parser(parser)
def main():
    """Build a Spinnaker release to be validated by Citest.

    Orchestration: annotate Halyard's source, tag component versions, write
    the container-builder config, run the build, then publish BOMs and the
    changelog.
    """
    parser = argparse.ArgumentParser()
    init_argument_parser(parser)
    options = parser.parse_args()

    # Tag Halyard's source first; its version bump is recorded separately
    # in the nightly bucket after the build succeeds.
    annotator = Annotator(options)
    halyard_bump = __annotate_component(annotator, 'halyard')
    bom_generator = BomGenerator(options)
    bom_generator.determine_and_tag_versions()
    # Emit the version/config files for the selected container build system.
    if options.container_builder == 'gcb':
        bom_generator.write_container_builder_gcr_config()
    elif options.container_builder == 'gcb-trigger':
        bom_generator.write_gcb_trigger_version_files()
    elif options.container_builder == 'docker':
        bom_generator.write_docker_version_files()
    else:
        raise NotImplementedError('container_builder="{0}"'
                                  .format(options.container_builder))
    Builder.do_build(options, build_number=options.build_number,
                     container_builder=options.container_builder)
    # Load version information into memory and write BOM to disk. Don't publish yet.
    bom_generator.write_bom()
    bom_generator.publish_microservice_configs()
    __record_halyard_nightly_version(halyard_bump, options)
    bom_generator.publish_boms()
    bom_generator.generate_changelog()
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
| {
"content_hash": "69f9b6c59db7a3de2b462c3216b641fd",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 111,
"avg_line_length": 42.464646464646464,
"alnum_prop": 0.7140818268315889,
"repo_name": "jtk54/spinnaker",
"id": "44df6441036175e6b02904718c2ce1a63465943e",
"size": "4821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/build_prevalidation.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "8845"
},
{
"name": "Python",
"bytes": "995170"
},
{
"name": "Shell",
"bytes": "217258"
}
],
"symlink_target": ""
} |
"""SPI support for USB-ISS"""
from .usbiss import USBISS
from .usbiss import USBISSError
class SPI(object):
    """SPI operating mode of USBISS.

    Exposes a ``spidev.SpiDev``-like API on top of a USB-ISS adapter.
    """

    def __init__(self, port, mode=0, max_speed_hz=3000000):
        # port: serial port of the USB-ISS adapter, e.g. '/dev/ttyACM0'.
        self._usbiss = USBISS(port)
        self.sck_divisor = 1
        # Select the SPI mode of USB-ISS's SPI operating mode
        self.mode = mode
        # Select frequency of USB-ISS's SPI operating mode
        self.max_speed_hz = max_speed_hz

    def open(self):
        """Open the underlying USB-ISS connection."""
        self._usbiss.open()

    def close(self):
        """Close the underlying USB-ISS connection."""
        self._usbiss.close()

    def configure(self):
        """
        Configure SPI controller with the SPI mode and operating frequency
        """
        # Convert standard SPI scheme to the USBISS scheme (modes 1 and 2
        # are swapped between the two).
        lookup_table = [0, 2, 1, 3]
        mode = lookup_table[self._mode]

        # Add signal for SPI switch
        iss_mode = self._usbiss.SPI_MODE + mode

        # Configure USB-ISS
        self._usbiss.mode = [iss_mode, self.sck_divisor]

    @property
    def mode(self):
        """
        Property that gets / sets the SPI mode as two bit pattern of Clock
        Polarity and Phase [CPOL|CPHA].

        The standard SPI mode scheme is used.  The USBISS SPI mode scheme has
        modes 1 and 2 swapped compared with the standard scheme.

        Emulates spidev.SpiDev.mode

        :getter: Gets SPI mode
        :setter: Sets SPI mode
        :type: int (byte 0xnn)
        """
        return self._mode

    @mode.setter
    def mode(self, val):
        # Only modes 0-3 exist; reconfigure the adapter on every change.
        if 0 <= val < 4:
            self._mode = val
            self.configure()
        else:
            error = (
                "The value of SPI mode, {}, is not between 0 and 3".format(val)
            )
            raise ValueError(error)

    @property
    def max_speed_hz(self):
        """
        Property that gets / sets the maximum bus speed in Hz.

        Emulates spidev.SpiDev.max_speed_hz.

        :getter: Gets the SPI operating frequency
        :setter: Sets the SPI operating frequency
        :type: int
        """
        return self._max_speed_hz

    @max_speed_hz.setter
    def max_speed_hz(self, val):
        # Recompute the divisor from the requested clock and reconfigure.
        self._max_speed_hz = val
        self.sck_divisor = self.iss_spi_divisor(val)
        self.configure()

    def iss_spi_divisor(self, sck):
        """
        Calculate a USBISS SPI divisor value from the input SPI clock speed

        :param sck: SPI clock frequency
        :type sck: int
        :returns: ISS SCK divisor
        :rtype: int
        :raises ValueError: if sck does not map onto an integer divisor in
            the accepted range
        """
        _divisor = (6000000 / sck) - 1
        divisor = int(_divisor)

        if divisor != _divisor:
            raise ValueError('Non-integer SCK divisor.')

        # NOTE(review): the error message says "between 0 and 255" but the
        # accepted range is actually 1..255 -- confirm which is intended.
        if not 1 <= divisor < 256:
            error = (
                "The value of sck_divisor, {}, "
                "is not between 0 and 255".format(divisor)
            )
            raise ValueError(error)
        return divisor

    def exchange(self, data):
        """
        Perform SPI transaction.

        The first received byte is either ACK or NACK.

        :TODO: enforce rule that up to 63 bytes of data can be sent.
        :TODO: enforce rule that there is no gaps in data bytes (what define a gap?)

        :param data: List of bytes
        :returns: List of bytes
        :rtype: List of bytes
        :raises USBISSError: on a NACK status byte or an empty response
        """
        self._usbiss.write_data([self._usbiss.SPI_CMD] + data)
        response = self._usbiss.read_data(1 + len(data))
        if len(response) != 0:
            response = self._usbiss.decode(response)
            # First byte is the status: 0 means the adapter NACKed.
            status = response.pop(0)
            if status == 0:
                raise USBISSError('SPI Transmission Error')
            return response
        else:
            raise USBISSError('SPI Transmission Error: No bytes received!')

    def xfer(self, data):
        """
        Perform a SPI transaction.

        :param data: List of bytes
        :returns: List of bytes
        :rtype: List of bytes
        """
        return self.exchange(data)

    def xfer2(self, data):
        """
        Write bytes to SPI device.

        :param data: List of bytes
        :returns: List of bytes
        :rtype: List of bytes
        """
        return self.exchange(data)

    def readbytes(self, readLen):
        """
        Read readLen bytes from SPI device.

        :param readLen: Number of bytes
        :returns: List of bytes
        :rtype: List of bytes
        """
        # Clock out zeros so the device can shift its data back.
        return self.exchange([0] * readLen)

    def writebytes(self, data):
        """
        Write bytes to SPI device.

        :param data: List of bytes
        """
        self.exchange(data)
| {
"content_hash": "2a8615740d8dfb6cafcc075c0808817b",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 84,
"avg_line_length": 26.385057471264368,
"alnum_prop": 0.5536920060988891,
"repo_name": "DancingQuanta/pyusbiss",
"id": "8f137506d1161f6e2f09c3a1d79256fc1c04c13f",
"size": "4787",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "usbiss/spi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2363"
},
{
"name": "Python",
"bytes": "13720"
}
],
"symlink_target": ""
} |
"""Support for composite queries without indexes."""
import pickle
from . import common
from .util import patch
from google.appengine.api import datastore
from google.appengine.datastore import datastore_index
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import datastore_query
class _FakeBatch(object):
"""A fake datastore_query.Batch that returns canned results.
This class intentionally does not inherit from datastore_query.Batch because
the default implementation is more likely to hurt than to help.
Attributes:
results: The list of results (entities or keys).
"""
def __init__(self, results):
self.results = results
class _FakeBatcher(datastore_query.Batcher):
    """A fake datastore_query.Batcher consisting of a single _FakeBatch."""

    def __init__(self, results):
        self._batch = _FakeBatch(results)

    def next_batch(self, unused_min_batch_size):  # pylint: disable-msg=C6409
        """Return the next batch, or None if no more batches remain."""
        # Hand out the single batch once; every later call returns None.
        pending, self._batch = self._batch, None
        return pending
def _WidenQueryProto(query_pb):
    """Return a simple query that is a superset of the requested query.

    Args:
      query_pb: A datastore_pb.Query object that requires a composite index.

    Returns:
      A datastore_pb.Query object that does not require a composite index, or
      None if the original query cannot be widened.
    """
    # Queries carrying compiled cursors cannot be handled.
    if query_pb.has_compiled_cursor() or query_pb.has_end_compiled_cursor():
        return None

    # Start from a full copy so that most fields carry over intact.
    widened = datastore_pb.Query()
    widened.CopyFrom(query_pb)

    # Drop offset/limit; they are re-applied after post-processing.
    widened.clear_offset()
    widened.clear_limit()

    # Keep only the EQUAL filters from the original query.
    widened.clear_filter()
    for flt in query_pb.filter_list():
        if flt.op == flt.EQUAL:
            widened.add_filter().CopyFrom(flt)

    # Remove orders.
    #
    # TODO: technically we could support a single ascending
    # order, but since we're going to buffer everything in memory it
    # doesn't matter if we leave any orders in the widened query. If in
    # the future we stream results for queries that are only widened due
    # to filters then it might be beneficial to leave the orders intact
    # if they consist of a single ascending order.
    widened.clear_order()

    # Full entities are required for post-processing, so force keys_only off.
    widened.set_keys_only(False)

    return widened
@patch.NeedsOriginal
def _CustomQueryRun(original, query, conn, query_options=None):
    """Patched datastore_query.Query.run() method.

    Fulfills composite-index queries without an index by running a widened
    (EQUAL-filters-only, unordered) query and post-processing the results in
    memory; falls back to the original run() otherwise.
    """
    query_pb = query._to_pb(conn, query_options)  # pylint: disable-msg=W0212
    # Check if composite index is required.
    req, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query_pb)
    if req:
        # Keep track of the composite index for generation of index.yaml text.
        props = datastore_index.GetRecommendedIndexProperties(props)
        index_yaml = datastore_index.IndexYamlForQuery(kind, ancestor, props)
        _RecordIndex(index_yaml)
        wide_pb = _WidenQueryProto(query_pb)
        if wide_pb is not None:
            # pylint: disable-msg=W0212
            wide_query = datastore_query.Query._from_pb(wide_pb)
            # TODO: query_options are ignored here since we pass None.
            # It might be possible to pass query_options through - future
            # investigation is required.
            batcher = original(wide_query, conn, None)
            # Buffer every result of the widened query in memory as protos.
            results = []
            for batch in batcher:
                results.extend([entity.ToPb() for entity in batch.results])
            # Apply the original query and slice.
            results = datastore_query.apply_query(query, results)
            offset = query_options.offset or 0
            limit = query_options.limit
            if limit is None:
                limit = len(results)
            results = results[offset:offset+limit]
            # Convert protos to to entities or keys.
            if query_pb.keys_only():
                results = [datastore.Entity.FromPb(pb).key() for pb in results]
            else:
                results = [datastore.Entity.FromPb(pb) for pb in results]
            return _FakeBatcher(results)
    # The query is either a simple query or a composite query that cannot be
    # widened - invoke the normal Query.run() implementation and let it fulfill
    # the request or raise an exception.
    return original(query, conn, query_options=query_options)
def CompositeQueryPatch():
    """Return a Patch that enables composite queries without indexes."""
    # Swap datastore_query.Query.run for our widening implementation.
    target = datastore_query.Query
    return patch.AttributePatch(target, 'run', _CustomQueryRun)
def _WriteIndexes(indexes):
    """Persist the set of index entries.

    Args:
      indexes: A set of strings, each of which defines a composite index.
    """
    blob = pickle.dumps(indexes, pickle.HIGHEST_PROTOCOL)
    common.SetPersistent(common.PERSIST_INDEX_NAME, blob)
def _ReadIndexes():
    """Retrieve the set of index entries.

    Returns:
      A set of strings, each of which defines a composite index.
    """
    blob = common.GetPersistent(common.PERSIST_INDEX_NAME)
    if blob is None:
        return set()
    try:
        return pickle.loads(blob)
    except Exception:  # pylint: disable-msg=W0703
        # Best effort: unreadable or corrupt data yields an empty set.
        return set()
def _RecordIndex(index):
    """Add the index spec (a string) to the set of indexes used."""
    # Read-modify-write the persisted set.
    known = _ReadIndexes()
    known.add(index)
    _WriteIndexes(known)
def ClearIndexYaml():
    """Reset the index.yaml data to contain no indexes."""
    # Overwrite the persisted index set with an empty one.
    _WriteIndexes(set())
def GetIndexYaml():
    """Retrieve the specifications for all composite indexes used so far.

    Returns:
      A string suitable for use in an index.yaml file.
    """
    entries = sorted(_ReadIndexes())
    return 'indexes:\n\n' + '\n\n'.join(entries)
| {
"content_hash": "1cf5d4a0c0e5ca8443e9c591e0538243",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 79,
"avg_line_length": 31.85164835164835,
"alnum_prop": 0.7088149042608246,
"repo_name": "googlearchive/mimic",
"id": "bbe9a376c65fd07b6ff11358461e4c5d70caee1b",
"size": "6394",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__mimic/composite_query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4076"
},
{
"name": "Python",
"bytes": "285112"
},
{
"name": "Shell",
"bytes": "1058"
}
],
"symlink_target": ""
} |
"""
@file
@brief Creates a custom log (open a text file and flushes everything in it).
"""
import datetime
import os
class CustomLog:
    """
    Implements a custom logging function: messages are written to a text
    file and the stream is flushed after every message.

    This class is not protected against multithreading.

    Usage:

    ::

        clog = CustomLog("folder")
        clog('[fct]', info)
    """

    def __init__(self, folder=None, filename=None, create=True, parent=None):
        """
        initialisation

        @param folder folder (created if it does not exist)
        @param filename a file name, an already opened stream, or None to
                        pick the first unused ``log_custom_NNN.txt``
        @param create unused, kept for backward compatibility
        @param parent logging function (called after this one if not None)
        """
        folder = os.path.abspath(folder)
        self._folder = folder
        self._parent = parent
        if not os.path.exists(folder):
            os.makedirs(folder)  # pragma: no cover
        if filename is None:
            # Probe log_custom_000.txt, log_custom_001.txt, ... until an
            # unused name is found.  (Fixed: the original probing loop used
            # the inconsistent prefix 'custom_log_%03d.txt', so the second
            # and later files in a folder got a different name pattern.)
            i = 0
            filename = "log_custom_%03d.txt" % i
            fullpath = os.path.join(folder, filename)
            while os.path.exists(fullpath):
                i += 1
                filename = "log_custom_%03d.txt" % i
                fullpath = os.path.join(folder, filename)
            self._filename = filename
            self._fullpath = fullpath
            self._handle = open(self._fullpath, "w", encoding="utf-8")
            self._close = True
        elif isinstance(filename, str):
            # Explicit file name, created inside *folder*.
            self._filename = filename
            self._fullpath = os.path.join(folder, filename)
            self._handle = open(self._fullpath, "w", encoding="utf-8")
            self._close = True
        else:
            # Assume *filename* is an already opened stream owned by the
            # caller: this instance must not close it.
            self._handle = filename
            self._close = False
            self._filename = None
            self._fullpath = None

    @property
    def filename(self):
        """
        returns *_filename* (None when logging to a caller-owned stream)
        """
        return self._filename

    @property
    def fullpath(self):
        """
        returns *_fullpath* (None when logging to a caller-owned stream)
        """
        return self._fullpath

    def __del__(self):
        """
        Closes the stream if this instance owns it.
        """
        if self._close:
            self._handle.close()

    def __call__(self, *args, **kwargs):
        """
        Logs anything, then forwards to the parent logging function if any.
        """
        self.fLOG(*args, **kwargs)
        if self._parent is not None:
            self._parent(*args, **kwargs)

    def fLOG(self, *args, **kwargs):
        """
        Builds a message on a single line with the date, it deals with
        encoding issues.

        @param args list of fields
        @param kwargs dictionary of fields
        """
        dt = datetime.datetime.now()
        if len(args) > 0:
            def _str_process(s):
                # Normalize any field to text, decoding bytes as utf-8.
                if isinstance(s, str):
                    return s
                if isinstance(s, bytes):
                    return s.decode("utf8")  # pragma: no cover
                try:
                    return str(s)
                except Exception as e:  # pragma: no cover
                    raise Exception(
                        f"Unable to convert s into string: type(s)={type(s)!r}") from e

            message = (str(dt).split(".", maxsplit=1)[0] + " " +
                       " ".join([_str_process(s) for s in args]) + "\n")
            self._handle.write(message)
            # Keyword lines written after a positional line are only
            # indented, not date-prefixed.
            st = " "
        else:
            st = str(dt).split(".", maxsplit=1)[0] + " "  # pragma: no cover
        for k, v in kwargs.items():
            message = st + "%s = %s%s" % (str(k), str(v), "\n")
            # Legacy filter: stop writing keyword fields once a message
            # contains an SQL join (kept for backward compatibility).
            if "INNER JOIN" in message:
                break  # pragma: no cover
            self._handle.write(message)
        self._handle.flush()
| {
"content_hash": "6798601426a2ee2f444a303264d3cec0",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 88,
"avg_line_length": 30.75,
"alnum_prop": 0.48623131392604246,
"repo_name": "sdpython/pyquickhelper",
"id": "7231acd344608ee0ad43d064161c163c091c0221",
"size": "3837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/pyquickhelper/loghelper/custom_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1362"
},
{
"name": "CSS",
"bytes": "164881"
},
{
"name": "HTML",
"bytes": "70034"
},
{
"name": "JavaScript",
"bytes": "273028"
},
{
"name": "Jupyter Notebook",
"bytes": "4659927"
},
{
"name": "Python",
"bytes": "3099479"
},
{
"name": "SCSS",
"bytes": "65612"
},
{
"name": "Sass",
"bytes": "11826"
},
{
"name": "Shell",
"bytes": "694"
},
{
"name": "Smarty",
"bytes": "27674"
},
{
"name": "TeX",
"bytes": "22447"
}
],
"symlink_target": ""
} |
"""
wasi-sdk installation and maintenance
"""
from mod import log, wasisdk
def run(fips_dir, proj_dir, args):
    """Entry point for 'fips wasisdk ...': dispatches install/uninstall."""
    if not args:
        log.error("expected a subcommand (install or uninstall)")
        return
    cmd = args[0]
    if cmd == 'install':
        wasisdk.install(fips_dir)
    elif cmd == 'uninstall':
        wasisdk.uninstall(fips_dir)
    else:
        log.error("unknown subcommand '{}' (run './fips help wasisdk')".format(cmd))
def help():
    """Print usage help for the wasisdk verb."""
    usage = (
        "fips wasisdk install\n"
        "fips wasisdk uninstall\n"
    )
    log.info(log.YELLOW + usage + log.DEF +
             " install or uninstall the WASI SDK")
| {
"content_hash": "351ba0edf65b5e7460424fbb5a85e776",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 88,
"avg_line_length": 29.652173913043477,
"alnum_prop": 0.5454545454545454,
"repo_name": "floooh/fips",
"id": "fcc6b82cb357e436d6176f908968f3a6519d9ee3",
"size": "682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "verbs/wasisdk.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "34"
},
{
"name": "CMake",
"bytes": "126512"
},
{
"name": "Java",
"bytes": "162"
},
{
"name": "Python",
"bytes": "648132"
},
{
"name": "Shell",
"bytes": "2581"
},
{
"name": "Vim Script",
"bytes": "211"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.