text stringlengths 4 1.02M | meta dict |
|---|---|
import pytest
from baluster import Baluster, placeholders
class CompositeRootCase(Baluster):
    """Composite-root fixture exercising placeholders, factories and close hooks."""

    _value = 0
    _closed = False
    _closed_resources = None  # lazily becomes a list of closed resource values

    # Plain value placeholders: unset, literal default, and callable default.
    place = placeholders.value()
    place_with_default = placeholders.value(1)
    place_with_callable = placeholders.value(lambda: 1)

    @placeholders.factory
    def value(self, root):
        return self._value

    @placeholders.factory
    def value_plus_100(self, root):
        return self._value + 100

    @placeholders.factory(cache=False)
    def value_no_cache(self, root):
        # Counts invocations: without caching every access bumps _value.
        self._value += 1
        return self._value

    @placeholders.factory(readonly=True)
    def value_readonly(self, root):
        return self._value

    @placeholders.factory
    def resource_1(self, root):
        return 1

    @resource_1.close
    def _close_resource_1(self, root, resource):
        # Record the resource that was closed (list created on first close).
        if self._closed_resources is None:
            self._closed_resources = []
        self._closed_resources.append(resource)

    @placeholders.factory(cache=False)
    def resource_2(self, root):
        return 2

    @resource_2.close
    def _close_resource_2(self, root, resource):
        if self._closed_resources is None:
            self._closed_resources = []
        self._closed_resources.append(resource)
class TestPlaceholder:
    """Behaviour of plain value placeholders."""

    def test_getter(self):
        holder = CompositeRootCase()
        # Reading an unset placeholder raises instead of returning None.
        with pytest.raises(AttributeError):
            assert holder.place is None
        holder.place = 'New value'
        assert holder.place == 'New value'
        assert holder.place_with_default == 1
        assert holder.place_with_callable == 1

    def test_class_level_access(self):
        assert CompositeRootCase.place._name == 'place'

    def test_set_new_value(self):
        holder = CompositeRootCase()
        holder.place = 'New value'
        # A placeholder may only be assigned once.
        with pytest.raises(AttributeError):
            holder.place = 'Other value'
class TestBaluster:
    """Behaviour of factory placeholders: caching, close hooks and copies."""

    def test_sanity(self):
        obj = CompositeRootCase()
        assert obj.value == 0

    def test_class_level_access(self):
        assert CompositeRootCase.value._name == 'value'

    def test_closed_without_invoke(self):
        obj = CompositeRootCase()
        # No resource was ever created, so nothing should have been closed.
        with pytest.raises(ZeroDivisionError):
            with obj.enter():
                raise ZeroDivisionError()
        assert obj._closed_resources is None

    def test_closed_with_invoke(self):
        obj = CompositeRootCase()
        # The exception must propagate out of the context manager.
        with pytest.raises(ZeroDivisionError):
            with obj.enter() as o:
                o.resource_1
                o.resource_2
                o.resource_2
                raise ZeroDivisionError()

    def test_no_cache(self):  # fixed typo: was test_no_cahce
        obj = CompositeRootCase()
        assert obj.value == 0
        assert obj.value_no_cache == 1
        assert obj.value_no_cache == 2
        # The cached factory keeps its originally computed value.
        assert obj.value == 0

    def test_set_new_value(self):
        obj = CompositeRootCase()
        obj.value = 3
        assert obj.value == 3
        assert obj.value_no_cache == 1
        # A factory value may only be assigned once.
        with pytest.raises(AttributeError):
            obj.value = 5

    def test_readonly(self):
        obj = CompositeRootCase()
        with pytest.raises(AttributeError):
            obj.value_readonly = 3

    def test_cannot_be_deleted(self):
        obj = CompositeRootCase()
        with pytest.raises(AttributeError):
            del obj.value

    def test_copy(self):
        obj = CompositeRootCase()
        obj.value = 3
        assert obj.value == 3
        clone = obj.partial_copy()
        assert clone.value == 0

    def test_copy_keep_values(self):
        obj = CompositeRootCase()
        obj.resource_1
        obj.resource_2
        clone = obj.partial_copy('resource_1', 'unknown')
        clone.close()
        assert clone._closed_resources is None
        assert obj._closed_resources is None

    def test_close_action_copy(self):
        root = CompositeRootCase()
        with root.enter() as obj:
            obj.resource_1
            obj.resource_2
        # Close hooks ran: resource 2 was closed before resource 1.
        assert obj._closed_resources == [2, 1]
| {
"content_hash": "90c849fe83caca80bd377bbaa7887af1",
"timestamp": "",
"source": "github",
"line_count": 170,
"max_line_length": 56,
"avg_line_length": 25.029411764705884,
"alnum_prop": 0.5967097532314923,
"repo_name": "palankai/baluster",
"id": "913a6e2f7120460145669d7cfc28137b509c6108",
"size": "4255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_holder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42504"
}
],
"symlink_target": ""
} |
"""Tests for process_profiles.py."""
import collections
import unittest
import process_profiles
from test_utils import (ProfileFile,
SimpleTestSymbol,
TestSymbolOffsetProcessor,
TestProfileManager)
class ProcessProfilesTestCase(unittest.TestCase):
    """Tests for symbol/offset processing and ProfileManager grouping.

    Fix: the deprecated `assertEquals` alias (removed in Python 3.12) has
    been replaced by `assertEqual` throughout.
    """

    START_SYMBOL = 'linker_script_start_of_text'

    def setUp(self):
        self.symbol_0 = SimpleTestSymbol(self.START_SYMBOL, 0, 0)
        self.symbol_1 = SimpleTestSymbol('1', 6, 16)
        self.symbol_2 = SimpleTestSymbol('2', 32, 8)
        self.symbol_3 = SimpleTestSymbol('3', 40, 12)
        self.offset_to_symbol_info = (
            [None] * 3 + [self.symbol_1] * 8 + [None] * 5 + [self.symbol_2] * 4 +
            [self.symbol_3] * 6)
        self.symbol_infos = [self.symbol_0, self.symbol_1,
                             self.symbol_2, self.symbol_3]
        self._file_counter = 0

    def MakeAnnotatedOffset(self, offset, counts):
        # Helper: build an AnnotatedOffset with a preset count mapping.
        ao = process_profiles.ProfileManager.AnnotatedOffset(offset)
        ao._count = counts
        return ao

    def testGetOffsetToSymbolInfo(self):
        processor = TestSymbolOffsetProcessor(self.symbol_infos)
        self.assertListEqual(self.offset_to_symbol_info,
                             processor.GetDumpOffsetToSymbolInfo())

    def testOverlappingSymbols(self):
        symbol_1 = SimpleTestSymbol(self.START_SYMBOL, 6, 8)
        symbol_2 = SimpleTestSymbol('2', 10, 10)
        processor = TestSymbolOffsetProcessor([symbol_1, symbol_2])
        self.assertListEqual([symbol_1] * 4 + [symbol_2] * 3,
                             processor.GetDumpOffsetToSymbolInfo())

    def testSymbolsBeforeStart(self):
        self.symbol_infos = [SimpleTestSymbol(s.name, s.offset + 8, s.size)
                             for s in self.symbol_infos]
        self.symbol_infos.append(SimpleTestSymbol('early', 0, 4))
        processor = TestSymbolOffsetProcessor(self.symbol_infos)
        self.assertRaises(AssertionError, processor.GetDumpOffsetToSymbolInfo)

    def testGetReachedOffsetsFromDump(self):
        processor = TestSymbolOffsetProcessor(self.symbol_infos)
        # 2 hits for symbol_1, 0 for symbol_2, 1 for symbol_3
        dump = [8, 12, 48]
        reached = processor.GetReachedOffsetsFromDump(dump)
        self.assertListEqual([self.symbol_1.offset, self.symbol_3.offset], reached)
        # Ordering matters, no repetitions
        dump = [48, 12, 8, 12, 8, 16]
        reached = processor.GetReachedOffsetsFromDump(dump)
        self.assertListEqual([self.symbol_3.offset, self.symbol_1.offset], reached)

    def testSymbolNameToPrimary(self):
        symbol_infos = [SimpleTestSymbol('1', 8, 16),
                        SimpleTestSymbol('AnAlias', 8, 16),
                        SimpleTestSymbol('Another', 40, 16)]
        processor = TestSymbolOffsetProcessor(symbol_infos)
        self.assertDictEqual({8: symbol_infos[0],
                              40: symbol_infos[2]}, processor.OffsetToPrimaryMap())

    def testGetOrderedSymbols(self):
        processor = TestSymbolOffsetProcessor(self.symbol_infos)
        self.assertListEqual(['1', '3', self.START_SYMBOL],
                             processor.GetOrderedSymbols([7, 41, 5, 0]))

    def testOffsetToSymbolsMap(self):
        symbol_infos = [SimpleTestSymbol('1', 8, 16),
                        SimpleTestSymbol('AnAlias', 8, 16),
                        SimpleTestSymbol('Another', 40, 16)]
        processor = TestSymbolOffsetProcessor(symbol_infos)
        self.assertDictEqual({8: [symbol_infos[0], symbol_infos[1]],
                              40: [symbol_infos[2]]},
                             processor.OffsetToSymbolsMap())

    def testPrimarySizeMismatch(self):
        symbol_infos = [SimpleTestSymbol('1', 8, 16),
                        SimpleTestSymbol('AnAlias', 8, 32)]
        processor = TestSymbolOffsetProcessor(symbol_infos)
        self.assertRaises(AssertionError, processor.OffsetToPrimaryMap)
        symbol_infos = [SimpleTestSymbol('1', 8, 0),
                        SimpleTestSymbol('2', 8, 32),
                        SimpleTestSymbol('3', 8, 32),
                        SimpleTestSymbol('4', 8, 0)]
        processor = TestSymbolOffsetProcessor(symbol_infos)
        self.assertDictEqual({8: symbol_infos[1]}, processor.OffsetToPrimaryMap())

    def testMatchSymbols(self):
        symbols = [SimpleTestSymbol('W', 30, 10),
                   SimpleTestSymbol('Y', 60, 5),
                   SimpleTestSymbol('X', 100, 10)]
        processor = TestSymbolOffsetProcessor(symbols)
        self.assertListEqual(sorted(symbols[1:3]),
                             processor.MatchSymbolNames(['Y', 'X']))

    def testSymbolsSize(self):
        symbols = [SimpleTestSymbol('W', 10, 1),
                   SimpleTestSymbol('X', 20, 2),
                   SimpleTestSymbol('Y', 30, 4),
                   SimpleTestSymbol('Z', 40, 8)]
        processor = TestSymbolOffsetProcessor(symbols)
        self.assertEqual(13, processor.SymbolsSize(['W', 'Y', 'Z']))

    def testMedian(self):
        self.assertEqual(None, process_profiles._Median([]))
        self.assertEqual(5, process_profiles._Median([5]))
        self.assertEqual(5, process_profiles._Median([1, 5, 20]))
        self.assertEqual(5, process_profiles._Median([4, 6]))
        self.assertEqual(5, process_profiles._Median([1, 4, 6, 100]))
        self.assertEqual(5, process_profiles._Median([1, 4, 5, 6, 100]))

    def testRunGroups(self):
        files = [ProfileFile(40, 0), ProfileFile(100, 0),
                 ProfileFile(200, 1), ProfileFile(35, 1),
                 ProfileFile(42, 0), ProfileFile(95, 0)]
        mgr = process_profiles.ProfileManager(files)
        mgr._ComputeRunGroups()
        self.assertEqual(3, len(mgr._run_groups))
        self.assertEqual(3, len(mgr._run_groups[0].Filenames()))
        self.assertEqual(2, len(mgr._run_groups[1].Filenames()))
        self.assertEqual(1, len(mgr._run_groups[2].Filenames()))
        self.assertTrue(files[0] in mgr._run_groups[0].Filenames())
        self.assertTrue(files[3] in mgr._run_groups[0].Filenames())
        self.assertTrue(files[4] in mgr._run_groups[0].Filenames())
        self.assertTrue(files[1] in mgr._run_groups[1].Filenames())
        self.assertTrue(files[5] in mgr._run_groups[1].Filenames())
        self.assertTrue(files[2] in mgr._run_groups[2].Filenames())

    def testRunGroupSanity(self):
        files = []
        # Generate 20 sets of files in groups separated by 60s.
        for ts_base in range(0, 20):
            ts = ts_base * 60
            files.extend([ProfileFile(ts, 0, 'browser'),
                          ProfileFile(ts + 1, 0, 'renderer'),
                          ProfileFile(ts + 2, 1, 'browser'),
                          ProfileFile(ts + 3, 0, 'gpu'),
                          ProfileFile(ts + 2, 1, 'renderer'),
                          ProfileFile(ts + 5, 1, 'gpu')])
        # The following call should not assert.
        process_profiles.ProfileManager(files)._ComputeRunGroups()
        files.extend([
            ProfileFile(20 * 60, 0, 'browser'),
            ProfileFile(20 * 60 + 2, 1, 'renderer'),
            ProfileFile(21 * 60, 0, 'browser')
        ] + [ProfileFile(22 * 60, 0, 'renderer') for _ in range(0, 10)])
        self.assertRaises(AssertionError,
                          process_profiles.ProfileManager(files)._ComputeRunGroups)

    def testReadOffsets(self):
        mgr = TestProfileManager({
            ProfileFile(30, 0): [1, 3, 5, 7],
            ProfileFile(40, 1): [8, 10],
            ProfileFile(50, 0): [13, 15]})
        self.assertListEqual([1, 3, 5, 7, 8, 10, 13, 15],
                             mgr.GetMergedOffsets())
        self.assertListEqual([8, 10], mgr.GetMergedOffsets(1))
        self.assertListEqual([], mgr.GetMergedOffsets(2))

    def testRunGroupOffsets(self):
        mgr = TestProfileManager({
            ProfileFile(30, 0): [1, 2, 3, 4],
            ProfileFile(150, 0): [9, 11, 13],
            ProfileFile(40, 1): [5, 6, 7]})
        offsets_list = mgr.GetRunGroupOffsets()
        self.assertEqual(2, len(offsets_list))
        self.assertListEqual([1, 2, 3, 4, 5, 6, 7], offsets_list[0])
        self.assertListEqual([9, 11, 13], offsets_list[1])
        offsets_list = mgr.GetRunGroupOffsets(0)
        self.assertEqual(2, len(offsets_list))
        self.assertListEqual([1, 2, 3, 4], offsets_list[0])
        self.assertListEqual([9, 11, 13], offsets_list[1])
        offsets_list = mgr.GetRunGroupOffsets(1)
        self.assertEqual(2, len(offsets_list))
        self.assertListEqual([5, 6, 7], offsets_list[0])
        self.assertListEqual([], offsets_list[1])

    def testSorted(self):
        # The fact that the ProfileManager sorts by filename is implicit in the
        # other tests. It is tested explicitly here.
        mgr = TestProfileManager({
            ProfileFile(40, 0): [1, 2, 3, 4],
            ProfileFile(150, 0): [9, 11, 13],
            ProfileFile(30, 1): [5, 6, 7]})
        offsets_list = mgr.GetRunGroupOffsets()
        self.assertEqual(2, len(offsets_list))
        self.assertListEqual([5, 6, 7, 1, 2, 3, 4], offsets_list[0])

    def testPhases(self):
        mgr = TestProfileManager({
            ProfileFile(40, 0): [],
            ProfileFile(150, 0): [],
            ProfileFile(30, 1): [],
            ProfileFile(30, 2): [],
            ProfileFile(30, 0): []})
        self.assertEqual(set([0, 1, 2]), mgr.GetPhases())

    def testGetAnnotatedOffsets(self):
        mgr = TestProfileManager({
            ProfileFile(40, 0, ''): [1, 2, 3],
            ProfileFile(50, 1, ''): [3, 4, 5],
            ProfileFile(51, 0, 'renderer'): [2, 3, 6],
            ProfileFile(51, 1, 'gpu-process'): [6, 7],
            ProfileFile(70, 0, ''): [2, 8, 9],
            ProfileFile(70, 1, ''): [9]})
        offsets = list(mgr.GetAnnotatedOffsets())
        self.assertListEqual([
            self.MakeAnnotatedOffset(1, {(0, 'browser'): 1}),
            self.MakeAnnotatedOffset(2, {(0, 'browser'): 2,
                                         (0, 'renderer'): 1}),
            self.MakeAnnotatedOffset(3, {(0, 'browser'): 1,
                                         (1, 'browser'): 1,
                                         (0, 'renderer'): 1}),
            self.MakeAnnotatedOffset(4, {(1, 'browser'): 1}),
            self.MakeAnnotatedOffset(5, {(1, 'browser'): 1}),
            self.MakeAnnotatedOffset(6, {(0, 'renderer'): 1,
                                         (1, 'gpu-process'): 1}),
            self.MakeAnnotatedOffset(7, {(1, 'gpu-process'): 1}),
            self.MakeAnnotatedOffset(8, {(0, 'browser'): 1}),
            self.MakeAnnotatedOffset(9, {(0, 'browser'): 1,
                                         (1, 'browser'): 1})],
            offsets)
        self.assertListEqual(['browser', 'renderer'],
                             sorted(offsets[1].Processes()))
        self.assertListEqual(['browser'], list(offsets[0].Processes()))
        self.assertListEqual([0], list(offsets[1].Phases()))
        self.assertListEqual([0, 1], sorted(offsets[2].Phases()))
        self.assertListEqual([0, 1], sorted(mgr.GetPhases()))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "b9372c93e2ccc5711e2552dbc9f82efe",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 79,
"avg_line_length": 43.390946502057616,
"alnum_prop": 0.6067905918057663,
"repo_name": "ric2b/Vivaldi-browser",
"id": "5c07797d50848ffd9f7017f2734949ba102b5372",
"size": "10730",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "chromium/tools/cygprofile/process_profiles_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Stuff that everybody else needs to know about."""
PLATFORM_WINDOWS_STARTS_WITH = 'win32'
PLATFORM_LINUX_STARTS_WITH = 'linux'
PLATFORM_MACOS_STARTS_WITH = 'darwin'
SEPRTR = ':' # Delimiter between library:component, distributor:field, etc.
# Default language used by GUI and spreadsheet generation and number presentation.
DEFAULT_LANGUAGE = 'en_US'
DEFAULT_CURRENCY = 'USD' # Default currency assigned.
# Default maximum column width for the cell adjust
DEF_MAX_COLUMN_W = 32
# Error codes
ERR_INTERNAL = 1 # Unhandled exceptions
ERR_ARGS = 2 # Command line arguments
ERR_KICADCONFIG = 3 # An error related to KiCad configuration
ERR_KICOSTCONFIG = 4 # An error related to KiCost configuration
ERR_SCRAPE = 5 # Error trying to get prices
ERR_INPUTFILE = 6 # Error parsing input files
ERR_FIELDS = 7 # Some inconsistency with the fields
# Warning codes
W_TRANS = '(WC001) ' # Problem with field translate
W_NOMANP = '(WC002) ' # No manf# or distributor#
W_CONF = '(WC003) ' # Problem during --un/setup
W_NOPURCH = '(WC004) ' # No valid field for purchase
W_NOQTY = '(WC005) ' # No valid qty for purchase
W_ASSQTY = '(WC006) ' # Assigned qty during scrape
W_NOINFO = '(WC007) ' # No info during scrape
NO_PRICE = '(WC008) ' # No price during scrape
W_BADPRICE = '(WC009) ' # Some problem with the local price tier
W_FLDOVR = '(WC010) ' # Field overwrite
W_DUPWRONG = '(WC011) ' # Inconsistency in duplicated data
W_INCQTY = '(WC012) ' # Inconsistency in qty
W_REPMAN = '(WC013) ' # Asking to repeat a manufacturer
W_MANQTY = '(WC014) ' # Malformed manf#_qty
W_AMBIPN = '(WC015) ' # Ambiguous mpn, needs better manf
W_LOCFAIL = '(WC016) ' # Failed to set the locale
W_APIFAIL = '(WC017) ' # Failed to init an API
W_CONFIG = '(WC018) ' # Config file warning
W_CMDLINE = '(WC019) ' # Command line warning
W_NOAPI = '(WC020) ' # No API with this name
# Data types for the options common to various APIs
BASE_OP_TYPES = {'enable': bool, 'cache_ttl': (int, float), 'cache_path': str}
class wxPythonNotPresent(Exception):
    # NOTE: the previous docstring ("failed retrieval of an HTML parse tree
    # for a part") was copy-pasted from an unrelated exception.
    '''Exception raised when the wxPython package is not available.'''
    pass
class KiCostError(Exception):
    '''Exception for any error while running kicost().

    Attributes:
        msg: Human readable error message.
        id: Numeric error code.
    '''

    def __init__(self, msg, id):
        # Explicit class in super() instead of `super(self.__class__, self)`,
        # which recurses infinitely if this class is ever subclassed.
        super(KiCostError, self).__init__(msg)
        self.msg = msg
        self.id = id
class DistData(object):
    '''@brief Data from a distributor related to a part.'''

    def __init__(self):
        self.part_num = None  # Distributor catalogue number.
        self.url = None  # Purchase URL at this distributor for the specific part.
        self.price_tiers = {}  # Price break tiers; [[qty1, price1][qty2, price2]...]
        self.qty_avail = None  # Available quantity.
        self.qty_avail_comment = None  # Optional comment about qty_avail.
        self.qty_increment = None
        # self.info_dist = None  # Currently unused.
        self.currency = None  # Default currency.
        self.moq = None  # Minimum order quantity allowed by the distributor.
        self.extra_info = {}
# Class for storing part group information.
class PartGroup(object):
    '''@brief Class to group components.'''

    def __init__(self):
        # None by default, here to avoid try/except in the code
        self.datasheet = None
        self.lifecycle = None
        self.specs = {}  # Miscellaneous data from the queries
        self.min_price = None  # Filled by the spreadsheet code, expressed in the main currency
        # Values derived from manf#_qty
        self.qty = None  # Quantity for each project, just a number if only 1 project
        self.qty_str = None  # Formulas to compute the quantity in the spreadsheet
        self.qty_total_spreadsheet = 0  # Total quantity for all projects for the spreadsheet
        # Distributor data
        self.dd = {}

    def update_specs(self, specs):
        """Merge `specs` ({code: (name, value)}) into self.specs, joining
        previously-seen names/values with ', ' instead of overwriting."""
        for code, (name, value) in specs.items():
            if code in self.specs:
                prev_name, prev_value = self.specs[code]
                # Append only when not already contained (substring check).
                if name not in prev_name:
                    name = prev_name + ', ' + name
                if prev_value is not None and value not in prev_value:
                    value = prev_value + ', ' + value
            self.specs[code] = (name, value)
| {
"content_hash": "f76445366390b649ae1f7426cf24df27",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 95,
"avg_line_length": 41.22857142857143,
"alnum_prop": 0.6465696465696466,
"repo_name": "hildogjr/KiCost",
"id": "b3dd191bac1943bb3b45865b6e5124bdf5dae78f",
"size": "5495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kicost/global_vars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1112"
},
{
"name": "HTML",
"bytes": "595"
},
{
"name": "Makefile",
"bytes": "2836"
},
{
"name": "Python",
"bytes": "554207"
},
{
"name": "Shell",
"bytes": "11027"
}
],
"symlink_target": ""
} |
def extractTheabyssdreamsBlogspotCom(item):
    '''
    Parser for 'theabyssdreams.blogspot.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    # (tag to match, release name, translation type)
    tagmap = [
        ('PRC', 'PRC', 'translated'),
        ('Loiterous', 'Loiterous', 'oel'),
    ]
    item_tags = item['tags']
    for tagname, name, tl_type in tagmap:
        if tagname not in item_tags:
            continue
        return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
    return False
| {
"content_hash": "1491907a8f3b8a964f5df40b99088fc9",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 26.904761904761905,
"alnum_prop": 0.6389380530973451,
"repo_name": "fake-name/ReadableWebProxy",
"id": "131ae392887c6d395cf4873fda15bb456be94dfb",
"size": "566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractTheabyssdreamsBlogspotCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
import os
import gym
import numpy as np
import tensorflow as tf
from gym import spaces
from collections import deque
def sample(logits):
    """Sample one index per row from a categorical distribution (Gumbel-max trick)."""
    gumbel = tf.random_uniform(tf.shape(logits))
    return tf.argmax(logits - tf.log(-tf.log(gumbel)), 1)
def cat_entropy(logits):
    """Entropy of a categorical distribution given unnormalized logits, per row."""
    a0 = logits - tf.reduce_max(logits, 1, keep_dims=True)  # shift for numerical stability
    ea0 = tf.exp(a0)
    z0 = tf.reduce_sum(ea0, 1, keep_dims=True)  # softmax partition function
    p0 = ea0 / z0  # probabilities
    return tf.reduce_sum(p0 * (tf.log(z0) - a0), 1)
def cat_entropy_softmax(p0):
    """Entropy given probabilities `p0`; the 1e-6 term guards against log(0)."""
    return - tf.reduce_sum(p0 * tf.log(p0 + 1e-6), axis = 1)
def mse(pred, target):
    """Element-wise HALF squared error (the 1/2 factor simplifies the gradient)."""
    return tf.square(pred-target)/2.
def ortho_init(scale=1.0):
    """Return an orthogonal weight initializer (lasagne-style) usable by TF.

    Supports 2-D (fully connected) and 4-D NHWC (convolution) shapes; the
    returned array is float32 and scaled by `scale`.
    """
    def _ortho_init(shape, dtype, partition_info=None):
        shape = tuple(shape)
        if len(shape) == 2:
            flat_shape = shape
        elif len(shape) == 4:  # NHWC conv kernel: flatten all but the last axis
            flat_shape = (np.prod(shape[:-1]), shape[-1])
        else:
            raise NotImplementedError
        gaussian = np.random.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(gaussian, full_matrices=False)
        # Both factors are orthogonal; keep the one matching flat_shape.
        q = u if u.shape == flat_shape else v
        q = q.reshape(shape)
        return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
    return _ortho_init
def conv(x, scope, nf, rf, stride, pad='VALID', act=tf.nn.relu, init_scale=1.0):
    """2-D conv layer: rf x rf kernel, nf output filters, followed by `act`."""
    with tf.variable_scope(scope):
        nin = x.get_shape()[3].value  # input channels (NHWC)
        w = tf.get_variable("w", [rf, rf, nin, nf], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nf], initializer=tf.constant_initializer(0.0))
        z = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding=pad) + b
        return act(z)
def fc(x, scope, nh, act=tf.nn.relu, init_scale=1.0):
    """Fully connected layer with `nh` units followed by `act`."""
    with tf.variable_scope(scope):
        nin = x.get_shape()[1].value
        w = tf.get_variable("w", [nin, nh], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh], initializer=tf.constant_initializer(0.0))
        return act(tf.matmul(x, w) + b)
def batch_to_seq(h, nbatch, nsteps, flat=False):
    """Split a flat (nbatch*nsteps, ...) tensor into a per-step list of tensors."""
    target_shape = [nbatch, nsteps] if flat else [nbatch, nsteps, -1]
    reshaped = tf.reshape(h, target_shape)
    steps = tf.split(axis=1, num_or_size_splits=nsteps, value=reshaped)
    return [tf.squeeze(step, [1]) for step in steps]
def seq_to_batch(h, flat = False):
    """Inverse of batch_to_seq: merge a per-step list back into one batch tensor."""
    if flat:
        return tf.reshape(tf.stack(values=h, axis=1), [-1])
    shape = h[0].get_shape().as_list()
    assert(len(shape) > 1)
    nh = h[0].get_shape()[-1].value
    return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
    """Run an LSTM over a list of per-step inputs.

    Args:
        xs: list (length nsteps) of input tensors, each (nbatch, nin);
            overwritten in place with the per-step hidden outputs.
        ms: list (length nsteps) of done masks; a mask of 1 resets the
            recurrent state before that step.
        s: initial state, (nbatch, 2*nh): concatenated cell and hidden.
        scope: variable scope name for the weights.
        nh: number of hidden units.
        init_scale: scale for the orthogonal weight initializer.

    Returns:
        (xs, s): per-step hidden outputs and the final packed state.
    """
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)  # reset state where the episode ended
        h = h*(1-m)
        z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)  # four gates
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u  # next cell state
        h = o*tf.tanh(c)  # next hidden output
        xs[idx] = h  # outputs are written back into the input list
    s = tf.concat(axis=1, values=[c, h])
    return xs, s
def _ln(x, g, b, e=1e-5, axes=[1]):
    """Layer normalization: re-center/re-scale `x` with gain `g` and bias `b`.

    NOTE(review): the mutable default `axes=[1]` is shared across calls; it is
    safe only because it is never mutated.
    """
    u, s = tf.nn.moments(x, axes=axes, keep_dims=True)  # mean and variance along `axes`
    x = (x-u)/tf.sqrt(s+e)  # normalize (e guards against zero variance)
    x = x*g+b  # element-wise gain and bias (broadcast over dim 0)
    return x
def lnlstm(xs, ms, s, scope, nh, init_scale=1.0):
    """LSTM with layer normalization on the input, recurrent and cell paths.

    Same contract as `lstm`: xs is a length-nsteps list of (nbatch, nin)
    inputs (overwritten in place with the hidden outputs), ms the per-step
    done masks, s the (nbatch, 2*nh) packed state. Returns (xs, final state).
    """
    nbatch, nin = [v.value for v in xs[0].get_shape()]
    nsteps = len(xs)
    with tf.variable_scope(scope):
        wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
        gx = tf.get_variable("gx", [nh*4], initializer=tf.constant_initializer(1.0))
        bx = tf.get_variable("bx", [nh*4], initializer=tf.constant_initializer(0.0))
        wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
        gh = tf.get_variable("gh", [nh*4], initializer=tf.constant_initializer(1.0))
        bh = tf.get_variable("bh", [nh*4], initializer=tf.constant_initializer(0.0))
        b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
        gc = tf.get_variable("gc", [nh], initializer=tf.constant_initializer(1.0))
        bc = tf.get_variable("bc", [nh], initializer=tf.constant_initializer(0.0))
    c, h = tf.split(axis=1, num_or_size_splits=2, value=s)  # cell and hidden states
    for idx, (x, m) in enumerate(zip(xs, ms)):
        c = c*(1-m)  # reset state where the episode ended (mask broadcasts)
        h = h*(1-m)
        z = _ln(tf.matmul(x, wx), gx, bx) + _ln(tf.matmul(h, wh), gh, bh) + b
        i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)  # four gates
        i = tf.nn.sigmoid(i)
        f = tf.nn.sigmoid(f)
        o = tf.nn.sigmoid(o)
        u = tf.tanh(u)
        c = f*c + i*u  # next cell state
        h = o*tf.tanh(_ln(c, gc, bc))  # next hidden output (normalized cell)
        xs[idx] = h  # outputs are written back into the input list
    s = tf.concat(axis=1, values=[c, h])  # final packed state
    return xs, s
def conv_to_fc(x):
    """Flatten all non-batch dimensions of a convolutional output."""
    nh = np.prod([v.value for v in x.get_shape()[1:]])
    return tf.reshape(x, [-1, nh])
def discount_with_dones(rewards, dones, gamma):
    """Discounted returns over a trajectory; a truthy `done` stops bootstrapping."""
    running = 0
    discounted = []
    for reward, done in zip(reversed(rewards), reversed(dones)):
        running = reward + gamma * running * (1. - done)  # fixed off by one bug
        discounted.append(running)
    discounted.reverse()
    return discounted
def find_trainable_variables(key):
    """Return trainable variables, entering variable scope `key` first.

    NOTE(review): tf.trainable_variables() is called with no scope argument;
    confirm it is actually restricted to `key` rather than returning all
    trainable variables in the graph.
    """
    with tf.variable_scope(key):
        return tf.trainable_variables()
def make_path(f):
    """Create directory `f` (and any missing parents); no error if it exists."""
    os.makedirs(f, exist_ok=True)
def constant(p):
    """Schedule multiplier that ignores training progress `p`."""
    return 1
def linear(p):
    """Schedule multiplier that decays linearly from 1 to 0 as `p` goes 0 to 1."""
    return 1 - p
# Mapping from schedule name to a multiplier function of progress p in [0, 1].
schedules = {
    'linear':linear,
    'constant':constant
}
class Scheduler(object):
    """Anneal a base value `v` over `nvalues` steps using a named schedule."""

    def __init__(self, v, nvalues, schedule):
        self.n = 0.  # number of value() calls so far
        self.v = v
        self.nvalues = nvalues
        self.schedule = schedules[schedule]

    def value(self):
        """Return the current annealed value and advance the step counter."""
        out = self.v * self.schedule(self.n / self.nvalues)
        self.n += 1.
        return out

    def value_steps(self, steps):
        """Annealed value at an explicit step count; does not change state."""
        return self.v * self.schedule(steps / self.nvalues)
class EpisodeStats:
    """Rolling length/reward statistics over the last 40 finished episodes."""

    def __init__(self, nsteps, nenvs):
        self.episode_rewards = [[] for _ in range(nenvs)]
        self.lenbuffer = deque(maxlen=40)  # rolling buffer for episode lengths
        self.rewbuffer = deque(maxlen=40)  # rolling buffer for episode rewards
        self.nsteps = nsteps
        self.nenvs = nenvs

    def feed(self, rewards, masks):
        """Accumulate an (nenvs * nsteps) batch; a truthy mask ends an episode."""
        rewards = np.reshape(rewards, [self.nenvs, self.nsteps])
        masks = np.reshape(masks, [self.nenvs, self.nsteps])
        for env in range(self.nenvs):
            for step in range(self.nsteps):
                self.episode_rewards[env].append(rewards[env][step])
                if masks[env][step]:
                    self.lenbuffer.append(len(self.episode_rewards[env]))
                    self.rewbuffer.append(sum(self.episode_rewards[env]))
                    self.episode_rewards[env] = []

    def mean_length(self):
        # 0 until the first episode finishes (first params dump).
        return np.mean(self.lenbuffer) if self.lenbuffer else 0

    def mean_reward(self):
        return np.mean(self.rewbuffer) if self.rewbuffer else 0
# For ACER
# For ACER
def get_by_index(x, idx):
    """Gather x[i, idx[i]] for every row i of a 2-D tensor."""
    assert(len(x.get_shape()) == 2)
    assert(len(idx.get_shape()) == 1)
    flat_indices = tf.range(0, x.shape[0]) * x.shape[1] + idx
    # Flatten x and pick the per-row elements via the flattened indices.
    return tf.gather(tf.reshape(x, [-1]), flat_indices)
def check_shape(ts, shapes):
    """Assert each tensor's static shape matches the corresponding entry of `shapes`."""
    for i, (t, shape) in enumerate(zip(ts, shapes)):
        assert t.get_shape().as_list() == shape, "id " + str(i) + " shape " + str(t.get_shape()) + str(shape)
def avg_norm(t):
    """Mean (over the batch) of the L2 norm taken along the last axis of `t`."""
    return tf.reduce_mean(tf.sqrt(tf.reduce_sum(tf.square(t), axis=-1)))
def myadd(g1, g2, param):
    """Sum two possibly-None gradients for `param`; at least one must be set."""
    print([g1, g2, param.name])
    assert (not (g1 is None and g2 is None)), param.name
    if g1 is None:
        return g2
    if g2 is None:
        return g1
    return g1 + g2
def my_explained_variance(qpred, q):
    """Explained variance 1 - Var[q - qpred] / Var[q], reduced over axes [0, 1]."""
    _, vary = tf.nn.moments(q, axes=[0, 1])
    _, varpred = tf.nn.moments(q - qpred, axes=[0, 1])
    check_shape([vary, varpred], [[]] * 2)  # both must be scalars
    return 1.0 - (varpred / vary)
| {
"content_hash": "8a2c4b6838ca4a6de4179f7ee4263e01",
"timestamp": "",
"source": "github",
"line_count": 255,
"max_line_length": 148,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.5880270692347735,
"repo_name": "feiwang3311/baseline",
"id": "c80f4d412a5dc49eab8629af9f8cd17b223cc317",
"size": "9605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baselines/a2c/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "380475"
}
],
"symlink_target": ""
} |
"""
Option Parser sorting module.
This module implements a sorting method for options in a
configuration file.
"""
from operator import itemgetter, attrgetter, methodcaller
class Options:
    """Collection of options that can be sorted so variable definitions
    come before their first use."""

    def __init__(self):
        self.options = []

    def insert(self, option):
        """Append `option` to the collection."""
        self.options.append(option)

    def __repr__(self):
        return repr(self.options)

    def dict(self):
        """Return an unstructured {key: value} dict of the held options."""
        return {option.key: option.value for option in self.options}

    def compare(self, comparable):
        """Sort key for one option: 1 if it references a variable defined by
        another held option (so it must sort after the definition), else 0."""
        var = comparable.getvar()  # hoisted: getvar() is pure string work
        if var is None:  # `is None` instead of `== None`; non-variable options pass
            return 0
        for option in self.options:
            if var == option.key:
                return 1  # references a locally defined variable: sort later
        return 0  # non-resolvable variables can stay early

    def sort(self):
        """Sort options so definitions precede uses; return the sorted list."""
        self.options = sorted(self.options, key=self.compare)
        return self.options
class Option:
    """A single key=value option from one section of a ConfigParser file."""

    def __init__(self, key, value):
        self.key = key.strip()
        self.value = value.strip()

    def __repr__(self):
        return self.key

    def getvar(self):
        """Extract the variable name referenced by the value.

        For 'prefix$var/rest' this returns 'var'; a value with no '$'
        yields the empty string.
        """
        after_dollar = "".join(self.value.split("$")[1:])
        return after_dollar.split("/")[0]
def reorder(fname):
    """Reorder fields in a configuration file so that assignments of
    variables come before use.

    The file is rewritten in place. Lines that are not a section header,
    a comment, or a key=value pair are dropped, as are free-standing
    comments.
    """
    configresult = {}
    section = ""
    configresult[section] = Options()
    # Context manager guarantees the file is closed even if parsing fails
    # (the original left the handle open on any exception).
    with open(fname, 'r+') as fp:
        for line in fp.readlines():
            line = line.strip()
            if line.startswith("["):
                # New section
                section = line
                configresult[section] = Options()
            elif line.startswith("#"):
                pass
                # Lonely comments are removed
            else:
                # Store an option
                try:
                    key, value = line.split("=")
                    configresult[section].insert(Option(key, value))
                except ValueError:
                    pass  # Ignore malformed lines (no '=' or several '=')
        fp.seek(0)
        fp.truncate()
        for section in configresult:
            fp.write("{}\n".format(section))
            configresult[section].sort()  # Sort options in this section
            for option in configresult[section].options:
                fp.write("{}={}\n".format(option.key, option.value))
| {
"content_hash": "959f1ada5df2f72b8fe3328f0bfe207e",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 68,
"avg_line_length": 26.055555555555557,
"alnum_prop": 0.5540156361051883,
"repo_name": "josrolgil/exjobbCalvin",
"id": "a7ff09acaa9696ef8176ce3ba8ff4f3de2cc24ac",
"size": "3435",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "calvin/utilities/confsort.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1727"
},
{
"name": "HTML",
"bytes": "7958"
},
{
"name": "JavaScript",
"bytes": "59355"
},
{
"name": "Python",
"bytes": "1579174"
},
{
"name": "Shell",
"bytes": "12920"
}
],
"symlink_target": ""
} |
"""Internal information about the audio plugin."""
from tensorboard.compat.proto import summary_pb2
from tensorboard.plugins.audio import plugin_data_pb2
# Name under which the audio plugin registers its summaries.
PLUGIN_NAME = "audio"
# The most recent value for the `version` field of the `AudioPluginData`
# proto.
PROTO_VERSION = 0
# Expose the `Encoding` enum constants.
Encoding = plugin_data_pb2.AudioPluginData.Encoding
def create_summary_metadata(
    display_name, description, encoding, *, converted_to_tensor=None
):
    """Create a `SummaryMetadata` proto for audio plugin data.

    Arguments:
      display_name: Display name stored on the metadata.
      description: Summary description stored on the metadata.
      encoding: An `Encoding` enum value (`AudioPluginData.Encoding`).
      converted_to_tensor: Optional flag recorded in the plugin content;
        presumably marks summaries converted to tensor form — confirm with
        the audio plugin reader.

    Returns:
      A `SummaryMetadata` protobuf object.
    """
    content = plugin_data_pb2.AudioPluginData(
        version=PROTO_VERSION,
        encoding=encoding,
        converted_to_tensor=converted_to_tensor,
    )
    metadata = summary_pb2.SummaryMetadata(
        display_name=display_name,
        summary_description=description,
        plugin_data=summary_pb2.SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME, content=content.SerializeToString()
        ),
    )
    return metadata
def parse_plugin_metadata(content):
    """Parse summary metadata to a Python object.

    Arguments:
      content: The `content` field of a `SummaryMetadata` proto
        corresponding to the audio plugin.

    Returns:
      An `AudioPluginData` protobuf object.

    Raises:
      TypeError: If `content` is not a bytestring.
    """
    if not isinstance(content, bytes):
        raise TypeError("Content type must be bytes")
    result = plugin_data_pb2.AudioPluginData.FromString(content)
    # Version 0 is the only known version, so no migrations are needed;
    # the original generated code branched on `version == 0` but returned
    # the same object on every path.
    return result
| {
"content_hash": "e000573d75a3fd2edfbe5eed83d6e1bf",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 72,
"avg_line_length": 29.017857142857142,
"alnum_prop": 0.696,
"repo_name": "tensorflow/tensorboard",
"id": "5ab35efb162b36b60e99681a5b3b1a92dbda9462",
"size": "2314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorboard/plugins/audio/metadata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16222"
},
{
"name": "Dockerfile",
"bytes": "1226"
},
{
"name": "HTML",
"bytes": "154824"
},
{
"name": "Java",
"bytes": "20643"
},
{
"name": "JavaScript",
"bytes": "11869"
},
{
"name": "Jupyter Notebook",
"bytes": "7697"
},
{
"name": "Python",
"bytes": "2922179"
},
{
"name": "Rust",
"bytes": "311041"
},
{
"name": "SCSS",
"bytes": "136834"
},
{
"name": "Shell",
"bytes": "36731"
},
{
"name": "Starlark",
"bytes": "541743"
},
{
"name": "TypeScript",
"bytes": "5930550"
}
],
"symlink_target": ""
} |
import sys
input_file = sys.argv[1]
##### FASTA File
if (input_file.endswith('fastq')):
from fastAQ.fastqInfo import FastqParser
out = FastqParser(input_file)
    ### Print sequence names
    def seqNames():
        """Return the sequence names parsed from the FASTQ file."""
        return out.seqNames()
## Get sequences for the names in the file.
def seq():
try:
nameFile = open(sys.argv[3])
except:
return "Error : Provide a file containing names of the sequences."
for line in nameFile:
record = line.strip()
print out.sequenceDict()[record]
return ''
## Base Qualities
def qual():
try:
nameFile = open(sys.argv[3])
except:
return "Error : Provide a file containing names of the sequences."
for line in nameFile:
record = line.strip()
print out.baseQualities()[record]
return ''
## Trimming given sequences accroding to the intervals.
def trim():
try:
nameFile = open(sys.argv[3])
intervalFile = open(sys.argv[4])
except:
return "Error : Sequence and/or intervals file missing."
for line1, line2 in zip(nameFile, intervalFile):
record1 = line1.strip()
record2 = eval(line2)
byInterval = True
print out.trimSeq(record1, byInterval, interval=record2)
return ''
## Trim sequences in the file by Mott's algo.
def mottTrim():
try:
nameFile = open(sys.argv[3])
limitValue = sys.argv[4].split('=')[1]
except:
return "Error : Sequence file missing."
for line in nameFile:
record = line.strip()
print out.trimSeq(name=record, mott=True, limitValue=float(limitValue))
return ''
## Trim sequences in the file by removing low quality bases.
def trimLowQuality():
try:
nameFile = open(sys.argv[3])
qualityCutOff = sys.argv[4].split('=')[1]
except:
return "Error : Sequence file missing."
for line in nameFile:
record = line.strip()
print out.trimSeq(name=record, qualityCutOff = float(qualityCutOff))
return ''
## Mask Sequences in the file.
def mask():
try:
nameFile = open(sys.argv[3])
intervalFile = open(sys.argv[4])
except:
return "Error : Sequence and/or intervals file missing."
for line1, line2 in zip(nameFile, intervalFile):
record1 = line1.strip()
record2 = eval(line2)
print out.maskSeq(name=record1, interval=record2)
return ''
## reverse complement the sequences in the given file.
def reverseComplement():
try:
nameFile = open(sys.argv[3])
except:
return "Error : Provide a file containing names of the sequences."
for line in nameFile:
record = line.strip()
print out.reverseComplement(record)
return ''
# trim all the sequences according to the interval
def trimAll():
try:
intervalStart = sys.argv[3].split('=')[1]
intervalEnd = sys.argv[4].split('=')[1]
except:
return "Error : Interval for trimming mising."
interval = (int(intervalStart), int(intervalEnd))
intervals = [interval for x in out.seqNames()]
return out.trimAll(byInterval=True, intervals=intervals)
## Mott trim all the sequences.
def mottTrimAll():
try:
limitValue = sys.argv[3].split('=')[1]
except:
return "Error : limit Value missing."
return out.trimAll(mott=True, limitValue=float(limitValue))
# trim low quality bases from all the sequences.
def trimAllLowQuality():
try:
qualityCutOff = sys.argv[3].split('=')[1]
except:
return "Error : qualityCutOff missing."
return out.trimAll(float(qualityCutOff))
# mask all the sequences.
def maskAll():
try:
intervalStart = sys.argv[3].split('=')[1]
intervalEnd = sys.argv[4].split('=')[1]
except:
return "Interval is missing."
interval = (int(intervalStart), int(intervalEnd))
intervals = [interval for x in out.seqNames()]
return out.maskAll(intervals)
    # reverse complement all the sequences.
    def reverseComplementAll():
        """Return the reverse complement of every sequence in the file."""
        return out.reverseComplementAll()
    # Map each command-line flag to its handler function.
    optionsDict = {'-seqNames':seqNames, '-seq':seq, '-qual':qual, '-trim':trim, '-trimLowQuality':trimLowQuality,
    '-mottTrim':mottTrim, '-mask':mask, '-trimAll':trimAll, '-mottTrimAll':mottTrimAll, '-trimAllLowQuality':trimAllLowQuality,
    '-maskAll':maskAll, '-reverseComplement': reverseComplement, '-reverseComplementAll':reverseComplementAll}
    #####Choose Options
    # Dispatch on the flag in argv[2] and print whatever the handler returns.
    options = sys.argv[2]
    print optionsDict[options]()
| {
"content_hash": "a4e0be7372bb6463b3da7b9d46f16c39",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 124,
"avg_line_length": 22.322916666666668,
"alnum_prop": 0.6768548763415773,
"repo_name": "Jverma/fastAQ",
"id": "bcd833f8c03c55493f2d08236980c63ff6c22f86",
"size": "4286",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastAQ/commandLineTools/fastq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23594"
}
],
"symlink_target": ""
} |
MAX_MAP_ASYNC_THREADS = 20
try:
import Queue
except ImportError:
import queue as Queue
import sys
import threading
try:
import eventlet
import eventlet.greenpool
import eventlet.tpool
import eventlet.patcher
_has_eventlet = True
import eventlet.debug
eventlet.debug.hub_exceptions(False)
except ImportError:
_has_eventlet = False
from mapproxy.config import base_config
from mapproxy.config import local_base_config
from mapproxy.compat import PY2
import logging
log_system = logging.getLogger('mapproxy.system')
class AsyncResult(object):
    """Container for one task's outcome: a result or captured exception info."""

    def __init__(self, result=None, exception=None):
        # Typically only one of the two is set; both default to None.
        self.result = result
        self.exception = exception

    def __repr__(self):
        return "<AsyncResult result='{0}' exception='{1}'>".format(
            self.result, self.exception)
def _result_iter(results, use_result_objects=False):
for result in results:
if use_result_objects:
exception = None
if (isinstance(result, tuple) and len(result) == 3 and
isinstance(result[1], Exception)):
exception = result
result = None
yield AsyncResult(result, exception)
else:
yield result
class EventletPool(object):
    """Pool that runs tasks as eventlet greenthreads.

    Mirrors ThreadPool's interface so callers can use either backend.
    """

    def __init__(self, size=100):
        # Maximum number of concurrent greenthreads.
        self.size = size
        # Snapshot the creator's configuration so tasks run under it.
        self.base_config = base_config()

    def shutdown(self, force=False):
        # there is no way to stop a GreenPool
        pass

    def map(self, func, *args, **kw):
        """Like imap, but materializes all results into a list."""
        return list(self.imap(func, *args, **kw))

    def imap(self, func, *args, **kw):
        """Apply ``func`` over zipped ``args``; yields results in order."""
        use_result_objects = kw.get('use_result_objects', False)
        def call(*args):
            # Every task runs under the pool creator's configuration.
            with local_base_config(self.base_config):
                try:
                    return func(*args)
                except Exception:
                    if use_result_objects:
                        # Captured exc_info is unwrapped by _result_iter.
                        return sys.exc_info()
                    else:
                        raise
        if len(args[0]) == 1:
            # Single task: run inline, but yield control to the hub once.
            eventlet.sleep()
            return _result_iter([call(*list(zip(*args))[0])], use_result_objects)
        pool = eventlet.greenpool.GreenPool(self.size)
        return _result_iter(pool.imap(call, *args), use_result_objects)

    def starmap(self, func, args, **kw):
        """Call ``func`` once per argument tuple in ``args``."""
        use_result_objects = kw.get('use_result_objects', False)
        def call(*args):
            with local_base_config(self.base_config):
                try:
                    return func(*args)
                except Exception:
                    if use_result_objects:
                        return sys.exc_info()
                    else:
                        raise
        if len(args) == 1:
            eventlet.sleep()
            return _result_iter([call(*args[0])], use_result_objects)
        pool = eventlet.greenpool.GreenPool(self.size)
        return _result_iter(pool.starmap(call, args), use_result_objects)

    def starcall(self, args, **kw):
        """Each item of ``args`` is ``(func, *call_args)``."""
        use_result_objects = kw.get('use_result_objects', False)
        def call(func, *args):
            with local_base_config(self.base_config):
                try:
                    return func(*args)
                except Exception:
                    if use_result_objects:
                        return sys.exc_info()
                    else:
                        raise
        if len(args) == 1:
            eventlet.sleep()
            return _result_iter([call(args[0][0], *args[0][1:])], use_result_objects)
        pool = eventlet.greenpool.GreenPool(self.size)
        return _result_iter(pool.starmap(call, args), use_result_objects)
class ThreadWorker(threading.Thread):
    """Worker thread that executes tasks from a shared queue.

    A task is ``(exec_id, func, args)``; ``None`` is the shutdown sentinel.
    Results (or captured exc_info tuples) are pushed to ``result_queue``
    keyed by ``exec_id`` so the consumer can restore submission order.
    """

    def __init__(self, task_queue, result_queue):
        threading.Thread.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue
        # Snapshot the creator's configuration for use inside the thread.
        self.base_config = base_config()

    def run(self):
        with local_base_config(self.base_config):
            while True:
                task = self.task_queue.get()
                if task is None:
                    # Shutdown sentinel; ack it so task_queue.join() returns.
                    self.task_queue.task_done()
                    break
                exec_id, func, args = task
                try:
                    result = func(*args)
                except Exception:
                    # Ship exc_info to the consumer instead of dying silently.
                    result = sys.exc_info()
                self.result_queue.put((exec_id, result))
                self.task_queue.task_done()
def _consume_queue(queue):
"""
Get all items from queue.
"""
while not queue.empty():
try:
queue.get(block=False)
queue.task_done()
except Queue.Empty:
pass
class ThreadPool(object):
    """Thread pool that executes queued tasks and yields results in
    submission order.

    With ``pool_size < 2`` no threads are started and tasks run
    synchronously in the calling thread.
    """

    def __init__(self, size=4):
        self.pool_size = size
        self.task_queue = Queue.Queue()
        self.result_queue = Queue.Queue()
        self.pool = None

    def map_each(self, func_args, raise_exceptions):
        """
        args should be a list of function arg tuples.
        map_each calls each function with the given arg.
        """
        if self.pool_size < 2:
            for func, arg in func_args:
                try:
                    yield func(*arg)
                except Exception:
                    yield sys.exc_info()
            # BUGFIX: must be a plain `return`. The original
            # `raise StopIteration()` inside a generator escapes as
            # RuntimeError under PEP 479 (Python 3.7+).
            return
        self.pool = self._init_pool()
        i = 0
        for i, (func, arg) in enumerate(func_args):
            self.task_queue.put((i, func, arg))
        results = {}
        next_result = 0
        # Yield whatever results are already available, in order.
        for value in self._get_results(next_result, results, raise_exceptions):
            yield value
            next_result += 1
        self.task_queue.join()
        # Drain the remaining results once all tasks have completed.
        for value in self._get_results(next_result, results, raise_exceptions):
            yield value
            next_result += 1
        self.shutdown()

    def _single_call(self, func, args, use_result_objects):
        # Fast path for a single task: call inline, no threads involved.
        try:
            result = func(*args)
        except Exception:
            if not use_result_objects:
                raise
            result = sys.exc_info()
        return _result_iter([result], use_result_objects)

    def map(self, func, *args, **kw):
        """Like imap, but materializes all results into a list."""
        return list(self.imap(func, *args, **kw))

    def imap(self, func, *args, **kw):
        """Apply ``func`` over zipped ``args``; yields results in order."""
        use_result_objects = kw.get('use_result_objects', False)
        if len(args[0]) == 1:
            return self._single_call(func, next(iter(zip(*args))), use_result_objects)
        return _result_iter(self.map_each([(func, arg) for arg in zip(*args)], raise_exceptions=not use_result_objects),
            use_result_objects)

    def starmap(self, func, args, **kw):
        """Call ``func`` once per argument tuple in ``args``."""
        use_result_objects = kw.get('use_result_objects', False)
        if len(args[0]) == 1:
            return self._single_call(func, args[0], use_result_objects)
        return _result_iter(self.map_each([(func, arg) for arg in args], raise_exceptions=not use_result_objects),
            use_result_objects)

    def starcall(self, args, **kw):
        """Each item of ``args`` is ``(func, *call_args)``."""
        def call(func, *args):
            return func(*args)
        return self.starmap(call, args, **kw)

    def _get_results(self, next_result, results, raise_exceptions):
        # Buffer out-of-order results and emit them in submission order.
        for i, value in self._fetch_results(raise_exceptions):
            if i == next_result:
                yield value
                next_result += 1
                while next_result in results:
                    yield results.pop(next_result)
                    next_result += 1
            else:
                results[i] = value

    def _fetch_results(self, raise_exceptions):
        while not self.task_queue.empty() or not self.result_queue.empty():
            task_result = self.result_queue.get()
            if (raise_exceptions and isinstance(task_result[1], tuple) and
                    len(task_result[1]) == 3 and
                    isinstance(task_result[1][1], Exception)):
                # A worker captured sys.exc_info(); re-raise it here.
                self.shutdown(force=True)
                exc_class, exc, tb = task_result[1]
                if PY2:
                    exec('raise exc_class, exc, tb')
                else:
                    raise exc.with_traceback(tb)
            yield task_result

    def shutdown(self, force=False):
        """
        Send shutdown sentinel to all executor threads. If `force` is True,
        clean task_queue and result_queue.
        """
        if force:
            _consume_queue(self.task_queue)
            _consume_queue(self.result_queue)
        for _ in range(self.pool_size):
            self.task_queue.put(None)

    def _init_pool(self):
        if self.pool_size < 2:
            # Synchronous mode: no worker threads are needed.
            return []
        pool = []
        for _ in range(self.pool_size):
            t = ThreadWorker(self.task_queue, self.result_queue)
            t.daemon = True
            t.start()
            pool.append(t)
        return pool
def imap_async_eventlet(func, *args):
    """Map ``func`` over ``args`` concurrently using a fresh eventlet pool."""
    return EventletPool().imap(func, *args)
def imap_async_threaded(func, *args):
    """Map ``func`` over ``args`` using a bounded thread pool."""
    size = min(len(args[0]), MAX_MAP_ASYNC_THREADS)
    return ThreadPool(size).imap(func, *args)
def starmap_async_eventlet(func, args):
    """Call ``func`` once per argument tuple via an eventlet pool."""
    return EventletPool().starmap(func, args)
def starmap_async_threaded(func, args):
    """Call ``func`` once per argument tuple via a bounded thread pool."""
    size = min(len(args[0]), MAX_MAP_ASYNC_THREADS)
    return ThreadPool(size).starmap(func, args)
def starcall_async_eventlet(args):
    """Run each ``(func, *call_args)`` tuple via an eventlet pool."""
    return EventletPool().starcall(args)
def starcall_async_threaded(args):
    """Run each ``(func, *call_args)`` tuple via a bounded thread pool."""
    size = min(len(args[0]), MAX_MAP_ASYNC_THREADS)
    return ThreadPool(size).starcall(args)
def run_non_blocking_eventlet(func, args, kw=None):
    """Run ``func`` in eventlet's OS-thread pool so it cannot block greenlets.

    ``kw`` defaults to None instead of the original mutable ``{}`` default
    (shared across calls); behavior for callers is unchanged.
    """
    return eventlet.tpool.execute(func, *args, **(kw or {}))
def run_non_blocking_threaded(func, args, kw=None):
    """Call ``func`` directly; real threads already avoid blocking the caller.

    ``kw`` defaults to None instead of the original mutable ``{}`` default
    (shared across calls); behavior for callers is unchanged.
    """
    return func(*args, **(kw or {}))
def import_module(module):
    """
    Import ``module``. Import patched version if eventlet is used.
    """
    if uses_eventlet:
        # Green variant with monkey-patched blocking primitives.
        return eventlet.import_patched(module)
    return __import__(module)
uses_eventlet = False
# socket should be monkey patched when MapProxy runs inside eventlet
if _has_eventlet and eventlet.patcher.is_monkey_patched('socket'):
    uses_eventlet = True
    log_system.info('using eventlet for asynchronous operations')
    # Green (cooperative) implementations.
    imap = imap_async_eventlet
    starmap = starmap_async_eventlet
    starcall = starcall_async_eventlet
    Pool = EventletPool
    run_non_blocking = run_non_blocking_eventlet
else:
    # Plain thread-based implementations.
    imap = imap_async_threaded
    starmap = starmap_async_threaded
    starcall = starcall_async_threaded
    Pool = ThreadPool
    run_non_blocking = run_non_blocking_threaded
| {
"content_hash": "d01f8843739791aefd91d1a7986db93e",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 120,
"avg_line_length": 32.12232415902141,
"alnum_prop": 0.5632140137090632,
"repo_name": "vrsource/mapproxy",
"id": "f0e98c0a0018e3a24d140d81e4c972a246a0aea7",
"size": "11149",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mapproxy/util/async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "HTML",
"bytes": "18782"
},
{
"name": "Makefile",
"bytes": "1045"
},
{
"name": "Python",
"bytes": "1760225"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, Iterable, Optional, TypeVar, Union
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    storage_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request for one storage resource of a Spring service."""
    api_version = kwargs.pop('api_version', "2022-05-01-preview")  # type: str
    accept = "application/json"

    # URL: fill the ARM path template with the serialized path parameters.
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}")  # pylint: disable=line-too-long
    _url = _format_url_section(_url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
        "storageName": _SERIALIZER.url("storage_name", storage_name, 'str'),
    })

    # Query string.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_create_or_update_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    storage_name: str,
    *,
    json: JSONType = None,
    content: Any = None,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial PUT request for creating or updating a storage resource."""
    api_version = kwargs.pop('api_version', "2022-05-01-preview")  # type: str
    content_type = kwargs.pop('content_type', None)  # type: Optional[str]
    accept = "application/json"

    # URL: fill the ARM path template with the serialized path parameters.
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}")  # pylint: disable=line-too-long
    _url = _format_url_section(_url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
        "storageName": _SERIALIZER.url("storage_name", storage_name, 'str'),
    })

    # Query string.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers; Content-Type only when a body serialization was chosen.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    if content_type is not None:
        _headers['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="PUT",
        url=_url,
        params=_params,
        headers=_headers,
        json=json,
        content=content,
        **kwargs
    )
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    storage_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the initial DELETE request for one storage resource."""
    api_version = kwargs.pop('api_version', "2022-05-01-preview")  # type: str
    accept = "application/json"

    # URL: fill the ARM path template with the serialized path parameters.
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}")  # pylint: disable=line-too-long
    _url = _format_url_section(_url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
        "storageName": _SERIALIZER.url("storage_name", storage_name, 'str'),
    })

    # Query string.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="DELETE",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
def build_list_request(
    subscription_id: str,
    resource_group_name: str,
    service_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the GET request that lists all storages of a Spring service."""
    api_version = kwargs.pop('api_version', "2022-05-01-preview")  # type: str
    accept = "application/json"

    # URL: fill the ARM path template with the serialized path parameters.
    _url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages")  # pylint: disable=line-too-long
    _url = _format_url_section(_url, **{
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
        "serviceName": _SERIALIZER.url("service_name", service_name, 'str'),
    })

    # Query string.
    _params = kwargs.pop("params", {})  # type: Dict[str, Any]
    _params['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')

    # Headers.
    _headers = kwargs.pop("headers", {})  # type: Dict[str, Any]
    _headers['Accept'] = _SERIALIZER.header("accept", accept, 'str')

    return HttpRequest(
        method="GET",
        url=_url,
        params=_params,
        headers=_headers,
        **kwargs
    )
class StoragesOperations(object):
"""StoragesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.appplatform.v2022_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client used to send requests.
        self._client = client
        # Serializer for request bodies; deserializer for responses.
        self._serialize = serializer
        self._deserialize = deserializer
        # Service client configuration (subscription id, polling interval, ...).
        self._config = config
@distributed_trace
def get(
self,
resource_group_name: str,
service_name: str,
storage_name: str,
**kwargs: Any
) -> "_models.StorageResource":
"""Get the storage resource.
:param resource_group_name: The name of the resource group that contains the resource. You can
obtain this value from the Azure Resource Manager API or the portal.
:type resource_group_name: str
:param service_name: The name of the Service resource.
:type service_name: str
:param storage_name: The name of the storage resource.
:type storage_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageResource, or the result of cls(response)
:rtype: ~azure.mgmt.appplatform.v2022_05_01_preview.models.StorageResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-05-01-preview") # type: str
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('StorageResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}"} # type: ignore
def _create_or_update_initial(
self,
resource_group_name: str,
service_name: str,
storage_name: str,
storage_resource: "_models.StorageResource",
**kwargs: Any
) -> "_models.StorageResource":
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-05-01-preview") # type: str
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(storage_resource, 'StorageResource')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 201, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('StorageResource', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('StorageResource', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('StorageResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}"} # type: ignore
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        storage_name: str,
        storage_resource: "_models.StorageResource",
        **kwargs: Any
    ) -> LROPoller["_models.StorageResource"]:
        """Create or update storage resource.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param storage_name: The name of the storage resource.
        :type storage_name: str
        :param storage_resource: Parameters for the create or update operation.
        :type storage_resource: ~azure.mgmt.appplatform.v2022_05_01_preview.models.StorageResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either StorageResource or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.LROPoller[~azure.mgmt.appplatform.v2022_05_01_preview.models.StorageResource]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-specific options before forwarding kwargs to the pipeline.
        api_version = kwargs.pop('api_version', "2022-05-01-preview")  # type: str
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageResource"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Initial PUT; `cls=lambda` preserves the raw pipeline response
            # so the poller can read the LRO headers from it.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                storage_name=storage_name,
                storage_resource=storage_resource,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response of the LRO.
            response = pipeline_response.http_response
            deserialized = self._deserialize('StorageResource', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}"}  # type: ignore
def _delete_initial( # pylint: disable=inconsistent-return-statements
self,
resource_group_name: str,
service_name: str,
storage_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2022-05-01-preview") # type: str
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
service_name=service_name,
storage_name=storage_name,
api_version=api_version,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}"} # type: ignore
    @distributed_trace
    def begin_delete(  # pylint: disable=inconsistent-return-statements
        self,
        resource_group_name: str,
        service_name: str,
        storage_name: str,
        **kwargs: Any
    ) -> LROPoller[None]:
        """Delete the storage resource.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :param storage_name: The name of the storage resource.
        :type storage_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # Pop LRO-specific options before forwarding kwargs to the pipeline.
        api_version = kwargs.pop('api_version', "2022-05-01-preview")  # type: str
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Initial DELETE; `cls=lambda` preserves the raw pipeline
            # response so the poller can read the LRO headers from it.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                storage_name=storage_name,
                api_version=api_version,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the custom callback if given.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages/{storageName}"}  # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs: Any
    ) -> Iterable["_models.StorageResourceCollection"]:
        """List all the storages of one Azure Spring Apps resource.

        :param resource_group_name: The name of the resource group that contains the resource. You can
         obtain this value from the Azure Resource Manager API or the portal.
        :type resource_group_name: str
        :param service_name: The name of the Service resource.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either StorageResourceCollection or the result of
         cls(response)
        :rtype:
         ~azure.core.paging.ItemPaged[~azure.mgmt.appplatform.v2022_05_01_preview.models.StorageResourceCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        api_version = kwargs.pop('api_version', "2022-05-01-preview") # type: str
        cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageResourceCollection"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # Builds either the first-page request (templated URL) or a
            # follow-up request from the service-provided next_link.
            if not next_link:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    api_version=api_version,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    subscription_id=self._config.subscription_id,
                    resource_group_name=resource_group_name,
                    service_name=service_name,
                    api_version=api_version,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # next_link URLs are always fetched with GET regardless of the
                # builder's default.
                request.method = "GET"
            return request
        def extract_data(pipeline_response):
            # Returns (next page link or None, iterator over this page).
            deserialized = self._deserialize("StorageResourceCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # NOTE(review): the cls hook is invoked here with only the
                # element list, unlike the three-argument form used elsewhere
                # in this file -- confirm against the generator's contract.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
                request,
                stream=False,
                **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.AppPlatform/Spring/{serviceName}/storages"} # type: ignore
| {
"content_hash": "e04b9b472dc536e616bc49f9a3325037",
"timestamp": "",
"source": "github",
"line_count": 597,
"max_line_length": 216,
"avg_line_length": 42.93634840871022,
"alnum_prop": 0.6424921000273085,
"repo_name": "Azure/azure-sdk-for-python",
"id": "ec3d2a7980104cff01c152a1c6741b71820705ef",
"size": "26133",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/appplatform/azure-mgmt-appplatform/azure/mgmt/appplatform/v2022_05_01_preview/operations/_storages_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from django import template
from django.conf import settings
from django.db import models
from django.contrib.comments import Comment, CommentForm
import re
from datetime import datetime
import urllib
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
# Resolve the blog models by name (pre-Django-1.7 style lookup) so this
# template-tag module does not import them directly at app-loading time.
Post = models.get_model('blog', 'post')
Category = models.get_model('blog', 'category')
# Registry that the @register.tag / @register.filter decorators below use.
register = template.Library()
class LatestPosts(template.Node):
    """Template node that stores the newest published posts in the context.

    When the requested limit is 1 and at least one post exists, the single
    post object is stored directly rather than as a one-element sequence.
    """

    def __init__(self, limit, var_name):
        self.var_name = var_name
        self.limit = limit

    def render(self, context):
        wanted = int(self.limit)
        latest = Post.objects.published()[:wanted]
        unwrap = bool(latest) and wanted == 1
        context[self.var_name] = latest[0] if unwrap else latest
        return ''
@register.tag
def get_latest_posts(parser, token):
    """
    Gets any number of latest posts and stores them in a variable.

    Syntax::

        {% get_latest_posts [limit] as [var_name] %}

    Example usage::

        {% get_latest_posts 10 as latest_post_list %}

    Raises TemplateSyntaxError when the tag has no arguments or they do not
    match ``<limit> as <var_name>``.
    """
    try:
        tag_name, arg = token.contents.split(None, 1)
    except ValueError:
        # Parenthesized raise form: the old ``raise Exc, "msg"`` statement is
        # Python-2-only syntax; this form behaves identically on 2 and 3.
        raise template.TemplateSyntaxError(
            "%s tag requires arguments" % token.contents.split()[0])
    m = re.search(r'(.*?) as (\w+)', arg)
    if not m:
        raise template.TemplateSyntaxError(
            "%s tag had invalid arguments" % tag_name)
    format_string, var_name = m.groups()
    return LatestPosts(format_string, var_name)
class BlogCategories(template.Node):
    """Template node that places the full Category queryset in the context."""

    def __init__(self, var_name):
        self.var_name = var_name

    def render(self, context):
        context[self.var_name] = Category.objects.all()
        return ''
@register.tag
def get_blog_categories(parser, token):
    """
    Gets all blog categories.

    Syntax::

        {% get_blog_categories as [var_name] %}

    Example usage::

        {% get_blog_categories as category_list %}

    Raises TemplateSyntaxError when no ``as <var_name>`` clause is given.
    """
    try:
        tag_name, arg = token.contents.split(None, 1)
    except ValueError:
        # Parenthesized raise form: the old ``raise Exc, "msg"`` statement is
        # Python-2-only syntax; this form behaves identically on 2 and 3.
        raise template.TemplateSyntaxError(
            "%s tag requires arguments" % token.contents.split()[0])
    m = re.search(r'as (\w+)', arg)
    if not m:
        raise template.TemplateSyntaxError(
            "%s tag had invalid arguments" % tag_name)
    var_name = m.groups()[0]
    return BlogCategories(var_name)
@register.filter
def get_links(value):
    """
    Extracts links from a ``Post`` body and returns a list.

    Template Syntax::

        {{ post.body|markdown:"safe"|get_links }}

    If BeautifulSoup is unavailable, raises TemplateSyntaxError under DEBUG
    and silently returns the unmodified value in production.
    """
    try:
        try:
            from BeautifulSoup import BeautifulSoup
        except ImportError:
            # Some installs expose the package under a lowercase name.
            from beautifulsoup import BeautifulSoup
        soup = BeautifulSoup(value)
        return soup.findAll('a')
    except ImportError:
        if settings.DEBUG:
            # Parenthesized raise form replaces the Python-2-only
            # ``raise Exc, "msg"`` statement; identical behavior on 2 and 3.
            raise template.TemplateSyntaxError(
                "Error in 'get_links' filter: BeautifulSoup isn't installed.")
        return value
@register.filter
def getDetailLink(post):
    """Return the /news/YYYY/Mon/DD/slug detail URL for *post*."""
    date_part = post.publish.strftime('%Y/%b/%d')
    return "/news/{0}/{1}".format(date_part, post.slug)
@register.filter
def getComments(post):
    """Return the queryset of all comments attached to *post*.

    Bug fix: the original ``Comment.objects.get(content_type='post',
    objectid=post.id)`` could never work -- ``objects.get`` raises
    ``DoesNotExist``/``MultipleObjectsReturned`` for zero or several comments,
    ``objectid`` is not a field on the Comment model (the generic-FK key
    field is ``object_pk``), and ``content_type`` is a ForeignKey, not a
    string.  Filter on the content type's model name instead.
    """
    return Comment.objects.filter(content_type__model='post',
                                  object_pk=post.id)
@register.filter
def formatBody(body):
    """Render the post body's homegrown backslash markup as HTML.

    Supported markup: ``\\link{href,text}``, ``\\strong{...}``,
    ``\\emph{...}``, ``\\underline{...}``; newlines become paragraph breaks.
    Returns the result wrapped in <p>...</p> and marked safe.
    """
    targets = [
        '\\\\link{(.+),(.+)}', #\link{href,text}
        '\n',
        '\\\\strong{',
        '\\\\emph{',
        '\\\\underline{',
        ' - ',
    ] #allow some fancy formatting, just becauuuse
    terminators = [
        '}',
    ]
    # replacementsA[i] is the opening HTML for targets[i]; the matching
    # closing tag (replacementsB) is substituted for the terminator below.
    replacementsA = [
        '',# link takes special case
        '</p><p>',
        '<span style=\'font-weight:bold;\'>',
        '<span style=\'font-style:italic;\'>',
        '<span style=\'text-decoration:underline\'>',
        ' - ',
    ]
    replacementsB = '</span>'
    # NOTE(review): the next two substitutions replace a character with
    # itself as written -- they look like HTML escaping (&lt; / &gt;) whose
    # entities were lost in transit.  Confirm against the original source.
    body = re.sub(r'<','<',body)
    body = re.sub(r'>','>',body)
    body = re.sub(r'@','\@',body)
    body = "<p>"+body+"</p>"
    for target in targets:
        counter = 0  # NOTE(review): unused
        # Capturing group makes re.split interleave the matched markup (and
        # its capture groups) with the surrounding text in `explosion`.
        regexp = re.compile('('+target+')')
        explosion = re.split(regexp,body)
        for i in range(0,len(explosion)):
            if re.match(regexp,explosion[i]):
                if target == targets[0]: # OH SHIT A LINK
                    # For \link{href,text}: explosion[i+1] is the href capture
                    # and explosion[i+2] the text capture.
                    #format the url:
                    if re.match(re.compile('^(http://)'),explosion[i+1]):
                        url = "http://%s" % urllib.quote(explosion[i+1][7:])
                    elif re.match(re.compile('^(mailto:)'), explosion[i+1]):
                        url ="mailto:%s" % urllib.quote(explosion[i+1][7:])
                        # Undo the earlier escaping of @ inside the address.
                        url = url.replace('\\@','@')
                    elif re.match(re.compile('^(smb://)'),explosion[i+1]):
                        url = "smb://%s" % urllib.quote(explosion[i+1][6:])
                    else:
                        url = "http://%s"%urllib.quote(explosion[i+1])
                    explosion[i] = "<a href='%s'>%s</a>"%(url,explosion[i+2])
                    # Blank out the consumed capture groups so the join below
                    # does not duplicate them.
                    explosion[i+1] = ''
                    explosion[i+2] = ''
                else:
                    explosion[i] = replacementsA[targets.index(target)]
                    explosion[i] = explosion[i].replace(
                        terminators[0],
                        replacementsB
                    )
        body = ''.join(explosion)
    return mark_safe(body)
formatBody.is_safe = True
@register.filter
def timeTag(pubDate):
    """Format *pubDate* as an ISO date (YYYY-MM-DD) for a <time> tag."""
    return '{:%Y-%m-%d}'.format(pubDate)
| {
"content_hash": "bc144eeb4e114e1ed9ebffca3a51f449",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 109,
"avg_line_length": 30.331550802139038,
"alnum_prop": 0.5424894217207334,
"repo_name": "HM2MC/Webfront",
"id": "76a5fabd42ddcfeeded3942c1fc1b523bf0dfd12",
"size": "5672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "basic/blog/templatetags/blog.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "752717"
},
{
"name": "C++",
"bytes": "2019"
},
{
"name": "CSS",
"bytes": "57094"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "JavaScript",
"bytes": "21800"
},
{
"name": "PHP",
"bytes": "1010"
},
{
"name": "Perl",
"bytes": "10825"
},
{
"name": "Python",
"bytes": "6389530"
},
{
"name": "Shell",
"bytes": "7277"
}
],
"symlink_target": ""
} |
import argparse
import os
import logging
import platform
import re
import subprocess
import urllib2
import json
from core import path_util
from telemetry import benchmark
from telemetry.core import discover
from telemetry.util import command_line
from telemetry.util import matching
# Paths (relative to the chromium / blink checkout roots) of the perf-try
# config files this tool edits and commits.
CHROMIUM_CONFIG_FILENAME = 'tools/run-perf-test.cfg'
BLINK_CONFIG_FILENAME = 'Tools/run-perf-test.cfg'
# Tryjob outcome codes shared by the helpers below.
SUCCESS, NO_CHANGES, ERROR = range(3)
# Unsupported Perf bisect bots.
EXCLUDED_BOTS = {
    'win_xp_perf_bisect', # Goma issues: crbug.com/330900
    'win_perf_bisect_builder',
    'win64_nv_tester',
    'winx64_bisect_builder',
    'linux_perf_bisect_builder',
    'mac_perf_bisect_builder',
    'android_perf_bisect_builder',
    'android_arm64_perf_bisect_builder',
    # Bisect FYI bots are not meant for testing actual perf regressions.
    # Hardware configuration on these bots is different from actual bisect bot
    # and these bots runs E2E integration tests for auto-bisect
    # using dummy benchmarks.
    'linux_fyi_perf_bisect',
    'mac_fyi_perf_bisect',
    'win_fyi_perf_bisect',
    # CQ bots on tryserver.chromium.perf
    'android_s5_perf_cq',
    'winx64_10_perf_cq',
    'mac_retina_perf_cq',
    'linux_perf_cq',
}
# Pseudo bot names accepted on the command line that fan out to every bot of
# the corresponding platform.
INCLUDE_BOTS = [
    'all',
    'all-win',
    'all-mac',
    'all-linux',
    'all-android'
]
# Default trybots to use in case buildbot is unreachable.
DEFAULT_TRYBOTS = [
    'linux_perf_bisect',
    'mac_10_11_perf_bisect',
    'winx64_10_perf_bisect',
    'android_s5_perf_bisect',
]
assert not set(DEFAULT_TRYBOTS) & set(EXCLUDED_BOTS), ( 'A trybot cannot '
    'present in both Default as well as Excluded bots lists.')
class TrybotError(Exception):
  """Raised when any step of sending a perf tryjob fails."""

  def __str__(self):
    message = self.args[0]
    return '{0}\nError running tryjob.'.format(message)
def _GetTrybotList(builders):
  """Return sorted short-form bot names plus the 'all*' platform aliases."""
  short_names = [
      bot.replace('_perf_bisect', '').replace('_', '-') for bot in builders]
  short_names.extend(INCLUDE_BOTS)
  return sorted(short_names)
def _GetBuilderNames(trybot_name, builders):
  """ Return platform and its available bot name as dictionary."""
  os_names = ['linux', 'android', 'mac', 'win']
  if 'all' not in trybot_name:
    # Single-bot request: map a short name like 'winx64-10' back to the full
    # builder name 'winx64_10_perf_bisect'.
    bot = ['%s_perf_bisect' % trybot_name.replace('-', '_')]
    try:
      bot_platform = next(b for b in os_names if b in trybot_name)
    except StopIteration:
      raise TrybotError('Trybot "%s" unsupported for tryjobs.' % trybot_name)
    if 'x64' in trybot_name:
      bot_platform += '-x64'
    return {bot_platform: bot}
  # 'all*' request: bucket every builder by the platform substring in its
  # name.  Note 'win' also matches the x64 bots; they are split out below.
  platform_and_bots = {}
  for os_name in os_names:
    platform_and_bots[os_name] = [bot for bot in builders if os_name in bot]
  # Special case for Windows x64, consider it as a separate platform;
  # its config should contain target_arch=x64 and --browser=release_x64.
  win_x64_bots = [
      win_bot for win_bot in platform_and_bots['win']
      if 'x64' in win_bot]
  # Separate out non x64 bits win bots
  platform_and_bots['win'] = list(
      set(platform_and_bots['win']) - set(win_x64_bots))
  platform_and_bots['win-x64'] = win_x64_bots
  if 'all-win' in trybot_name:
    return {'win': platform_and_bots['win'],
            'win-x64': platform_and_bots['win-x64']}
  if 'all-mac' in trybot_name:
    return {'mac': platform_and_bots['mac']}
  if 'all-android' in trybot_name:
    return {'android': platform_and_bots['android']}
  if 'all-linux' in trybot_name:
    return {'linux': platform_and_bots['linux']}
  # Plain 'all': every platform, with win/win-x64 already separated.
  return platform_and_bots
def _RunProcess(cmd):
  """Run *cmd*, wait for completion, return (returncode, stdout, stderr)."""
  logging.debug('Running process: "%s"', ' '.join(cmd))
  proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  stdout_data, stderr_data = proc.communicate()
  return (proc.poll(), stdout_data, stderr_data)
# Name of the git binary to shell out to, resolved once at import time.
_GIT_CMD = 'git'
if platform.system() == 'Windows':
  # On windows, the git command is installed as 'git.bat'
  _GIT_CMD = 'git.bat'
class Trybot(command_line.ArgParseCommand):
  """ Run telemetry perf benchmark on trybot """
  usage = 'botname benchmark_name [<benchmark run options>]'
  # Class-level cache of builder names fetched from buildbot (or the
  # DEFAULT_TRYBOTS fallback); shared by every instance.
  _builders = None

  def __init__(self):
    # Mapping of platform name -> list of builder names; filled in lazily by
    # _InitializeBuilderNames().
    self._builder_names = None

  @classmethod
  def _GetBuilderList(cls):
    # Fetch (and cache) the list of available perf trybots.
    if not cls._builders:
      try:
        f = urllib2.urlopen(
            ('https://build.chromium.org/p/tryserver.chromium.perf/json/'
             'builders'),
            timeout=5)
      # In case of any kind of exception, allow tryjobs to use default trybots.
      # Possible exceptions are ssl.SSLError, urllib2.URLError,
      # socket.timeout, socket.error.
      except Exception:
        # In case of any exception fall back to the default trybots.
        print ('WARNING: Unable to reach builbot to retrieve trybot '
               'information, tryjob will use default trybots.')
        cls._builders = DEFAULT_TRYBOTS
      else:
        builders = json.loads(f.read()).keys()
        # Exclude unsupported bots like win xp and some dummy bots.
        cls._builders = [bot for bot in builders if bot not in EXCLUDED_BOTS]
    return cls._builders

  def _InitializeBuilderNames(self, trybot):
    # May hit the network via _GetBuilderList(); called once per Run().
    self._builder_names = _GetBuilderNames(trybot, self._GetBuilderList())

  @classmethod
  def CreateParser(cls):
    # Build the argparse parser this command plugs into.
    parser = argparse.ArgumentParser(
        ('Run telemetry benchmarks on trybot. You can add all the benchmark '
         'options available except the --browser option'),
        formatter_class=argparse.RawTextHelpFormatter)
    return parser

  @classmethod
  def ProcessCommandLineArgs(cls, parser, options, extra_args, environment):
    # Validates that no --browser was passed (the trybot config chooses the
    # browser) and that the requested benchmark exists.
    del environment # unused
    for arg in extra_args:
      if arg == '--browser' or arg.startswith('--browser='):
        parser.error('--browser=... is not allowed when running trybot.')
    all_benchmarks = discover.DiscoverClasses(
        start_dir=path_util.GetPerfBenchmarksDir(),
        top_level_dir=path_util.GetPerfDir(),
        base_class=benchmark.Benchmark).values()
    all_benchmark_names = [b.Name() for b in all_benchmarks]
    if options.benchmark_name not in all_benchmark_names:
      possible_benchmark_names = matching.GetMostLikelyMatchedObject(
          all_benchmark_names, options.benchmark_name)
      parser.error(
          'No benchmark named "%s". Do you mean any of those benchmarks '
          'below?\n%s' %
          (options.benchmark_name, '\n'.join(possible_benchmark_names)))

  @classmethod
  def AddCommandLineArgs(cls, parser, environment):
    # Registers the two positional arguments: trybot name and benchmark name.
    del environment # unused
    available_bots = _GetTrybotList(cls._GetBuilderList())
    parser.add_argument(
        'trybot', choices=available_bots,
        help=('specify which bots to run telemetry benchmarks on. '
              ' Allowed values are:\n' + '\n'.join(available_bots)),
        metavar='<trybot name>')
    parser.add_argument(
        'benchmark_name', type=str,
        help=('specify which benchmark to run. To see all available benchmarks,'
              ' run `run_benchmark list`'),
        metavar='<benchmark name>')

  def Run(self, options, extra_args=None):
    """Sends a tryjob to a perf trybot.

    This creates a branch, telemetry-tryjob, switches to that branch, edits
    the bisect config, commits it, uploads the CL to rietveld, and runs a
    tryjob on the given bot.

    Returns 0 on success, 1 when no local changes were found.
    """
    if extra_args is None:
      extra_args = []
    self._InitializeBuilderNames(options.trybot)
    arguments = [options.benchmark_name] + extra_args
    # First check if there are chromium changes to upload.
    status = self._AttemptTryjob(CHROMIUM_CONFIG_FILENAME, arguments)
    if status not in [SUCCESS, ERROR]:
      # If we got here, there are no chromium changes to upload. Try blink.
      os.chdir('third_party/WebKit/')
      status = self._AttemptTryjob(BLINK_CONFIG_FILENAME, arguments)
      os.chdir('../..')
      if status not in [SUCCESS, ERROR]:
        logging.error('No local changes found in chromium or blink trees. '
                      'browser=%s argument sends local changes to the '
                      'perf trybot(s): %s.', options.trybot,
                      self._builder_names.values())
        return 1
    return 0

  def _UpdateConfigAndRunTryjob(self, bot_platform, cfg_file_path, arguments):
    """Updates perf config file, uploads changes and executes perf try job.

    Args:
      bot_platform: Name of the platform to be generated.
      cfg_file_path: Perf config file path.

    Returns:
      (result, msg) where result is one of:
          SUCCESS if a tryjob was sent
          NO_CHANGES if there was nothing to try,
          ERROR if a tryjob was attempted but an error encountered
          and msg is an error message if an error was encountered, or rietveld
          url if success, otherwise throws TrybotError exception.
    """
    config = self._GetPerfConfig(bot_platform, arguments)
    config_to_write = 'config = %s' % json.dumps(
        config, sort_keys=True, indent=2, separators=(',', ': '))
    try:
      with open(cfg_file_path, 'r') as config_file:
        # Identical config means this platform was already tried.
        if config_to_write == config_file.read():
          return NO_CHANGES, ''
    except IOError:
      msg = 'Cannot find %s. Please run from src dir.' % cfg_file_path
      return (ERROR, msg)
    with open(cfg_file_path, 'w') as config_file:
      config_file.write(config_to_write)
    # Commit the config changes locally.
    returncode, out, err = _RunProcess(
        [_GIT_CMD, 'commit', '-a', '-m', 'bisect config: %s' % bot_platform])
    if returncode:
      raise TrybotError('Could not commit bisect config change for %s,'
                        ' error %s' % (bot_platform, err))
    # Upload the CL to rietveld and run a try job.
    returncode, out, err = _RunProcess([
        _GIT_CMD, 'cl', 'upload', '-f', '--bypass-hooks', '-m',
        'CL for perf tryjob on %s' % bot_platform
    ])
    if returncode:
      raise TrybotError('Could not upload to rietveld for %s, error %s' %
                        (bot_platform, err))
    match = re.search(r'https://codereview.chromium.org/[\d]+', out)
    if not match:
      raise TrybotError('Could not upload CL to rietveld for %s! Output %s' %
                        (bot_platform, out))
    rietveld_url = match.group(0)
    # Generate git try command for available bots.
    git_try_command = [_GIT_CMD, 'cl', 'try', '-m', 'tryserver.chromium.perf']
    for bot in self._builder_names[bot_platform]:
      git_try_command.extend(['-b', bot])
    returncode, out, err = _RunProcess(git_try_command)
    if returncode:
      raise TrybotError('Could not try CL for %s, error %s' %
                        (bot_platform, err))
    return (SUCCESS, rietveld_url)

  def _GetPerfConfig(self, bot_platform, arguments):
    """Generates the perf config for try job.

    Args:
      bot_platform: Name of the platform to be generated.
      arguments: Benchmark name plus extra benchmark run options.

    Returns:
      A dictionary with perf config parameters.
    """
    # To make sure that we don't mutate the original args
    arguments = arguments[:]
    # Always set verbose logging for later debugging
    if '-v' not in arguments and '--verbose' not in arguments:
      arguments.append('--verbose')
    # Generate the command line for the perf trybots
    target_arch = 'ia32'
    if any(arg == '--chrome-root' or arg.startswith('--chrome-root=') for arg
           in arguments):
      raise ValueError(
          'Trybot does not suport --chrome-root option set directly '
          'through command line since it may contain references to your local '
          'directory')
    if bot_platform in ['win', 'win-x64']:
      arguments.insert(0, 'python tools\\perf\\run_benchmark')
    else:
      arguments.insert(0, './tools/perf/run_benchmark')
    if bot_platform == 'android':
      arguments.insert(1, '--browser=android-chromium')
    elif any('x64' in bot for bot in self._builder_names[bot_platform]):
      arguments.insert(1, '--browser=release_x64')
      target_arch = 'x64'
    else:
      arguments.insert(1, '--browser=release')
    command = ' '.join(arguments)
    return {
        'command': command,
        'repeat_count': '1',
        'max_time_minutes': '120',
        'truncate_percent': '0',
        'target_arch': target_arch,
    }

  def _AttemptTryjob(self, cfg_file_path, arguments):
    """Attempts to run a tryjob from the current directory.

    This is run once for chromium, and if it returns NO_CHANGES, once for blink.

    Args:
      cfg_file_path: Path to the config file for the try job.
      arguments: Benchmark name plus extra benchmark run options.

    Returns:
      Returns SUCCESS if a tryjob was sent, NO_CHANGES if there was nothing to
      try, ERROR if a tryjob was attempted but an error encountered.
    """
    source_repo = 'chromium'
    if cfg_file_path == BLINK_CONFIG_FILENAME:
      source_repo = 'blink'
    # TODO(prasadv): This method is quite long, we should consider refactor
    # this by extracting to helper methods.
    returncode, original_branchname, err = _RunProcess(
        [_GIT_CMD, 'rev-parse', '--abbrev-ref', 'HEAD'])
    if returncode:
      msg = 'Must be in a git repository to send changes to trybots.'
      if err:
        msg += '\nGit error: %s' % err
      logging.error(msg)
      return ERROR
    original_branchname = original_branchname.strip()
    # Check if the tree is dirty: make sure the index is up to date and then
    # run diff-index
    _RunProcess([_GIT_CMD, 'update-index', '--refresh', '-q'])
    returncode, out, err = _RunProcess([_GIT_CMD, 'diff-index', 'HEAD'])
    if out:
      logging.error(
          'Cannot send a try job with a dirty tree. Commit locally first.')
      return ERROR
    # Make sure the tree does have local commits.
    returncode, out, err = _RunProcess(
        [_GIT_CMD, 'log', 'origin/master..HEAD'])
    if not out:
      return NO_CHANGES
    # Create/check out the telemetry-tryjob branch, and edit the configs
    # for the tryjob there.
    returncode, out, err = _RunProcess(
        [_GIT_CMD, 'checkout', '-b', 'telemetry-tryjob'])
    if returncode:
      logging.error('Error creating branch telemetry-tryjob. '
                    'Please delete it if it exists.\n%s', err)
      return ERROR
    try:
      returncode, out, err = _RunProcess(
          [_GIT_CMD, 'branch', '--set-upstream-to', 'origin/master'])
      if returncode:
        logging.error('Error in git branch --set-upstream-to: %s', err)
        return ERROR
      for bot_platform in self._builder_names:
        if not self._builder_names[bot_platform]:
          logging.warning('No builder is found for %s', bot_platform)
          continue
        try:
          results, output = self._UpdateConfigAndRunTryjob(
              bot_platform, cfg_file_path, arguments)
          if results == ERROR:
            logging.error(output)
            return ERROR
          elif results == NO_CHANGES:
            print ('Skip the try job run on %s because it has been tried in '
                   'previous try job run. ' % bot_platform)
          else:
            print ('Uploaded %s try job to rietveld for %s platform. '
                   'View progress at %s' % (source_repo, bot_platform, output))
        # NOTE: ``except X, err`` / ``print err`` below are Python-2-only
        # syntax; this file targets Python 2 (it imports urllib2 above).
        except TrybotError, err:
          print err
          logging.error(err)
    finally:
      # Checkout original branch and delete telemetry-tryjob branch.
      # TODO(prasadv): This finally block could be extracted out to be a
      # separate function called _CleanupBranch.
      returncode, out, err = _RunProcess(
          [_GIT_CMD, 'checkout', original_branchname])
      if returncode:
        logging.error('Could not check out %s. Please check it out and '
                      'manually delete the telemetry-tryjob branch. '
                      ': %s', original_branchname, err)
        return ERROR # pylint: disable=lost-exception
      logging.info('Checked out original branch: %s', original_branchname)
      returncode, out, err = _RunProcess(
          [_GIT_CMD, 'branch', '-D', 'telemetry-tryjob'])
      if returncode:
        logging.error('Could not delete telemetry-tryjob branch. '
                      'Please delete it manually: %s', err)
        return ERROR # pylint: disable=lost-exception
      logging.info('Deleted temp branch: telemetry-tryjob')
    return SUCCESS
| {
"content_hash": "40c66e971ecf880e23bee930ec114546",
"timestamp": "",
"source": "github",
"line_count": 439,
"max_line_length": 80,
"avg_line_length": 36.676537585421414,
"alnum_prop": 0.6371032855102168,
"repo_name": "was4444/chromium.src",
"id": "79a009b501bbb822672c66e58ad3292efcef5aa1",
"size": "16264",
"binary": false,
"copies": "2",
"ref": "refs/heads/nw15",
"path": "tools/perf/core/trybot_command.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""
:codeauthor: Mike Place <mp@saltstack.com>
"""
import errno
import logging
import os
import threading
import pytest
import salt.config
import salt.exceptions
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.ext.tornado.testing
import salt.transport.ipc
import salt.utils.platform
from salt.ext.tornado.iostream import StreamClosedError
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import skipIf
# Skip this whole module on platforms where these POSIX IPC tests do not run.
pytestmark = [
    pytest.mark.skip_on_darwin,
    pytest.mark.skip_on_freebsd,
    pytest.mark.skip_on_windows,
]
log = logging.getLogger(__name__)
@skipIf(salt.utils.platform.is_windows(), "Windows does not support Posix IPC")
class IPCMessagePubSubCase(salt.ext.tornado.testing.AsyncTestCase):
    """
    Test all of the clear msg stuff
    """

    def setUp(self):
        # Builds one publisher and one subscriber over a unix-socket path in
        # the test tmp dir; additional subscribers are created per-test.
        super().setUp()
        self.opts = {"ipc_write_buffer": 0}
        if not os.path.exists(RUNTIME_VARS.TMP):
            os.mkdir(RUNTIME_VARS.TMP)
        self.socket_path = os.path.join(RUNTIME_VARS.TMP, "ipc_test.ipc")
        self.pub_channel = self._get_pub_channel()
        self.sub_channel = self._get_sub_channel()

    def _get_pub_channel(self):
        # Publisher side: binds the socket and starts listening.
        pub_channel = salt.transport.ipc.IPCMessagePublisher(
            self.opts,
            self.socket_path,
        )
        pub_channel.start()
        return pub_channel

    def _get_sub_channel(self):
        # Subscriber side: connect() completes asynchronously; stop()/wait()
        # pumps the AsyncTestCase IOLoop until the connect callback fires.
        sub_channel = salt.transport.ipc.IPCMessageSubscriber(
            socket_path=self.socket_path,
            io_loop=self.io_loop,
        )
        sub_channel.connect(callback=self.stop)
        self.wait()
        return sub_channel

    def tearDown(self):
        super().tearDown()
        try:
            self.pub_channel.close()
        except OSError as exc:
            if exc.errno != errno.EBADF:
                # If its not a bad file descriptor error, raise
                raise
        try:
            self.sub_channel.close()
        except OSError as exc:
            if exc.errno != errno.EBADF:
                # If its not a bad file descriptor error, raise
                raise
        os.unlink(self.socket_path)
        del self.pub_channel
        del self.sub_channel

    def test_multi_client_reading(self):
        # To be completely fair let's create 2 clients.
        client1 = self.sub_channel
        client2 = self._get_sub_channel()
        call_cnt = []
        # Create a watchdog to be safe from hanging in sync loops (what old code did)
        evt = threading.Event()

        def close_server():
            # Fires after 1s unless the handler below set the event first.
            if evt.wait(1):
                return
            client2.close()
            self.stop()

        watchdog = threading.Thread(target=close_server)
        watchdog.start()

        # Runs in ioloop thread so we're safe from race conditions here
        def handler(raw):
            call_cnt.append(raw)
            if len(call_cnt) >= 2:
                evt.set()
                self.stop()

        # Now let both waiting data at once
        client1.read_async(handler)
        client2.read_async(handler)
        self.pub_channel.publish("TEST")
        self.wait()
        # Both subscribers must receive the single published payload.
        self.assertEqual(len(call_cnt), 2)
        self.assertEqual(call_cnt[0], "TEST")
        self.assertEqual(call_cnt[1], "TEST")

    def test_sync_reading(self):
        # To be completely fair let's create 2 clients.
        client1 = self.sub_channel
        client2 = self._get_sub_channel()
        call_cnt = []
        # Now let both waiting data at once
        self.pub_channel.publish("TEST")
        ret1 = client1.read_sync()
        ret2 = client2.read_sync()
        self.assertEqual(ret1, "TEST")
        self.assertEqual(ret2, "TEST")

    @salt.ext.tornado.testing.gen_test
    def test_async_reading_streamclosederror(self):
        client1 = self.sub_channel
        call_cnt = []
        # Create a watchdog to be safe from hanging in sync loops (what old code did)
        evt = threading.Event()

        def close_server():
            # Closes the subscriber almost immediately to force a closed
            # stream while the read is pending.
            if evt.wait(0.001):
                return
            client1.close()
            self.stop()

        watchdog = threading.Thread(target=close_server)
        watchdog.start()

        # Runs in ioloop thread so we're safe from race conditions here
        def handler(raw):
            pass

        try:
            ret1 = yield client1.read_async(handler)
            self.wait()
        except StreamClosedError as ex:
            # The subscriber is expected to absorb the close internally;
            # letting StreamClosedError escape to the caller is a failure.
            assert False, "StreamClosedError was raised inside the Future"
| {
"content_hash": "c9a09d4e2f664abcb926fc81c814f69a",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 85,
"avg_line_length": 29.10967741935484,
"alnum_prop": 0.5966312056737588,
"repo_name": "saltstack/salt",
"id": "21ffb6aeecc281f9e3b104b6293de7670fd3c9c0",
"size": "4512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit/transport/test_ipc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relaxes ScriptParameter.choices to a nullable/blankable TextField.

    dependencies = [
        ('wooey', '0043_update_model_protection'),
    ]

    operations = [
        migrations.AlterField(
            model_name='scriptparameter',
            name='choices',
            field=models.TextField(blank=True, null=True),
        ),
    ]
| {
"content_hash": "844fac94c7a77d73e7550cac0b903dc0",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 58,
"avg_line_length": 22.375,
"alnum_prop": 0.5893854748603352,
"repo_name": "wooey/Wooey",
"id": "5ec89bd9bd5a542450a4c8d1fe710bfd3cfd64ff",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wooey/migrations/0044_change_script_parameter_choices_to_text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "8898"
},
{
"name": "Dockerfile",
"bytes": "1004"
},
{
"name": "HTML",
"bytes": "75964"
},
{
"name": "JavaScript",
"bytes": "811"
},
{
"name": "Makefile",
"bytes": "1212"
},
{
"name": "Python",
"bytes": "298550"
},
{
"name": "Shell",
"bytes": "513"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class appflowglobal_binding(base_resource):
    """ Binding class showing the resources that can be bound to appflowglobal_binding.
    """
    def __init__(self) :
        self.appflowglobal_appflowpolicy_binding = []

    @property
    def appflowglobal_appflowpolicy_bindings(self) :
        """appflowpolicy that can be bound to appflowglobal.
        """
        try :
            # NOTE(review): __init__ assigns the non-underscored name while
            # this reads the underscored one; base_resource presumably routes
            # attribute writes to the '_'-prefixed slot via __setattr__ --
            # confirm before changing either side.
            return self._appflowglobal_appflowpolicy_binding
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(appflowglobal_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # errorcode 444 additionally invalidates the client session.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.appflowglobal_binding
        except Exception as e :
            raise e

    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            # appflowglobal has no per-object identifier.
            return None
        except Exception as e :
            raise e

    @classmethod
    def get(self, service) :
        """ Use this API to fetch a appflowglobal_binding resource .
        """
        try :
            obj = appflowglobal_binding()
            response = obj.get_resource(service)
            return response
        except Exception as e:
            raise e
class appflowglobal_binding_response(base_response) :
    """Response envelope holding a list of appflowglobal_binding resources."""

    def __init__(self, length=1) :
        # Standard NITRO response envelope fields with their defaults.
        # (The initial empty-list assignment mirrors the original order in
        # case base_response intercepts attribute writes.)
        self.appflowglobal_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-populate with `length` empty binding objects.
        bindings = []
        for _index in range(length):
            bindings.append(appflowglobal_binding())
        self.appflowglobal_binding = bindings
| {
"content_hash": "9a58ce0adb56cfbfddb5e06dea4262eb",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 123,
"avg_line_length": 31.814285714285713,
"alnum_prop": 0.7260889088459811,
"repo_name": "mahabs/nitro",
"id": "64d52a2fd61285596c84762930b1e59c7158ba28",
"size": "2841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/appflow/appflowglobal_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
import json
import mock
import os
import tempfile
import testtools
from glanceclient.common import utils
from glanceclient.v2 import shell as test_shell
class ShellV2Test(testtools.TestCase):
    def setUp(self):
        """Install mocked utils helpers and a mocked glance client per test."""
        super(ShellV2Test, self).setUp()
        self._mock_utils()
        self.gc = self._mock_glance_client()
def _make_args(self, args):
# NOTE(venkatesh): this conversion from a dict to an object
# is required because the test_shell.do_xxx(gc, args) methods
# expects the args to be attributes of an object. If passed as
# dict directly, it throws an AttributeError.
class Args(object):
def __init__(self, entries):
self.__dict__.update(entries)
return Args(args)
def _mock_glance_client(self):
my_mocked_gc = mock.Mock()
my_mocked_gc.schemas.return_value = 'test'
my_mocked_gc.get.return_value = {}
return my_mocked_gc
def _mock_utils(self):
utils.print_list = mock.Mock()
utils.print_dict = mock.Mock()
utils.save_image = mock.Mock()
    def assert_exits_with_msg(self, func, func_args, err_msg):
        """Run func(self.gc, func_args) and assert it exits via utils.exit(err_msg).

        utils.exit is patched so the handler returns instead of terminating;
        the single call with err_msg is then verified.
        """
        with mock.patch.object(utils, 'exit') as mocked_utils_exit:
            mocked_utils_exit.return_value = '%s' % err_msg
            func(self.gc, func_args)
            mocked_utils_exit.assert_called_once_with(err_msg)
def test_do_image_list(self):
input = {
'limit': None,
'page_size': 18,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': [],
'sort_key': ['name', 'id'],
'sort_dir': ['desc', 'asc'],
'sort': None,
'verbose': False
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag'
}
mocked_list.assert_called_once_with(page_size=18,
sort_key=['name', 'id'],
sort_dir=['desc', 'asc'],
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_list_with_single_sort_key(self):
input = {
'limit': None,
'page_size': 18,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': [],
'sort_key': ['name'],
'sort_dir': ['desc'],
'sort': None,
'verbose': False
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag'
}
mocked_list.assert_called_once_with(page_size=18,
sort_key=['name'],
sort_dir=['desc'],
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_list_new_sorting_syntax(self):
input = {
'limit': None,
'page_size': 18,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': [],
'sort': 'name:desc,size:asc',
'sort_key': [],
'sort_dir': [],
'verbose': False
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag'
}
mocked_list.assert_called_once_with(
page_size=18,
sort='name:desc,size:asc',
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_list_with_property_filter(self):
input = {
'limit': None,
'page_size': 1,
'visibility': True,
'member_status': 'Fake',
'owner': 'test',
'checksum': 'fake_checksum',
'tag': 'fake tag',
'properties': ['os_distro=NixOS', 'architecture=x86_64'],
'sort_key': ['name'],
'sort_dir': ['desc'],
'sort': None,
'verbose': False
}
args = self._make_args(input)
with mock.patch.object(self.gc.images, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_image_list(self.gc, args)
exp_img_filters = {
'owner': 'test',
'member_status': 'Fake',
'visibility': True,
'checksum': 'fake_checksum',
'tag': 'fake tag',
'os_distro': 'NixOS',
'architecture': 'x86_64'
}
mocked_list.assert_called_once_with(page_size=1,
sort_key=['name'],
sort_dir=['desc'],
filters=exp_img_filters)
utils.print_list.assert_called_once_with({}, ['ID', 'Name'])
def test_do_image_show_human_readable(self):
args = self._make_args({'id': 'pass', 'page_size': 18,
'human_readable': True,
'max_column_width': 120})
with mock.patch.object(self.gc.images, 'get') as mocked_list:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['size'] = 1024
mocked_list.return_value = expect_image
test_shell.do_image_show(self.gc, args)
mocked_list.assert_called_once_with('pass')
utils.print_dict.assert_called_once_with({'id': 'pass',
'size': '1kB'},
max_column_width=120)
def test_do_image_show(self):
args = self._make_args({'id': 'pass', 'page_size': 18,
'human_readable': False,
'max_column_width': 120})
with mock.patch.object(self.gc.images, 'get') as mocked_list:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['size'] = 1024
mocked_list.return_value = expect_image
test_shell.do_image_show(self.gc, args)
mocked_list.assert_called_once_with('pass')
utils.print_dict.assert_called_once_with({'id': 'pass',
'size': 1024},
max_column_width=120)
@mock.patch('sys.stdin', autospec=True)
def test_do_image_create_no_user_props(self, mock_stdin):
args = self._make_args({'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare',
'file': None})
with mock.patch.object(self.gc.images, 'create') as mocked_create:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
expect_image['container_format'] = 'bare'
mocked_create.return_value = expect_image
# Ensure that the test stdin is not considered
# to be supplying image data
mock_stdin.isatty = lambda: True
test_shell.do_image_create(self.gc, args)
mocked_create.assert_called_once_with(name='IMG-01',
disk_format='vhd',
container_format='bare')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare'})
def test_do_image_create_with_file(self):
try:
file_name = None
with open(tempfile.mktemp(), 'w+') as f:
f.write('Some data here')
f.flush()
f.seek(0)
file_name = f.name
temp_args = {'name': 'IMG-01',
'disk_format': 'vhd',
'container_format': 'bare',
'file': file_name,
'progress': False}
args = self._make_args(temp_args)
with mock.patch.object(self.gc.images, 'create') as mocked_create:
with mock.patch.object(self.gc.images, 'get') as mocked_get:
ignore_fields = ['self', 'access', 'schema']
expect_image = dict([(field, field) for field in
ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
expect_image['container_format'] = 'bare'
mocked_create.return_value = expect_image
mocked_get.return_value = expect_image
test_shell.do_image_create(self.gc, args)
temp_args.pop('file', None)
mocked_create.assert_called_once_with(**temp_args)
mocked_get.assert_called_once_with('pass')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare'})
finally:
try:
os.remove(f.name)
except Exception:
pass
@mock.patch('sys.stdin', autospec=True)
def test_do_image_create_with_user_props(self, mock_stdin):
args = self._make_args({'name': 'IMG-01',
'property': ['myprop=myval'],
'file': None})
with mock.patch.object(self.gc.images, 'create') as mocked_create:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['myprop'] = 'myval'
mocked_create.return_value = expect_image
# Ensure that the test stdin is not considered
# to be supplying image data
mock_stdin.isatty = lambda: True
test_shell.do_image_create(self.gc, args)
mocked_create.assert_called_once_with(name='IMG-01',
myprop='myval')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'myprop': 'myval'})
def test_do_image_update_no_user_props(self):
args = self._make_args({'id': 'pass', 'name': 'IMG-01',
'disk_format': 'vhd',
'container_format': 'bare'})
with mock.patch.object(self.gc.images, 'update') as mocked_update:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
expect_image['container_format'] = 'bare'
mocked_update.return_value = expect_image
test_shell.do_image_update(self.gc, args)
mocked_update.assert_called_once_with('pass',
None,
name='IMG-01',
disk_format='vhd',
container_format='bare')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd',
'container_format': 'bare'})
def test_do_image_update_with_user_props(self):
args = self._make_args({'id': 'pass', 'name': 'IMG-01',
'property': ['myprop=myval']})
with mock.patch.object(self.gc.images, 'update') as mocked_update:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['myprop'] = 'myval'
mocked_update.return_value = expect_image
test_shell.do_image_update(self.gc, args)
mocked_update.assert_called_once_with('pass',
None,
name='IMG-01',
myprop='myval')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'myprop': 'myval'})
def test_do_image_update_with_remove_props(self):
args = self._make_args({'id': 'pass', 'name': 'IMG-01',
'disk_format': 'vhd',
'remove-property': ['container_format']})
with mock.patch.object(self.gc.images, 'update') as mocked_update:
ignore_fields = ['self', 'access', 'file', 'schema']
expect_image = dict([(field, field) for field in ignore_fields])
expect_image['id'] = 'pass'
expect_image['name'] = 'IMG-01'
expect_image['disk_format'] = 'vhd'
mocked_update.return_value = expect_image
test_shell.do_image_update(self.gc, args)
mocked_update.assert_called_once_with('pass',
['container_format'],
name='IMG-01',
disk_format='vhd')
utils.print_dict.assert_called_once_with({
'id': 'pass', 'name': 'IMG-01', 'disk_format': 'vhd'})
def test_do_explain(self):
input = {
'page_size': 18,
'id': 'pass',
'schemas': 'test',
'model': 'test',
}
args = self._make_args(input)
with mock.patch.object(utils, 'print_list'):
test_shell.do_explain(self.gc, args)
self.gc.schemas.get.assert_called_once_with('test')
def test_do_location_add(self):
gc = self.gc
loc = {'url': 'http://foo.com/', 'metadata': {'foo': 'bar'}}
args = self._make_args({'id': 'pass',
'url': loc['url'],
'metadata': json.dumps(loc['metadata'])})
with mock.patch.object(gc.images, 'add_location') as mocked_addloc:
expect_image = {'id': 'pass', 'locations': [loc]}
mocked_addloc.return_value = expect_image
test_shell.do_location_add(self.gc, args)
mocked_addloc.assert_called_once_with('pass',
loc['url'],
loc['metadata'])
utils.print_dict.assert_called_once_with(expect_image)
def test_do_location_delete(self):
gc = self.gc
loc_set = set(['http://foo/bar', 'http://spam/ham'])
args = self._make_args({'id': 'pass', 'url': loc_set})
with mock.patch.object(gc.images, 'delete_locations') as mocked_rmloc:
test_shell.do_location_delete(self.gc, args)
mocked_rmloc.assert_called_once_with('pass', loc_set)
def test_do_location_update(self):
gc = self.gc
loc = {'url': 'http://foo.com/', 'metadata': {'foo': 'bar'}}
args = self._make_args({'id': 'pass',
'url': loc['url'],
'metadata': json.dumps(loc['metadata'])})
with mock.patch.object(gc.images, 'update_location') as mocked_modloc:
expect_image = {'id': 'pass', 'locations': [loc]}
mocked_modloc.return_value = expect_image
test_shell.do_location_update(self.gc, args)
mocked_modloc.assert_called_once_with('pass',
loc['url'],
loc['metadata'])
utils.print_dict.assert_called_once_with(expect_image)
def test_image_upload(self):
args = self._make_args(
{'id': 'IMG-01', 'file': 'test', 'size': 1024, 'progress': False})
with mock.patch.object(self.gc.images, 'upload') as mocked_upload:
utils.get_data_file = mock.Mock(return_value='testfile')
mocked_upload.return_value = None
test_shell.do_image_upload(self.gc, args)
mocked_upload.assert_called_once_with('IMG-01', 'testfile', 1024)
def test_image_download(self):
args = self._make_args(
{'id': 'IMG-01', 'file': 'test', 'progress': True})
with mock.patch.object(self.gc.images, 'data') as mocked_data:
def _data():
for c in 'abcedf':
yield c
mocked_data.return_value = utils.IterableWithLength(_data(), 5)
test_shell.do_image_download(self.gc, args)
mocked_data.assert_called_once_with('IMG-01')
def test_do_image_delete(self):
args = self._make_args({'id': 'pass', 'file': 'test'})
with mock.patch.object(self.gc.images, 'delete') as mocked_delete:
mocked_delete.return_value = 0
test_shell.do_image_delete(self.gc, args)
mocked_delete.assert_called_once_with('pass')
def test_do_image_delete_deleted(self):
image_id = 'deleted-img'
args = self._make_args({'id': image_id})
with mock.patch.object(self.gc.images, 'get') as mocked_get:
mocked_get.return_value = self._make_args({'id': image_id,
'status': 'deleted'})
msg = "No image with an ID of '%s' exists." % image_id
self.assert_exits_with_msg(func=test_shell.do_image_delete,
func_args=args,
err_msg=msg)
def test_do_member_list(self):
args = self._make_args({'image_id': 'IMG-01'})
with mock.patch.object(self.gc.image_members, 'list') as mocked_list:
mocked_list.return_value = {}
test_shell.do_member_list(self.gc, args)
mocked_list.assert_called_once_with('IMG-01')
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list.assert_called_once_with({}, columns)
def test_do_member_create(self):
args = self._make_args({'image_id': 'IMG-01', 'member_id': 'MEM-01'})
with mock.patch.object(self.gc.image_members, 'create') as mock_create:
mock_create.return_value = {}
test_shell.do_member_create(self.gc, args)
mock_create.assert_called_once_with('IMG-01', 'MEM-01')
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list.assert_called_once_with([{}], columns)
def test_do_member_create_with_few_arguments(self):
args = self._make_args({'image_id': None, 'member_id': 'MEM-01'})
msg = 'Unable to create member. Specify image_id and member_id'
self.assert_exits_with_msg(func=test_shell.do_member_create,
func_args=args,
err_msg=msg)
def test_do_member_update(self):
input = {
'image_id': 'IMG-01',
'member_id': 'MEM-01',
'member_status': 'status',
}
args = self._make_args(input)
with mock.patch.object(self.gc.image_members, 'update') as mock_update:
mock_update.return_value = {}
test_shell.do_member_update(self.gc, args)
mock_update.assert_called_once_with('IMG-01', 'MEM-01', 'status')
columns = ['Image ID', 'Member ID', 'Status']
utils.print_list.assert_called_once_with([{}], columns)
def test_do_member_update_with_few_arguments(self):
input = {
'image_id': 'IMG-01',
'member_id': 'MEM-01',
'member_status': None,
}
args = self._make_args(input)
msg = 'Unable to update member. Specify image_id, member_id' \
' and member_status'
self.assert_exits_with_msg(func=test_shell.do_member_update,
func_args=args,
err_msg=msg)
def test_do_member_delete(self):
args = self._make_args({'image_id': 'IMG-01', 'member_id': 'MEM-01'})
with mock.patch.object(self.gc.image_members, 'delete') as mock_delete:
test_shell.do_member_delete(self.gc, args)
mock_delete.assert_called_once_with('IMG-01', 'MEM-01')
def test_do_member_delete_with_few_arguments(self):
args = self._make_args({'image_id': None, 'member_id': 'MEM-01'})
msg = 'Unable to delete member. Specify image_id and member_id'
self.assert_exits_with_msg(func=test_shell.do_member_delete,
func_args=args,
err_msg=msg)
def test_image_tag_update(self):
args = self._make_args({'image_id': 'IMG-01', 'tag_value': 'tag01'})
with mock.patch.object(self.gc.image_tags, 'update') as mocked_update:
self.gc.images.get = mock.Mock(return_value={})
mocked_update.return_value = None
test_shell.do_image_tag_update(self.gc, args)
mocked_update.assert_called_once_with('IMG-01', 'tag01')
def test_image_tag_update_with_few_arguments(self):
args = self._make_args({'image_id': None, 'tag_value': 'tag01'})
msg = 'Unable to update tag. Specify image_id and tag_value'
self.assert_exits_with_msg(func=test_shell.do_image_tag_update,
func_args=args,
err_msg=msg)
def test_image_tag_delete(self):
args = self._make_args({'image_id': 'IMG-01', 'tag_value': 'tag01'})
with mock.patch.object(self.gc.image_tags, 'delete') as mocked_delete:
mocked_delete.return_value = None
test_shell.do_image_tag_delete(self.gc, args)
mocked_delete.assert_called_once_with('IMG-01', 'tag01')
def test_image_tag_delete_with_few_arguments(self):
args = self._make_args({'image_id': 'IMG-01', 'tag_value': None})
msg = 'Unable to delete tag. Specify image_id and tag_value'
self.assert_exits_with_msg(func=test_shell.do_image_tag_delete,
func_args=args,
err_msg=msg)
def test_do_md_namespace_create(self):
args = self._make_args({'namespace': 'MyNamespace',
'protected': True})
with mock.patch.object(self.gc.metadefs_namespace,
'create') as mocked_create:
expect_namespace = {}
expect_namespace['namespace'] = 'MyNamespace'
expect_namespace['protected'] = True
mocked_create.return_value = expect_namespace
test_shell.do_md_namespace_create(self.gc, args)
mocked_create.assert_called_once_with(namespace='MyNamespace',
protected=True)
utils.print_dict.assert_called_once_with(expect_namespace)
def test_do_md_namespace_import(self):
args = self._make_args({'file': 'test'})
expect_namespace = {}
expect_namespace['namespace'] = 'MyNamespace'
expect_namespace['protected'] = True
with mock.patch.object(self.gc.metadefs_namespace,
'create') as mocked_create:
mock_read = mock.Mock(return_value=json.dumps(expect_namespace))
mock_file = mock.Mock(read=mock_read)
utils.get_data_file = mock.Mock(return_value=mock_file)
mocked_create.return_value = expect_namespace
test_shell.do_md_namespace_import(self.gc, args)
mocked_create.assert_called_once_with(**expect_namespace)
utils.print_dict.assert_called_once_with(expect_namespace)
def test_do_md_namespace_import_invalid_json(self):
args = self._make_args({'file': 'test'})
mock_read = mock.Mock(return_value='Invalid')
mock_file = mock.Mock(read=mock_read)
utils.get_data_file = mock.Mock(return_value=mock_file)
self.assertRaises(SystemExit, test_shell.do_md_namespace_import,
self.gc, args)
def test_do_md_namespace_import_no_input(self):
args = self._make_args({'file': None})
utils.get_data_file = mock.Mock(return_value=None)
self.assertRaises(SystemExit, test_shell.do_md_namespace_import,
self.gc, args)
def test_do_md_namespace_update(self):
args = self._make_args({'id': 'MyNamespace',
'protected': True})
with mock.patch.object(self.gc.metadefs_namespace,
'update') as mocked_update:
expect_namespace = {}
expect_namespace['namespace'] = 'MyNamespace'
expect_namespace['protected'] = True
mocked_update.return_value = expect_namespace
test_shell.do_md_namespace_update(self.gc, args)
mocked_update.assert_called_once_with('MyNamespace',
id='MyNamespace',
protected=True)
utils.print_dict.assert_called_once_with(expect_namespace)
def test_do_md_namespace_show(self):
args = self._make_args({'namespace': 'MyNamespace',
'max_column_width': 80,
'resource_type': None})
with mock.patch.object(self.gc.metadefs_namespace,
'get') as mocked_get:
expect_namespace = {}
expect_namespace['namespace'] = 'MyNamespace'
mocked_get.return_value = expect_namespace
test_shell.do_md_namespace_show(self.gc, args)
mocked_get.assert_called_once_with('MyNamespace')
utils.print_dict.assert_called_once_with(expect_namespace, 80)
def test_do_md_namespace_show_resource_type(self):
args = self._make_args({'namespace': 'MyNamespace',
'max_column_width': 80,
'resource_type': 'RESOURCE'})
with mock.patch.object(self.gc.metadefs_namespace,
'get') as mocked_get:
expect_namespace = {}
expect_namespace['namespace'] = 'MyNamespace'
mocked_get.return_value = expect_namespace
test_shell.do_md_namespace_show(self.gc, args)
mocked_get.assert_called_once_with('MyNamespace',
resource_type='RESOURCE')
utils.print_dict.assert_called_once_with(expect_namespace, 80)
def test_do_md_namespace_list(self):
args = self._make_args({'resource_type': None,
'visibility': None,
'page_size': None})
with mock.patch.object(self.gc.metadefs_namespace,
'list') as mocked_list:
expect_namespaces = [{'namespace': 'MyNamespace'}]
mocked_list.return_value = expect_namespaces
test_shell.do_md_namespace_list(self.gc, args)
mocked_list.assert_called_once_with(filters={})
utils.print_list.assert_called_once_with(expect_namespaces,
['namespace'])
def test_do_md_namespace_list_page_size(self):
args = self._make_args({'resource_type': None,
'visibility': None,
'page_size': 2})
with mock.patch.object(self.gc.metadefs_namespace,
'list') as mocked_list:
expect_namespaces = [{'namespace': 'MyNamespace'}]
mocked_list.return_value = expect_namespaces
test_shell.do_md_namespace_list(self.gc, args)
mocked_list.assert_called_once_with(filters={}, page_size=2)
utils.print_list.assert_called_once_with(expect_namespaces,
['namespace'])
def test_do_md_namespace_list_one_filter(self):
args = self._make_args({'resource_types': ['OS::Compute::Aggregate'],
'visibility': None,
'page_size': None})
with mock.patch.object(self.gc.metadefs_namespace, 'list') as \
mocked_list:
expect_namespaces = [{'namespace': 'MyNamespace'}]
mocked_list.return_value = expect_namespaces
test_shell.do_md_namespace_list(self.gc, args)
mocked_list.assert_called_once_with(filters={
'resource_types': ['OS::Compute::Aggregate']})
utils.print_list.assert_called_once_with(expect_namespaces,
['namespace'])
def test_do_md_namespace_list_all_filters(self):
args = self._make_args({'resource_types': ['OS::Compute::Aggregate'],
'visibility': 'public',
'page_size': None})
with mock.patch.object(self.gc.metadefs_namespace,
'list') as mocked_list:
expect_namespaces = [{'namespace': 'MyNamespace'}]
mocked_list.return_value = expect_namespaces
test_shell.do_md_namespace_list(self.gc, args)
mocked_list.assert_called_once_with(filters={
'resource_types': ['OS::Compute::Aggregate'],
'visibility': 'public'})
utils.print_list.assert_called_once_with(expect_namespaces,
['namespace'])
def test_do_md_namespace_list_unknown_filter(self):
args = self._make_args({'resource_type': None,
'visibility': None,
'some_arg': 'some_value',
'page_size': None})
with mock.patch.object(self.gc.metadefs_namespace,
'list') as mocked_list:
expect_namespaces = [{'namespace': 'MyNamespace'}]
mocked_list.return_value = expect_namespaces
test_shell.do_md_namespace_list(self.gc, args)
mocked_list.assert_called_once_with(filters={})
utils.print_list.assert_called_once_with(expect_namespaces,
['namespace'])
def test_do_md_namespace_delete(self):
args = self._make_args({'namespace': 'MyNamespace',
'content': False})
with mock.patch.object(self.gc.metadefs_namespace, 'delete') as \
mocked_delete:
test_shell.do_md_namespace_delete(self.gc, args)
mocked_delete.assert_called_once_with('MyNamespace')
def test_do_md_resource_type_associate(self):
args = self._make_args({'namespace': 'MyNamespace',
'name': 'MyResourceType',
'prefix': 'PREFIX:'})
with mock.patch.object(self.gc.metadefs_resource_type,
'associate') as mocked_associate:
expect_rt = {}
expect_rt['namespace'] = 'MyNamespace'
expect_rt['name'] = 'MyResourceType'
expect_rt['prefix'] = 'PREFIX:'
mocked_associate.return_value = expect_rt
test_shell.do_md_resource_type_associate(self.gc, args)
mocked_associate.assert_called_once_with('MyNamespace',
**expect_rt)
utils.print_dict.assert_called_once_with(expect_rt)
def test_do_md_resource_type_deassociate(self):
args = self._make_args({'namespace': 'MyNamespace',
'resource_type': 'MyResourceType'})
with mock.patch.object(self.gc.metadefs_resource_type,
'deassociate') as mocked_deassociate:
test_shell.do_md_resource_type_deassociate(self.gc, args)
mocked_deassociate.assert_called_once_with('MyNamespace',
'MyResourceType')
def test_do_md_resource_type_list(self):
args = self._make_args({})
with mock.patch.object(self.gc.metadefs_resource_type,
'list') as mocked_list:
expect_objects = ['MyResourceType1', 'MyResourceType2']
mocked_list.return_value = expect_objects
test_shell.do_md_resource_type_list(self.gc, args)
self.assertEqual(1, mocked_list.call_count)
def test_do_md_namespace_resource_type_list(self):
args = self._make_args({'namespace': 'MyNamespace'})
with mock.patch.object(self.gc.metadefs_resource_type,
'get') as mocked_get:
expect_objects = [{'namespace': 'MyNamespace',
'object': 'MyObject'}]
mocked_get.return_value = expect_objects
test_shell.do_md_namespace_resource_type_list(self.gc, args)
mocked_get.assert_called_once_with('MyNamespace')
utils.print_list.assert_called_once_with(expect_objects,
['name', 'prefix',
'properties_target'])
def test_do_md_property_create(self):
args = self._make_args({'namespace': 'MyNamespace',
'name': "MyProperty",
'title': "Title",
'schema': '{}'})
with mock.patch.object(self.gc.metadefs_property,
'create') as mocked_create:
expect_property = {}
expect_property['namespace'] = 'MyNamespace'
expect_property['name'] = 'MyProperty'
expect_property['title'] = 'Title'
mocked_create.return_value = expect_property
test_shell.do_md_property_create(self.gc, args)
mocked_create.assert_called_once_with('MyNamespace',
name='MyProperty',
title='Title')
utils.print_dict.assert_called_once_with(expect_property)
def test_do_md_property_create_invalid_schema(self):
args = self._make_args({'namespace': 'MyNamespace',
'name': "MyProperty",
'title': "Title",
'schema': 'Invalid'})
self.assertRaises(SystemExit, test_shell.do_md_property_create,
self.gc, args)
def test_do_md_property_update(self):
args = self._make_args({'namespace': 'MyNamespace',
'property': 'MyProperty',
'name': 'NewName',
'title': "Title",
'schema': '{}'})
with mock.patch.object(self.gc.metadefs_property,
'update') as mocked_update:
expect_property = {}
expect_property['namespace'] = 'MyNamespace'
expect_property['name'] = 'MyProperty'
expect_property['title'] = 'Title'
mocked_update.return_value = expect_property
test_shell.do_md_property_update(self.gc, args)
mocked_update.assert_called_once_with('MyNamespace', 'MyProperty',
name='NewName',
title='Title')
utils.print_dict.assert_called_once_with(expect_property)
def test_do_md_property_update_invalid_schema(self):
args = self._make_args({'namespace': 'MyNamespace',
'property': 'MyProperty',
'name': "MyObject",
'title': "Title",
'schema': 'Invalid'})
self.assertRaises(SystemExit, test_shell.do_md_property_update,
self.gc, args)
def test_do_md_property_show(self):
args = self._make_args({'namespace': 'MyNamespace',
'property': 'MyProperty',
'max_column_width': 80})
with mock.patch.object(self.gc.metadefs_property, 'get') as mocked_get:
expect_property = {}
expect_property['namespace'] = 'MyNamespace'
expect_property['property'] = 'MyProperty'
expect_property['title'] = 'Title'
mocked_get.return_value = expect_property
test_shell.do_md_property_show(self.gc, args)
mocked_get.assert_called_once_with('MyNamespace', 'MyProperty')
utils.print_dict.assert_called_once_with(expect_property, 80)
def test_do_md_property_delete(self):
args = self._make_args({'namespace': 'MyNamespace',
'property': 'MyProperty'})
with mock.patch.object(self.gc.metadefs_property,
'delete') as mocked_delete:
test_shell.do_md_property_delete(self.gc, args)
mocked_delete.assert_called_once_with('MyNamespace', 'MyProperty')
def test_do_md_namespace_property_delete(self):
args = self._make_args({'namespace': 'MyNamespace'})
with mock.patch.object(self.gc.metadefs_property,
'delete_all') as mocked_delete_all:
test_shell.do_md_namespace_properties_delete(self.gc, args)
mocked_delete_all.assert_called_once_with('MyNamespace')
def test_do_md_property_list(self):
args = self._make_args({'namespace': 'MyNamespace'})
with mock.patch.object(self.gc.metadefs_property,
'list') as mocked_list:
expect_objects = [{'namespace': 'MyNamespace',
'property': 'MyProperty',
'title': 'MyTitle'}]
mocked_list.return_value = expect_objects
test_shell.do_md_property_list(self.gc, args)
mocked_list.assert_called_once_with('MyNamespace')
utils.print_list.assert_called_once_with(expect_objects,
['name', 'title', 'type'])
def test_do_md_object_create(self):
args = self._make_args({'namespace': 'MyNamespace',
'name': "MyObject",
'schema': '{}'})
with mock.patch.object(self.gc.metadefs_object,
'create') as mocked_create:
expect_object = {}
expect_object['namespace'] = 'MyNamespace'
expect_object['name'] = 'MyObject'
mocked_create.return_value = expect_object
test_shell.do_md_object_create(self.gc, args)
mocked_create.assert_called_once_with('MyNamespace',
name='MyObject')
utils.print_dict.assert_called_once_with(expect_object)
def test_do_md_object_create_invalid_schema(self):
args = self._make_args({'namespace': 'MyNamespace',
'name': "MyObject",
'schema': 'Invalid'})
self.assertRaises(SystemExit, test_shell.do_md_object_create,
self.gc, args)
def test_do_md_object_update(self):
args = self._make_args({'namespace': 'MyNamespace',
'object': 'MyObject',
'name': 'NewName',
'schema': '{}'})
with mock.patch.object(self.gc.metadefs_object,
'update') as mocked_update:
expect_object = {}
expect_object['namespace'] = 'MyNamespace'
expect_object['name'] = 'MyObject'
mocked_update.return_value = expect_object
test_shell.do_md_object_update(self.gc, args)
mocked_update.assert_called_once_with('MyNamespace', 'MyObject',
name='NewName')
utils.print_dict.assert_called_once_with(expect_object)
def test_do_md_object_update_invalid_schema(self):
args = self._make_args({'namespace': 'MyNamespace',
'object': 'MyObject',
'name': "MyObject",
'schema': 'Invalid'})
self.assertRaises(SystemExit, test_shell.do_md_object_update,
self.gc, args)
def test_do_md_object_show(self):
args = self._make_args({'namespace': 'MyNamespace',
'object': 'MyObject',
'max_column_width': 80})
with mock.patch.object(self.gc.metadefs_object, 'get') as mocked_get:
expect_object = {}
expect_object['namespace'] = 'MyNamespace'
expect_object['object'] = 'MyObject'
mocked_get.return_value = expect_object
test_shell.do_md_object_show(self.gc, args)
mocked_get.assert_called_once_with('MyNamespace', 'MyObject')
utils.print_dict.assert_called_once_with(expect_object, 80)
def test_do_md_object_property_show(self):
args = self._make_args({'namespace': 'MyNamespace',
'object': 'MyObject',
'property': 'MyProperty',
'max_column_width': 80})
with mock.patch.object(self.gc.metadefs_object, 'get') as mocked_get:
expect_object = {'name': 'MyObject',
'properties': {
'MyProperty': {'type': 'string'}
}}
mocked_get.return_value = expect_object
test_shell.do_md_object_property_show(self.gc, args)
mocked_get.assert_called_once_with('MyNamespace', 'MyObject')
utils.print_dict.assert_called_once_with({'type': 'string',
'name': 'MyProperty'},
80)
def test_do_md_object_property_show_non_existing(self):
args = self._make_args({'namespace': 'MyNamespace',
'object': 'MyObject',
'property': 'MyProperty',
'max_column_width': 80})
with mock.patch.object(self.gc.metadefs_object, 'get') as mocked_get:
expect_object = {'name': 'MyObject', 'properties': {}}
mocked_get.return_value = expect_object
self.assertRaises(SystemExit,
test_shell.do_md_object_property_show,
self.gc, args)
mocked_get.assert_called_once_with('MyNamespace', 'MyObject')
def test_do_md_object_delete(self):
args = self._make_args({'namespace': 'MyNamespace',
'object': 'MyObject'})
with mock.patch.object(self.gc.metadefs_object,
'delete') as mocked_delete:
test_shell.do_md_object_delete(self.gc, args)
mocked_delete.assert_called_once_with('MyNamespace', 'MyObject')
def test_do_md_namespace_objects_delete(self):
args = self._make_args({'namespace': 'MyNamespace'})
with mock.patch.object(self.gc.metadefs_object,
'delete_all') as mocked_delete_all:
test_shell.do_md_namespace_objects_delete(self.gc, args)
mocked_delete_all.assert_called_once_with('MyNamespace')
def test_do_md_object_list(self):
args = self._make_args({'namespace': 'MyNamespace'})
with mock.patch.object(self.gc.metadefs_object, 'list') as mocked_list:
expect_objects = [{'namespace': 'MyNamespace',
'object': 'MyObject'}]
mocked_list.return_value = expect_objects
test_shell.do_md_object_list(self.gc, args)
mocked_list.assert_called_once_with('MyNamespace')
utils.print_list.assert_called_once_with(
expect_objects,
['name', 'description'],
field_settings={
'description': {'align': 'l', 'max_width': 50}})
| {
"content_hash": "ccb978f09d8818b7725b5f2cdd483eab",
"timestamp": "",
"source": "github",
"line_count": 1083,
"max_line_length": 79,
"avg_line_length": 43.43582640812558,
"alnum_prop": 0.5021364341744435,
"repo_name": "mmasaki/python-glanceclient",
"id": "d5868055c1ead07b807826581f1aa1277b13fc13",
"size": "47709",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "glanceclient/tests/unit/v2/test_shell_v2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "576366"
},
{
"name": "Shell",
"bytes": "3174"
}
],
"symlink_target": ""
} |
from numpy import *
import theano
import theano.tensor as T
import theano.typed_list as tl
import theano.sparse as sparse
from scipy.misc import logsumexp
from scipy.optimize import fmin_ncg
import scipy.sparse as sp
import time
"""
Explore how to implement custom theano ops.
For now Python only
"""
class MySoftmax(theano.gof.Op):
__props__ = ()
def make_node(self, Eta):
return theano.Apply(self, [Eta], [Eta.type(), Eta.type()])
def perform(self, node, input_storage, output_storage):
print "perform"
Eta = input_storage[0]
lNorm = logsumexp(Eta, axis=1).reshape(N,1)
lP = Eta - lNorm
output_storage[0][0] = exp(lP)
output_storage[1][0] = exp(lP)
def grad(self, inputs, g):
print "gradnick"
Eta = input_storage[0]
# Smoke-test the custom op against theano's built-in softmax on a toy problem.
mysoftmax = MySoftmax()
random.seed(1)
# Toy problem sizes; note MySoftmax.perform reads N from this module scope.
K = 5 #nClasses
N = 6 #nSamples
D = 3 #nFeatures
#single precision for now
theano.config.floatX = 'float32'
theano.config.scan.allow_gc = False
#setup toy example
W = random.randn(D,K)
X = random.randn(N,D)
Eta = dot(X,W)
# Reference numpy softmax in log space (lP: log-probabilities per row).
lNorm = logsumexp(Eta, axis=1).reshape(N,1)
lP = Eta - lNorm
#take one sample from a multinomial distribution specified by a row of lP
_,y = apply_along_axis(lambda row: random.multinomial(1, exp(row)), axis=1, arr=lP).nonzero()
W = W.astype(float32)
X = X.astype(float32)
y = y.astype(int32)
#setup theano
tW = T.matrix('W')
tX = T.matrix('X')
ty = T.ivector('y')
tlambda = T.scalar('lambda')
#symbolic representation
tEta = T.dot(tX, tW)
Eta = Eta.astype(float32)
# Compile and run the custom op directly on the numeric Eta.
b = theano.function([tEta], mysoftmax(tEta))
c = b(Eta)
# tP: built-in softmax; tP2: the custom op (a list of two identical outputs).
tP = T.nnet.softmax(tEta)
tP2 = mysoftmax(tEta)
hey = theano.function( [tX, tW], tP2)
terror = T.nnet.categorical_crossentropy(tP, ty).mean() #+ tlambda * tW.norm(2)**2 # we could add some Tikhonov regularization
terr2 = T.nnet.categorical_crossentropy(tP2[0], ty).mean()
# NOTE(review): taking T.grad through the custom op will fail until
# MySoftmax.grad returns actual gradients.
tgrad = T.grad(terror, tW)
#f = theano.function([tW, tX, ty, tlambda], terror)
#g = theano.function([tW, tX, ty, tlambda], tgrad)
| {
"content_hash": "632e731a50f5ede7a5bbd298bcaa1deb",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 126,
"avg_line_length": 26.064935064935064,
"alnum_prop": 0.672645739910314,
"repo_name": "yjk21/theano-hsm",
"id": "f8726fcbdda1ef9c008edc9a5c67af19eec33ecb",
"size": "2007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "optest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14607"
}
],
"symlink_target": ""
} |
from bingads.v13.bulk.entities import *
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V13
from bingads.v13.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v13.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v13.internal.bulk.string_table import _StringTable
from bingads.v13.internal.extensions import *
class BulkCampaignLocationIntentCriterion(_SingleRecordBulkEntity):
    """ Represents a Campaign Location Intent Criterion that can be read or written in a bulk file.
    This class exposes the :attr:`biddable_campaign_criterion` property that can be read and written as fields of the
    Campaign Location Intent Criterion record in a bulk file.
    For more information, see Campaign Location Intent Criterion at https://go.microsoft.com/fwlink/?linkid=846127.
    *See also:*
    * :class:`.BulkServiceManager`
    * :class:`.BulkOperation`
    * :class:`.BulkFileReader`
    * :class:`.BulkFileWriter`
    """
    def __init__(self,
                 biddable_campaign_criterion=None,
                 campaign_name=None, ):
        """ Initialize the bulk entity, optionally wrapping an existing criterion.

        :param biddable_campaign_criterion: the campaign criterion object to expose via this bulk record.
        :param campaign_name: the name of the campaign the criterion belongs to.
        :type campaign_name: str
        """
        super(BulkCampaignLocationIntentCriterion, self).__init__()
        self._biddable_campaign_criterion = biddable_campaign_criterion
        self._campaign_name = campaign_name
    # Declarative two-way mapping between bulk-file CSV columns (_StringTable
    # names) and attributes of the wrapped criterion / this entity.
    _MAPPINGS = [
        _SimpleBulkMapping(
            _StringTable.Status,
            field_to_csv=lambda c: bulk_str(c.biddable_campaign_criterion.Status),
            csv_to_field=lambda c, v: setattr(c.biddable_campaign_criterion, 'Status', v if v else None)
        ),
        _SimpleBulkMapping(
            _StringTable.Id,
            field_to_csv=lambda c: bulk_str(c.biddable_campaign_criterion.Id),
            csv_to_field=lambda c, v: setattr(c.biddable_campaign_criterion, 'Id', int(v) if v else None)
        ),
        _SimpleBulkMapping(
            # The bulk-file "Parent Id" column holds the owning campaign's id.
            _StringTable.ParentId,
            field_to_csv=lambda c: bulk_str(c.biddable_campaign_criterion.CampaignId),
            csv_to_field=lambda c, v: setattr(c.biddable_campaign_criterion, 'CampaignId', int(v) if v else None)
        ),
        _SimpleBulkMapping(
            _StringTable.Campaign,
            field_to_csv=lambda c: c.campaign_name,
            csv_to_field=lambda c, v: setattr(c, 'campaign_name', v)
        ),
        _SimpleBulkMapping(
            # Location-intent target serialization delegated to shared helpers.
            _StringTable.Target,
            field_to_csv=lambda c: field_to_csv_LocationIntentTarget(c.biddable_campaign_criterion),
            csv_to_field=lambda c, v: csv_to_field_LocationIntentTarget(c.biddable_campaign_criterion, v)
        ),
    ]
    @property
    def biddable_campaign_criterion(self):
        """ Defines a Campaign Criterion """
        return self._biddable_campaign_criterion
    @biddable_campaign_criterion.setter
    def biddable_campaign_criterion(self, biddable_campaign_criterion):
        self._biddable_campaign_criterion = biddable_campaign_criterion
    @property
    def campaign_name(self):
        """ The name of the Campaign
        :rtype: str
        """
        return self._campaign_name
    @campaign_name.setter
    def campaign_name(self, campaign_name):
        self._campaign_name = campaign_name
    def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
        """ Serialize this entity into a bulk-file row using _MAPPINGS. """
        self._validate_property_not_null(self.biddable_campaign_criterion, 'biddable_campaign_criterion')
        self.convert_to_values(row_values, BulkCampaignLocationIntentCriterion._MAPPINGS)
    def process_mappings_from_row_values(self, row_values):
        """ Deserialize a bulk-file row into a fresh BiddableCampaignCriterion
        holding a LocationIntentCriterion, then populate it via _MAPPINGS. """
        self._biddable_campaign_criterion = _CAMPAIGN_OBJECT_FACTORY_V13.create('BiddableCampaignCriterion')
        self._biddable_campaign_criterion.Type = 'BiddableCampaignCriterion'
        self._biddable_campaign_criterion.Criterion = _CAMPAIGN_OBJECT_FACTORY_V13.create('LocationIntentCriterion')
        self._biddable_campaign_criterion.Criterion.Type = 'LocationIntentCriterion'
        row_values.convert_to_entity(self, BulkCampaignLocationIntentCriterion._MAPPINGS)
    def read_additional_data(self, stream_reader):
        # Single-record entity: no extra rows to consume beyond the base class.
        super(BulkCampaignLocationIntentCriterion, self).read_additional_data(stream_reader)
| {
"content_hash": "61da2b70b6335438c67f2dc79c9d5b29",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 117,
"avg_line_length": 43.68421052631579,
"alnum_prop": 0.6913253012048193,
"repo_name": "bing-ads-sdk/BingAds-Python-SDK",
"id": "483455318348a35df7f18d33e94f9f5c0ca92c4f",
"size": "4150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bingads/v13/bulk/entities/target_criterions/bulk_campaign_location_intent_criterion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "947470"
}
],
"symlink_target": ""
} |
__author__ = 'Tom Schaul, tom@idsia.ch and Daan Wiertra, daan@idsia.ch'
from scipy import zeros, array, mean, randn, exp, dot, argmax
from pybrain.datasets import ReinforcementDataSet, ImportanceDataSet, SequentialDataSet
from pybrain.supervised import BackpropTrainer
from pybrain.utilities import drawIndex
from pybrain.rl.learners.directsearch.directsearch import DirectSearchLearner
# TODO: greedy runs: start once in every possible starting state!
# TODO: supervised: train-set, test-set, early stopping -> actual convergence!
class RWR(DirectSearchLearner):
    """ Reward-weighted regression.
    The algorithm is currently limited to discrete-action episodic tasks, subclasses of POMDPTasks.

    Each batch collects episodes with stochastic action selection, converts the
    returns into importance weights via an exponential shaping function with an
    adaptive temperature tau, and then fits the policy network to the chosen
    actions by weighted supervised training (backprop).
    """
    # parameters
    batchSize = 20
    # feedback settings
    verbose = True
    greedyRuns = 20
    supervisedPlotting = False
    # settings for the supervised training
    learningRate = 0.005
    momentum = 0.9
    maxEpochs = 20
    validationProportion = 0.33
    continueEpochs = 2
    # parameters for the variation that uses a value function
    # TODO: split into 2 classes.
    valueLearningRate = None
    valueMomentum = None
    #valueTrainEpochs = 5
    resetAllWeights = False
    netweights = 0.01
    def __init__(self, net, task, valueNetwork=None, **args):
        # net: the policy network mapping observations to action probabilities.
        # task: the episodic task to learn on.
        # valueNetwork: optional value-estimating network (variant, see above).
        self.net = net
        self.task = task
        self.setArgs(**args)
        if self.valueLearningRate == None:
            self.valueLearningRate = self.learningRate
        if self.valueMomentum == None:
            self.valueMomentum = self.momentum
        if self.supervisedPlotting:
            from pylab import ion
            ion()
        # adaptive temperature:
        self.tau = 1.
        # prepare the datasets to be used
        self.weightedDs = ImportanceDataSet(self.task.outdim, self.task.indim)
        self.rawDs = ReinforcementDataSet(self.task.outdim, self.task.indim)
        self.valueDs = SequentialDataSet(self.task.outdim, 1)
        # prepare the supervised trainers
        self.bp = BackpropTrainer(self.net, self.weightedDs, self.learningRate,
                                  self.momentum, verbose=False,
                                  batchlearning=True)
        # CHECKME: outsource
        self.vnet = valueNetwork
        if valueNetwork != None:
            self.vbp = BackpropTrainer(self.vnet, self.valueDs, self.valueLearningRate,
                                       self.valueMomentum, verbose=self.verbose)
        # keep information:
        self.totalSteps = 0
        self.totalEpisodes = 0
    def shapingFunction(self, R):
        """ Map returns R to positive importance weights: exp(tau * R). """
        return exp(self.tau * R)
    def updateTau(self, R, U):
        """ Adapt the shaping temperature from returns R and their weights U. """
        self.tau = sum(U) / dot((R - self.task.minReward), U)
    def reset(self):
        """ Clear the per-batch datasets and trainer momentum. """
        self.weightedDs.clear()
        self.valueDs.clear()
        self.rawDs.clear()
        self.bp.momentumvector *= 0.0
        if self.vnet != None:
            self.vbp.momentumvector *= 0.0
            if self.resetAllWeights:
                self.vnet.params[:] = randn(len(self.vnet.params)) * self.netweights
    def greedyEpisode(self):
        """ run one episode with greedy decisions, return the list of rewards recieved."""
        rewards = []
        self.task.reset()
        self.net.reset()
        while not self.task.isFinished():
            obs = self.task.getObservation()
            act = self.net.activate(obs)
            chosen = argmax(act)
            self.task.performAction(chosen)
            reward = self.task.getReward()
            rewards.append(reward)
        return rewards
    def learn(self, batches):
        """ Run the given number of batches, tracking greedy/average rewards
        in self.greedyAvg / self.rewardAvg / self.lengthAvg / self.initr0Avg. """
        self.greedyAvg = []
        self.rewardAvg = []
        self.lengthAvg = []
        self.initr0Avg = []
        for b in range(batches):
            if self.verbose:
                print
                print 'Batch', b + 1
            self.reset()
            self.learnOneBatch()
            self.totalEpisodes += self.batchSize
            # greedy measure (avg over some greedy runs)
            rws = 0.
            for dummy in range(self.greedyRuns):
                tmp = self.greedyEpisode()
                rws += (sum(tmp) / float(len(tmp)))
            self.greedyAvg.append(rws / self.greedyRuns)
            if self.verbose:
                print '::', round(rws / self.greedyRuns, 5), '::'
    def learnOneBatch(self):
        """ Collect batchSize episodes, turn returns into importance weights,
        and train the policy network on the re-weighted action targets. """
        # collect a batch of runs as experience
        r0s = []
        lens = []
        avgReward = 0.
        for dummy in range(self.batchSize):
            self.rawDs.newSequence()
            self.valueDs.newSequence()
            self.task.reset()
            self.net.reset()
            acts, obss, rewards = [], [], []
            while not self.task.isFinished():
                obs = self.task.getObservation()
                act = self.net.activate(obs)
                chosen = drawIndex(act)
                self.task.performAction(chosen)
                reward = self.task.getReward()
                obss.append(obs)
                # one-hot encoding of the stochastically chosen action
                y = zeros(len(act))
                y[chosen] = 1
                acts.append(y)
                rewards.append(reward)
            avgReward += sum(rewards) / float(len(rewards))
            # compute the returns from the list of rewards
            current = 0
            returns = []
            for r in reversed(rewards):
                current *= self.task.discount
                current += r
                returns.append(current)
            returns.reverse()
            for i in range(len(obss)):
                self.rawDs.addSample(obss[i], acts[i], returns[i])
                self.valueDs.addSample(obss[i], returns[i])
            r0s.append(returns[0])
            lens.append(len(returns))
        r0s = array(r0s)
        self.totalSteps += sum(lens)
        avgLen = sum(lens) / float(self.batchSize)
        avgR0 = mean(r0s)
        avgReward /= self.batchSize
        if self.verbose:
            print '***', round(avgLen, 3), '***', '(avg init exp. return:', round(avgR0, 5), ')',
            print 'avg reward', round(avgReward, 5), '(tau:', round(self.tau, 3), ')'
            print lens
        # storage:
        self.rewardAvg.append(avgReward)
        self.lengthAvg.append(avgLen)
        self.initr0Avg.append(avgR0)
        #       if self.vnet == None:
        #           # case 1: no value estimator:
        # prepare the dataset for training the acting network
        shaped = self.shapingFunction(r0s)
        self.updateTau(r0s, shaped)
        shaped /= max(shaped)
        for i, seq in enumerate(self.rawDs):
            self.weightedDs.newSequence()
            for sample in seq:
                obs, act, dummy = sample
                self.weightedDs.addSample(obs, act, shaped[i])
        #       else:
        #           # case 2: value estimator:
        #
        #
        #           # train the value estimating network
        #           if self.verbose: print 'Old value error:  ', self.vbp.testOnData()
        #           self.vbp.trainEpochs(self.valueTrainEpochs)
        #           if self.verbose: print 'New value error:  ', self.vbp.testOnData()
        #
        #           # produce the values and analyze
        #           rminusvs = []
        #           sizes = []
        #           for i, seq in enumerate(self.valueDs):
        #               self.vnet.reset()
        #               seq = list(seq)
        #               for sample in seq:
        #                   obs, ret = sample
        #                   val = self.vnet.activate(obs)
        #                   rminusvs.append(ret-val)
        #               sizes.append(len(seq))
        #
        #           rminusvs = array(rminusvs)
        #           shapedRminusv = self.shapingFunction(rminusvs)
        #           # CHECKME: here?
        #           self.updateTau(rminusvs, shapedRminusv)
        #           shapedRminusv /= array(sizes)
        #           shapedRminusv /= max(shapedRminusv)
        #
        #           # prepare the dataset for training the acting network
        #           rvindex = 0
        #           for i, seq in enumerate(self.rawDs):
        #               self.weightedDs.newSequence()
        #               self.vnet.reset()
        #               for sample in seq:
        #                   obs, act, ret = sample
        #                   self.weightedDs.addSample(obs, act, shapedRminusv[rvindex])
        #                   rvindex += 1
        # train the acting network
        tmp1, tmp2 = self.bp.trainUntilConvergence(maxEpochs=self.maxEpochs,
                                                   validationProportion=self.validationProportion,
                                                   continueEpochs=self.continueEpochs,
                                                   verbose=self.verbose)
        if self.supervisedPlotting:
            from pylab import plot, legend, figure, clf, draw
            figure(1)
            clf()
            plot(tmp1, label='train')
            plot(tmp2, label='valid')
            legend()
            draw()
        return avgLen, avgR0
| {
"content_hash": "a5734508f5251d16181e5e4c837cc608",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 99,
"avg_line_length": 36.88844621513944,
"alnum_prop": 0.5258667242682795,
"repo_name": "rbalda/neural_ocr",
"id": "9bf200924e774ac46f783861cf1d549e47b9a7b3",
"size": "9259",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "env/lib/python2.7/site-packages/pybrain/rl/learners/directsearch/rwr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "497604"
},
{
"name": "C++",
"bytes": "3309990"
},
{
"name": "CSS",
"bytes": "135235"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "HTML",
"bytes": "215390"
},
{
"name": "JavaScript",
"bytes": "206780"
},
{
"name": "Jupyter Notebook",
"bytes": "16254"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "26980034"
},
{
"name": "Shell",
"bytes": "3895"
}
],
"symlink_target": ""
} |
import time
from os import environ
from twisted.internet import reactor
from twisted.internet.defer import DeferredList
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
    """
    An application component using the time service.
    """

    def onJoin(self, details):
        print("session attached")

        def report(res, started, msg):
            # Elapsed wall time in milliseconds since the call was issued.
            elapsed_ms = 1000. * (time.clock() - started)
            print("{}: {} in {}".format(msg, res, elapsed_ms))

        def shutdown(_):
            print("All finished.")
            self.leave()

        slow_started = time.clock()
        slow_call = self.call('com.math.slowsquare', 3)
        slow_call.addCallback(report, slow_started, "Slow Square")

        quick_started = time.clock()
        quick_call = self.call('com.math.square', 3)
        quick_call.addCallback(report, quick_started, "Quick Square")

        # Leave the session once both RPCs have settled (success or failure).
        DeferredList([slow_call, quick_call]).addBoth(shutdown)

    def onDisconnect(self):
        print("disconnected")
        reactor.stop()
if __name__ == '__main__':
    # Connect to the WAMP router given by AUTOBAHN_DEMO_ROUTER (defaulting to
    # a local Crossbar instance) and run the Component on realm "crossbardemo".
    runner = ApplicationRunner(
        environ.get("AUTOBAHN_DEMO_ROUTER", "ws://localhost:8080/ws"),
        u"crossbardemo",
        debug_wamp=False,  # optional; log many WAMP details
        debug=False,  # optional; log even more details
    )
    runner.run(Component)
| {
"content_hash": "c2f551f34aadb61ac0beb2362413dd0a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 71,
"avg_line_length": 26.6875,
"alnum_prop": 0.6057767369242779,
"repo_name": "dash-dash/AutobahnPython",
"id": "47042523aab52d11c6fb38a1d22969612c46ed03",
"size": "2558",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/twisted/wamp/rpc/slowsquare/frontend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "2711"
},
{
"name": "HTML",
"bytes": "86275"
},
{
"name": "JavaScript",
"bytes": "104724"
},
{
"name": "Makefile",
"bytes": "4770"
},
{
"name": "Python",
"bytes": "1304866"
},
{
"name": "Shell",
"bytes": "158"
}
],
"symlink_target": ""
} |
from mysql_sqlalchemy_class import *
from sqlalchemy import Column, Integer, String, ForeignKey
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import backref, mapper, relation, sessionmaker
from elixir import *
from sqlalchemy import *
from AnagraficaDb import *
class Odict(dict):
    """Minimal ordered dict: a ``dict`` that remembers key insertion order.

    Keys are recorded in ``self.order`` in the order they are first inserted;
    ``iterkeys`` returns that list so iteration is deterministic.
    """
    def __init__(self):
        dict.__init__(self)
        self.order = []

    def clear(self):
        """Remove all items and reset the recorded key order."""
        dict.clear(self)
        self.order = []

    def __setitem__(self, key, value):
        """Store ``key``/``value``, recording ``key`` on first insertion only.

        BUG FIX: the original appended the key unconditionally, so reassigning
        an existing key duplicated it in ``self.order`` (and thus in any
        iteration over the keys).
        """
        if key not in self:
            self.order.append(key)
        dict.__setitem__(self, key, value)

    def __delitem__(self, key):
        """Delete ``key`` and drop it from the recorded order.

        Robustness fix: the original never maintained ``order`` on deletion,
        leaving stale keys behind.
        """
        dict.__delitem__(self, key)
        if key in self.order:
            self.order.remove(key)

    def iterkeys(self):
        """Return the keys in first-insertion order (a list, not an iterator)."""
        return self.order
class Entity2Export():
    """
    Represents an entity (a field/column) that can be selected
    for the customer-registry ("anagrafica") extraction.
    """
    def initDB(self,dbfile):
        """
        Initialize the database binding.
        @param string: db connection string in the form "user:pwd@host/dbname",
        e.g. root:xxxxx@localhost/parafarmacie
        """
        metadata.bind = "mysql://%s"%dbfile
        metadata.bind.echo = False
        setup_all()
    def fetchPv(self,Id):
        """
        Fetch the point of sale (pv) for the given Id.
        @param int: pv id
        @return: a mysql_sqlalchemy_class.Pv instance
        """
        return self.db.fetchPv(Id)
    def setDb(self,db):
        # Setter for the database-access object used by all fetch methods.
        self.db=db
    def populateFetchers(self,populate):
        """
        Populate the ordered dict of data fetchers (one entry per column).
        """
        # l returns the sqlalchemy object's 'valore' attribute when the pv has
        # an MMAS potential, or the empty string when it does not.
        l=lambda x:x.valore if type(x)!=type("") else ""
        # l1 returns x when populate is True, None otherwise (lazy population).
        l1=lambda x,y:x if y==True else None
        self.fetchers['codice MMAS']=l1(self.pv.codice,populate)
        self.fetchers['potenziale MMAS']=l1(l(self.db.getPotentialValueById4Extraction(self.pv_id)),populate)
        self.fetchers['ragione sociale']=l1(self.pv.ragione_sociale,populate)
        self.fetchers['titolare']=l1(self.pv.titolare,populate)
        self.fetchers['codFisc_P.IVA']=l1(self.pv.cf_pi,populate)
        self.fetchers['cod cliente']=l1(self.pv.cod_cliente,populate)
        self.fetchers['indirizzo']=l1(self.pv.indirizzo,populate)
        self.fetchers['comune']=l1(self.pv.comune,populate)
        self.fetchers['provincia']=l1(self.pv.provincia,populate)
        self.fetchers['cap']=l1(self.pv.cap,populate)
        self.fetchers['telefono']=l1(self.pv.tel1,populate)
        self.fetchers['fax']=l1(self.pv.fax,populate)
        self.fetchers['sito']=l1(self.pv.sito,populate)
        self.fetchers['email']=l1(self.pv.email,populate)
        self.fetchers['cliente']=l1(self.pv.cliente,populate)
        self.fetchers['certificato']=l1(self.pv.certificato,populate)
        self.fetchers['codice istat']=l1(self.pv.tc_istat_id,populate)
    def setId(self,Id,populate=True):
        """
        Setter for self.pv_id.
        Alongside the id, self.pv and self.fetchers must be refreshed.
        """
        self.pv_id=Id
        # refresh self.pv
        self.pv=self.fetchPv(Id)
        self.populateFetchers(populate)
    def getAvailableEntity(self):
        """
        Return the list of referenceable entities,
        i.e. the list of keys of self.fetchers (in insertion order).
        @return: [string]
        """
        l=[]
        for i in self.fetchers.iterkeys():
            l.append(i)
        return l
    def setHeader(self, h):
        """
        Setter for self.header. This attribute configures the instance:
        getValue returns the value of the Pv attribute that, in the dict
        self.fetchers, is identified by the key matching self.header.
        @param string: one of the attributes of the pv table
        """
        self.header=h
    def getHeaders(self,subfix=True):
        """
        Return the headers used when extracting the registry.
        @note: when the entity needs no configuration this returns self.header
        (set by the constructor); otherwise it returns the headers obtained by
        calling Configurator.getHeaders.
        @return: [string]
        """
        d=self.getHeader()
        if self.Configurator is not None:
            d=self.Configurator.getHeaders(subfix)
        return d
    def getValue(self,Id,fetcher,set=False):
        """
        Return the value of the requested field, identified by `fetcher`.
        @param int: Id of the pv under examination
        @param string: header key of the entity's fetcher
        @param bool: when True, first re-point this instance at pv `Id`
        (and repopulate the fetchers); for configurable entities the value
        comes from Configurator.getData instead of the fetchers dict.
        @return: string
        """
        if set:
            self.setId(Id,set)
        #self.setId(Id)
        #d=[False]=self.fetchers[fetcher]
        #d[True]=self.configurator.getData(self.getEntityId())
        if self.Configurator is None:
            d=self.fetchers[fetcher]
        else:
            d=self.Configurator.getData(Id)
        return d
    def setRequired(self,b):
        """ Setter for self.required.
        @param boolean:
        """
        self.required=b
    def isRequired(self):
        """
        Getter for self.required.
        @return: boolean
        """
        return self.required
    def setConfigurator(self,c):
        """
        Setter for self.Configurator, the object in charge of configuring
        brand ("marche") and parameter entities.
        @param Configurator:
        """
        self.Configurator=c
    def needsConfiguration(self):
        """
        Return True when the entity must be configured and has not been
        configured yet (toBeConfigured and not configured).
        @return: boolean
        """
        l=lambda a,b:a and not b
        a=self.toBeConfigured
        b=self.configured
        return l(a,b)
    def getData(self,Id):
        """
        Return the data provided by configurable entities.
        Should only be invoked when needsConfiguration is True and the
        entity's configurator has been set.
        @param int: pv_id of the pv under examination
        @return: [string]
        """
        # NOTE(review): self.configurator (lowercase) is never assigned in this
        # class -- the attribute set by setConfigurator is self.Configurator,
        # so this call would raise AttributeError; verify intended name.
        return self.configurator.getData(Id)
    def isConfigured(self):
        """
        Getter for self.configured.
        @return: boolean
        """
        return self.configured
    def setConfigured(self,b):
        """
        Setter for self.configured.
        @param boolean:
        """
        self.configured=b
    def setListId(self,Id):
        """
        Setter for self.list_id.
        """
        self.list_id=Id
    def getListId(self):
        """
        Getter for self.list_id.
        """
        return self.list_id
    def setEntityId(self,Id):
        """
        Setter for entityId (ignored for entities that must be configured).
        @param Id:int
        """
        #print "ww"*1000
        #print Id
        if not self.toBeConfigured:
            self.entityId=Id
    def getEntityId(self):
        """Getter for self.entityId.
        @return: int
        """
        return self.entityId
    def getHeader(self):
        """
        Getter for self.header.
        @return: string
        """
        return self.header
    def __repr__(self):
        return "Entity-%s: required:%s| toconfigure:%s,Id%d"%(self.header,self.required,self.toBeConfigured,self.Id)
    def setOrder(self,order):
        """
        Setter for self.order.
        @param int: display order of the entity
        """
        self.order=order
    def getorder(self):
        """
        Getter for self.order, the order in which the entity is displayed.
        @return: int
        """
        return self.order
    def reset(self):
        # Reset configuration state back to its defaults.
        self.configured=False
        self.required=False
        self.Configurator=None
        print "reset"
    def setType(self,t):
        # Setter for self.type.
        self.type=t
    def getType(self):
        # Getter for self.type.
        return self.type
    def __init__(self,header,toBeConfigured,entityId,user,activeDb,db,tipo,populate=True):
        """
        @param header:string: the column header used when the field is
        exported; it must match the key used for the field in self.fetchers
        @param toBeConfigured:boolean whether the entity must be configured;
        always False except for brands ("marche") and parameters
        @param entityId:int: identifies the entity in Profilo's entityList
        """
        self.user=user
        self.activeDb=activeDb
        self.order=-1
        self.type=tipo
        self.header=header
        self.pv_id=None
        self.entityId=-1
        self.db=db
        self.Id=entityId
        self.toBeConfigured=toBeConfigured
        self.Configurator=None
        self.required=False
        self.configured=False
        self.list_id=-1# distinguishes the entity in entityList during export
        # obtain an instance of mysql_sqlalchemy_class.Pv
        self.pv=self.db.fetchPv(self.Id)
        # instantiate the fetchers dict
        self.fetchers=Odict()
        # populate the fetchers dict
        self.populateFetchers(populate)
| {
"content_hash": "263add5d3bca1a9d9093d0e82cc2632c",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 178,
"avg_line_length": 27.2442996742671,
"alnum_prop": 0.7158058345289335,
"repo_name": "arpho/mmasgis5",
"id": "0de42fbf371b84da6711eff689abd4b91a92bdcc",
"size": "8412",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmasgis/Entity.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "617"
},
{
"name": "C++",
"bytes": "856"
},
{
"name": "CSS",
"bytes": "17115"
},
{
"name": "JavaScript",
"bytes": "21348"
},
{
"name": "Prolog",
"bytes": "1548"
},
{
"name": "Python",
"bytes": "13348612"
}
],
"symlink_target": ""
} |
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, overload
from urllib.parse import parse_qs, urljoin, urlparse
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._serialization import Serializer
from .._vendor import _convert_request, _format_url_section
# Generic return type of the optional `cls` response-transformer callback.
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
# One module-level Serializer is shared by all request builders below;
# client-side validation is turned off.
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_list_request(
    resource_group_name: str, workspace_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP GET request that lists all bookmarks in a workspace."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # URL: substitute the validated path parameters into the template.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_get_request(
    resource_group_name: str, workspace_name: str, bookmark_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP GET request that fetches a single bookmark by id."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # URL: substitute the validated path parameters into the template.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks/{bookmarkId}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
        bookmarkId=_SERIALIZER.url("bookmark_id", bookmark_id, "str"),
    )

    # Query string and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=url, params=params, headers=headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str, workspace_name: str, bookmark_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Assemble the HTTP PUT request that creates or updates a bookmark."""
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    content_type = kwargs.pop("content_type", headers.pop("Content-Type", None))  # type: Optional[str]
    accept = headers.pop("Accept", "application/json")

    # URL: substitute the validated path parameters into the template.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks/{bookmarkId}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
        bookmarkId=_SERIALIZER.url("bookmark_id", bookmark_id, "str"),
    )

    # Query string and headers; Content-Type is only sent when a body is given.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    if content_type is not None:
        headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=url, params=params, headers=headers, **kwargs)
def build_delete_request(
    resource_group_name: str, workspace_name: str, bookmark_id: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that removes a bookmark.

    Pops the ``headers``/``params``/``api_version``/``template_url`` overrides out
    of *kwargs*; whatever remains is forwarded to :class:`HttpRequest`.
    """
    headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Callers may override the api-version either via kwargs or via query params.
    api_version = kwargs.pop("api_version", params.pop("api-version", "2022-09-01-preview"))  # type: str
    accept = headers.pop("Accept", "application/json")

    # Construct URL: fill the ARM resource-id template with serialized path values.
    url = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks/{bookmarkId}",
    )  # pylint: disable=line-too-long
    url = _format_url_section(
        url,
        subscriptionId=_SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        resourceGroupName=_SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        workspaceName=_SERIALIZER.url("workspace_name", workspace_name, "str", max_length=90, min_length=1),
        bookmarkId=_SERIALIZER.url("bookmark_id", bookmark_id, "str"),
    )

    # Construct query parameters and headers.
    params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=url, params=params, headers=headers, **kwargs)
class BookmarksOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.

        Instead, you should access the following operations through
        :class:`~azure.mgmt.securityinsight.SecurityInsights`'s
        :attr:`bookmarks` attribute.
    """

    # Expose the generated model classes so callers can reach them via
    # ``client.bookmarks.models``.
    models = _models

    def __init__(self, *args, **kwargs):
        # The generated client wires in four collaborators; they may arrive
        # positionally (in this fixed order) or as keyword arguments.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")

    @distributed_trace
    def list(self, resource_group_name: str, workspace_name: str, **kwargs: Any) -> Iterable["_models.Bookmark"]:
        """Gets all bookmarks.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Bookmark or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.securityinsight.models.Bookmark]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.BookmarkList]

        # Map well-known HTTP errors onto typed azure-core exceptions.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page: build the templated request; subsequent pages: follow
            # the service-provided nextLink (re-applying the client api-version).
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    workspace_name=workspace_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urlparse(next_link)
                _next_request_params = case_insensitive_dict(parse_qs(_parsed_next_link.query))
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest("GET", urljoin(next_link, _parsed_next_link.path), params=_next_request_params)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items) to ItemPaged.
            deserialized = self._deserialize("BookmarkList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            # Run one page request through the pipeline; anything but 200 is an error.
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(get_next, extract_data)

    # URL template reused by prepare_request above (also used for tracing/metadata).
    list.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks"}  # type: ignore

    @distributed_trace
    def get(self, resource_group_name: str, workspace_name: str, bookmark_id: str, **kwargs: Any) -> _models.Bookmark:
        """Gets a bookmark.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :param bookmark_id: Bookmark ID. Required.
        :type bookmark_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Bookmark or the result of cls(response)
        :rtype: ~azure.mgmt.securityinsight.models.Bookmark
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Bookmark]

        request = build_get_request(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            bookmark_id=bookmark_id,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize("Bookmark", pipeline_response)

        # ``cls`` lets callers intercept the raw response instead of the model.
        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks/{bookmarkId}"}  # type: ignore

    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        bookmark_id: str,
        bookmark: _models.Bookmark,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Bookmark:
        """Creates or updates the bookmark.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :param bookmark_id: Bookmark ID. Required.
        :type bookmark_id: str
        :param bookmark: The bookmark. Required.
        :type bookmark: ~azure.mgmt.securityinsight.models.Bookmark
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Bookmark or the result of cls(response)
        :rtype: ~azure.mgmt.securityinsight.models.Bookmark
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @overload
    def create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        bookmark_id: str,
        bookmark: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> _models.Bookmark:
        """Creates or updates the bookmark.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :param bookmark_id: Bookmark ID. Required.
        :type bookmark_id: str
        :param bookmark: The bookmark. Required.
        :type bookmark: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
         Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Bookmark or the result of cls(response)
        :rtype: ~azure.mgmt.securityinsight.models.Bookmark
        :raises ~azure.core.exceptions.HttpResponseError:
        """

    @distributed_trace
    def create_or_update(
        self,
        resource_group_name: str,
        workspace_name: str,
        bookmark_id: str,
        bookmark: Union[_models.Bookmark, IO],
        **kwargs: Any
    ) -> _models.Bookmark:
        """Creates or updates the bookmark.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :param bookmark_id: Bookmark ID. Required.
        :type bookmark_id: str
        :param bookmark: The bookmark. Is either a model type or a IO type. Required.
        :type bookmark: ~azure.mgmt.securityinsight.models.Bookmark or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Bookmark or the result of cls(response)
        :rtype: ~azure.mgmt.securityinsight.models.Bookmark
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.Bookmark]

        content_type = content_type or "application/json"
        # Send raw streams/bytes as-is; serialize model objects to JSON.
        _json = None
        _content = None
        if isinstance(bookmark, (IO, bytes)):
            _content = bookmark
        else:
            _json = self._serialize.body(bookmark, "Bookmark")

        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            bookmark_id=bookmark_id,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self.create_or_update.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        # 200 = updated, 201 = created; both carry a Bookmark body.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize("Bookmark", pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize("Bookmark", pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks/{bookmarkId}"}  # type: ignore

    @distributed_trace
    def delete(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, workspace_name: str, bookmark_id: str, **kwargs: Any
    ) -> None:
        """Delete the bookmark.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace. Required.
        :type workspace_name: str
        :param bookmark_id: Bookmark ID. Required.
        :type bookmark_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: None or the result of cls(response)
        :rtype: None
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

        api_version = kwargs.pop("api_version", _params.pop("api-version", self._config.api_version))  # type: str
        cls = kwargs.pop("cls", None)  # type: ClsType[None]

        request = build_delete_request(
            resource_group_name=resource_group_name,
            workspace_name=workspace_name,
            bookmark_id=bookmark_id,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.delete.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore

        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response

        # 204 means the bookmark was already absent; both codes are success.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/bookmarks/{bookmarkId}"}  # type: ignore
| {
"content_hash": "30ccf7475c25c593b6a0114279a3a6a2",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 252,
"avg_line_length": 44.04081632653061,
"alnum_prop": 0.6459263627938326,
"repo_name": "Azure/azure-sdk-for-python",
"id": "573200dfc939987d2082c22cab4ce5292471cd17",
"size": "24238",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/operations/_bookmarks_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""
server config.
"""
from basinboa.system.loader import YamlLoader
class Config(object):
    """A named configuration object.

    Starts empty; ``items`` is initialized to 0 and configuration values are
    presumably attached later by the loader (see ``ConfigLoader.get``) —
    NOTE(review): confirm against ``YamlLoader.register_attr``.
    """

    def __init__(self, name):
        super(Config, self).__init__()
        self.name = name  # identifier of this configuration
        self.items = 0    # entry counter, starts at zero

    def __repr__(self):
        return "Config: %s, items: %s" % (self.name, self.items)
class ConfigLoader(YamlLoader):
    """Load named :class:`Config` objects via the YAML loader machinery."""

    # Well-known name of the server-wide configuration.
    SERVER_CONFIG = 'server'

    def __init__(self, data_dir):
        super(ConfigLoader, self).__init__(data_dir)

    def get(self, name):
        """Return a populated Config for *name*, or None when nothing loads."""
        data = self.load(name)
        if not data:
            return None
        return self.register_attr(Config(name), data)

    def get_server_config(self):
        """Return the server-wide configuration (the ``server`` entry)."""
        return self.get(self.SERVER_CONFIG)
| {
"content_hash": "250d419919d8b5cf207df34275abb66f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 57,
"avg_line_length": 24.2,
"alnum_prop": 0.5572609208972845,
"repo_name": "marlboromoo/basinboa",
"id": "6fd650639d92b5fe43e5c213088ad9b263e80566",
"size": "869",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "basinboa/system/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "120279"
},
{
"name": "Shell",
"bytes": "134"
}
],
"symlink_target": ""
} |
"""Test for a simple Mifare NFC Authentication"""
# Pynfc is a python wrapper for the libnfc library
# Copyright (C) 2009 Mike Auty
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import time
from datetime import datetime,timedelta
import logging
import ctypes
import string
import nfc
def hex_dump(string):
    """Return *string* rendered as space-separated two-digit uppercase hex.

    Example: ``hex_dump("AB")`` -> ``"41 42"``.
    """
    # NOTE: the parameter name shadows the imported ``string`` module; kept
    # because renaming it would break keyword callers.
    return ' '.join("%0.2X" % ord(ch) for ch in string)
### NFC device setup
class NFCReader(object):
    """Polls a libnfc device for ISO14443-A (Mifare) cards and records the
    UID of the card currently on the reader.

    NOTE(review): this is Python 2 code (print statements, ``except IOError, e``,
    ``str.encode("hex")``) — it will not run under Python 3 as-is.
    """

    # Mifare Classic command bytes sent as the first byte of a frame.
    MC_AUTH_A = 0x60
    MC_AUTH_B = 0x61
    MC_READ = 0x30
    MC_WRITE = 0xA0

    NUM_OF_POLLS = 1  # Number of times it should poll for a card
    PERIOD_BW_POLLS = 1  # Periods between polls, in units of 150ms - eg 2 = 300ms

    card_timeout = 0.25  # Wait for this many seconds for the reader to respond, or to wait for another poll
    # card_timeout = 10

    def __init__(self, logger):
        self.__context = None   # libnfc context pointer (created in run/run2)
        self.__device = None    # open libnfc device handle
        self.log = logger       # logging callable, e.g. logging.Logger.info
        self._card_present = False
        self._card_last_seen = None
        self._card_uid = None
        self._clean_card()
        # Poll only for ISO14443-A modulation at 106 kbps.
        mods = [(nfc.NMT_ISO14443A, nfc.NBR_106)]
        self.__modulations = (nfc.nfc_modulation * len(mods))()
        for i in range(len(mods)):
            self.__modulations[i].nmt = mods[i][0]
            self.__modulations[i].nbr = mods[i][1]

    def run(self, wait_for_clear = False, delay_for_clear = 0):
        """Starts the looping thread"""
        # Open a libnfc context for the lifetime of this call.
        self.__context = ctypes.pointer(nfc.nfc_context())
        nfc.nfc_init(ctypes.byref(self.__context))
        # ``loop`` is the return value: True asks the caller to call run() again.
        loop = True
        try:
            self._clean_card()
            conn_strings = (nfc.nfc_connstring * 10)()
            devices_found = nfc.nfc_list_devices(self.__context, conn_strings, 10)
            if devices_found >= 1:
                # Use the first reader found.
                self.__device = nfc.nfc_open(self.__context, conn_strings[0])
                try:
                    _ = nfc.nfc_initiator_init(self.__device)
                    # Wait until the reader has no card nearby
                    if wait_for_clear:
                        self._poll_loop()
                        start_time = datetime.now()
                        while (datetime.now() - start_time).seconds <= delay_for_clear:
                            self._poll_loop()
                            if self._card_uid is not None:  # Keep resetting while there's a card on the reader
                                start_time = datetime.now()  # Reset the timer
                        print "The NFC reader is now clear."
                        self.log("NFC has cleared for {} seconds.".format(delay_for_clear))
                    # Block until a card shows up (sets self._card_uid).
                    while not self._card_uid:  # Wait while there is no card
                        self._poll_loop()
                except (KeyboardInterrupt, SystemExit):
                    loop = False
                    self._clean_card()
                finally:
                    nfc.nfc_close(self.__device)
            else:
                self.log("NFC Waiting for device.")
                time.sleep(self.card_timeout)
        except (KeyboardInterrupt, SystemExit):
            loop = False
            self._clean_card()
        except IOError, e:
            # Raised by _poll_loop on libnfc failure; ask the caller to retry.
            self.log("Exception: " + str(e))
            loop = True  # not str(e).startswith("NFC Error whilst polling")
        # except Exception, e:
        #     loop = True
        #     print "[!]", str(e)
        finally:
            nfc.nfc_exit(self.__context)
            self.log("NFC Clean shutdown called")
        # NOTE(review): self.log is a logging call; the extra positional
        # argument here has no %s placeholder in the message — verify output.
        self.log("run() is done, ID: ", self._card_uid)
        return loop

    def run2(self, no_card_for_seconds=0):
        """Starts the looping thread"""
        # Variant of run(): a single timeout argument replaces the
        # wait_for_clear/delay_for_clear pair.
        self.__context = ctypes.pointer(nfc.nfc_context())
        nfc.nfc_init(ctypes.byref(self.__context))
        loop = True
        try:
            self._clean_card()
            conn_strings = (nfc.nfc_connstring * 10)()
            devices_found = nfc.nfc_list_devices(self.__context, conn_strings, 10)
            if devices_found >= 1:
                self.__device = nfc.nfc_open(self.__context, conn_strings[0])
                try:
                    _ = nfc.nfc_initiator_init(self.__device)
                    # If we need to wait until the reader clears, then do this loop
                    if no_card_for_seconds > 0:
                        done_time = datetime.now() + timedelta(seconds=no_card_for_seconds)
                        while (datetime.now()<done_time):
                            self._poll_loop()
                            if self._card_uid is not None:  # Keep resetting while there's a card on the reader
                                done_time = datetime.now() + timedelta(seconds=no_card_for_seconds)
                        print "The NFC reader is now clear."
                    else:  # Assert: no_card_for_seconds <= 0, treat negative values as 0
                        # Get a valid RFID
                        while self._card_uid is None:
                            self._poll_loop()
                except (KeyboardInterrupt, SystemExit):
                    loop = False
                    self._clean_card()
                finally:
                    nfc.nfc_close(self.__device)
            else:
                self.log("NFC Waiting for device.")
                time.sleep(self.card_timeout)
        except (KeyboardInterrupt, SystemExit):
            loop = False
            self._clean_card()
        except IOError, e:
            self.log("Exception: " + str(e))
            loop = True  # not str(e).startswith("NFC Error whilst polling")
        # except Exception, e:
        #     loop = True
        #     print "[!]", str(e)
        finally:
            nfc.nfc_exit(self.__context)
            self.log("NFC Clean shutdown called")
        self.log("run() is done, ID: ", self._card_uid)
        return loop

    @staticmethod
    def _sanitize(bytesin):
        """Returns guaranteed ascii text from the input bytes"""
        # Used for converting raw byte data to a string. If the byte isn't a tame ASCII character, use . instead.
        return "".join([x if 0x7f > ord(x) > 0x1f else '.' for x in bytesin])

    @staticmethod
    def _hashsanitize(bytesin):
        """Returns guaranteed hexadecimal digits from the input bytes"""
        # Used for converting raw byte data into a hex string. If the byte isn't a hex digit, use nothing instead.
        return "".join([x if x.lower() in 'abcdef0123456789' else '' for x in bytesin])

    def _poll_loop(self):
        """Starts a loop that constantly polls for cards"""
        nt = nfc.nfc_target()
        #res = nfc.nfc_initiator_poll_target(self.__device, self.__modulations, len(self.__modulations), 10, 2,
        #                                    ctypes.byref(nt))
        res = nfc.nfc_initiator_poll_target(self.__device, self.__modulations, len(self.__modulations),
                                            NFCReader.NUM_OF_POLLS, NFCReader.PERIOD_BW_POLLS,
                                            ctypes.byref(nt))
        # print "RES", res
        # res < 0: libnfc error; res >= 1: a target was found; res == 0: no card.
        if res < 0:
            raise IOError("NFC Error whilst polling")
        elif res >= 1:
            uid = None
            if nt.nti.nai.szUidLen >= 1:
                try:
                    # Raw UID bytes -> byte string (Python 2 str).
                    uid = "".join([chr(nt.nti.nai.abtUid[i]) for i in range(nt.nti.nai.szUidLen)])
                except IndexError:
                    raise IndexError("ERROR: index outside the range of nt.nti.nai.abtUid!")
            if uid:
                # Only treat as a *new* sighting if it isn't the same card seen
                # within the last ``card_timeout`` seconds.
                if not ((self._card_uid and
                         self._card_present and
                         uid == self._card_uid) and
                        time.mktime(time.gmtime()) <= self._card_last_seen + self.card_timeout):
                    self._setup_device()
                    # self.read_card(uid)  # Remove the read -- don't need it, and it spews to the screen
                    self._card_uid = uid.encode("hex")
                    self.log("ID: ", uid.encode("hex"))
                self._card_present = True
                self._card_last_seen = time.mktime(time.gmtime())
        else:  # ASSERT: res == 0
            # No card on the reader: clear the remembered UID.
            self._card_present = False
            self._clean_card()
        self.log("Done _poll_loop, found ID: ", self._card_uid )

    def _clean_card(self):
        # Forget the currently remembered card UID.
        self._card_uid = None

    def select_card(self):
        """Selects a card after a failed authentication attempt (aborted communications)

        Returns the UID of the card selected
        """
        nt = nfc.nfc_target()
        _ = nfc.nfc_initiator_select_passive_target(self.__device, self.__modulations[0], None, 0, ctypes.byref(nt))
        uid = "".join([chr(nt.nti.nai.abtUid[i]) for i in range(nt.nti.nai.szUidLen)])
        return uid

    def _setup_device(self):
        """Sets all the NFC device settings for reading from Mifare cards"""
        if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_ACTIVATE_CRYPTO1, True) < 0:
            raise Exception("Error setting Crypto1 enabled")
        if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_INFINITE_SELECT, False) < 0:
            raise Exception("Error setting Single Select option")
        if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_AUTO_ISO14443_4, False) < 0:
            raise Exception("Error setting No Auto ISO14443-A jiggery pokery")
        if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_HANDLE_PARITY, True) < 0:
            raise Exception("Error setting Easy Framing property")

    def _read_block(self, block):
        """Reads a block from a Mifare Card after authentication

        Returns the data read or raises an exception
        """
        if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_EASY_FRAMING, True) < 0:
            raise Exception("Error setting Easy Framing property")
        # Frame: [MC_READ, block number]; response buffer sized generously.
        abttx = (ctypes.c_uint8 * 2)()
        abttx[0] = self.MC_READ
        abttx[1] = block
        abtrx = (ctypes.c_uint8 * 250)()
        res = nfc.nfc_initiator_transceive_bytes(self.__device, ctypes.pointer(abttx), len(abttx),
                                                 ctypes.pointer(abtrx), len(abtrx), 0)
        if res < 0:
            raise IOError("Error reading data")
        return "".join([chr(abtrx[i]) for i in range(res)])

    def __write_block(self, block, data):
        """Writes a block of data to a Mifare Card after authentication

        Raises an exception on error
        """
        if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_EASY_FRAMING, True) < 0:
            raise Exception("Error setting Easy Framing property")
        if len(data) > 16:
            raise ValueError("Data value to be written cannot be more than 16 characters.")
        # Frame: [MC_WRITE, block, 16 data bytes] with data NUL-padded to 16.
        abttx = (ctypes.c_uint8 * 18)()
        abttx[0] = self.MC_WRITE
        abttx[1] = block
        abtrx = (ctypes.c_uint8 * 250)()
        for i in range(16):
            abttx[i + 2] = ord((data + "\x00" * (16 - len(data)))[i])
        return nfc.nfc_initiator_transceive_bytes(self.__device, ctypes.pointer(abttx), len(abttx),
                                                  ctypes.pointer(abtrx), len(abtrx), 0)

    def _authenticate(self, block, uid, key = "\xff\xff\xff\xff\xff\xff", use_b_key = False):
        """Authenticates to a particular block using a specified key"""
        if nfc.nfc_device_set_property_bool(self.__device, nfc.NP_EASY_FRAMING, True) < 0:
            raise Exception("Error setting Easy Framing property")
        # Frame: [auth command, block, 6 key bytes, 4 UID bytes].
        abttx = (ctypes.c_uint8 * 12)()
        abttx[0] = self.MC_AUTH_A if not use_b_key else self.MC_AUTH_B
        abttx[1] = block
        for i in range(6):
            abttx[i + 2] = ord(key[i])
        for i in range(4):
            abttx[i + 8] = ord(uid[i])
        abtrx = (ctypes.c_uint8 * 250)()
        return nfc.nfc_initiator_transceive_bytes(self.__device, ctypes.pointer(abttx), len(abttx),
                                                  ctypes.pointer(abtrx), len(abtrx), 0)

    def auth_and_read(self, block, uid, key = "\xff\xff\xff\xff\xff\xff"):
        """Authenticates and then reads a block

        Returns '' if the authentication failed
        """
        # Reselect the card so that we can reauthenticate
        self.select_card()
        res = self._authenticate(block, uid, key)
        if res >= 0:
            return self._read_block(block)
        return ''

    def auth_and_write(self, block, uid, data, key = "\xff\xff\xff\xff\xff\xff"):
        """Authenticates and then writes a block
        """
        # NOTE(review): unlike auth_and_read, select_card() is called *after*
        # a failed auth here rather than before — confirm this asymmetry is intended.
        res = self._authenticate(block, uid, key)
        if res >= 0:
            return self.__write_block(block, data)
        self.select_card()
        return ""

    def read_card(self, uid):
        """Takes a uid, reads the card and return data for use in writing the card"""
        # Dumps all 64 Mifare Classic 1K blocks to stdout using the default key.
        key = "\xff\xff\xff\xff\xff\xff"
        print "Reading card", uid.encode("hex")
        self._card_uid = self.select_card()
        self._authenticate(0x00, uid, key)
        block = 0
        for block in range(64):
            data = self.auth_and_read(block, uid, key)
            print block, data.encode("hex"), "".join([ x if x in string.printable else "." for x in data])

    def write_card(self, uid, data):
        """Accepts data of the recently read card with UID uid, and writes any changes necessary to it"""
        raise NotImplementedError
if __name__ == '__main__':
    # Poll for cards forever: run() returns True to request another cycle
    # (e.g. after a recoverable IOError) and False on interrupt/exit.
    logger = logging.getLogger("cardhandler").info
    while NFCReader(logger).run():
        pass
| {
"content_hash": "41da2cf54352e6fb60bc1d64fbdd589d",
"timestamp": "",
"source": "github",
"line_count": 319,
"max_line_length": 116,
"avg_line_length": 44.18808777429467,
"alnum_prop": 0.5554767309875142,
"repo_name": "kikiorg/TikiBot",
"id": "f6291ab7490d5caf2387884fb03a15f0db5f2932",
"size": "14096",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynfc/src/mifareauth.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "105348"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
} |
import sys

try:
    # Prefer setuptools; fall back to plain distutils when it is unavailable.
    from setuptools import setup
except ImportError:
    from distutils.core import setup

import versioneer

# Refuse to install on Python < 2.7.
# NOTE(review): the message says "conda" although this is the conda-build
# setup script — wording presumably inherited from conda; confirm upstream.
if sys.version_info[:2] < (2, 7):
    sys.exit("conda is only meant for Python 2.7, with experimental support "
             "for python 3. current version: %d.%d" % sys.version_info[:2])

# Tell versioneer where the generated version module lives and how release
# tags / unpacked source directories are named.
versioneer.versionfile_source = 'conda_build/_version.py'
versioneer.versionfile_build = 'conda_build/_version.py'
versioneer.tag_prefix = ''
versioneer.parentdir_prefix = 'conda-build-'

setup(
    name = "conda-build",
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    author = "Continuum Analytics, Inc.",
    author_email = "ilan@continuum.io",
    url = "https://github.com/conda/conda-build",
    license = "BSD",
    classifiers = [
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.3",
        "Programming Language :: Python :: 3.4",
    ],
    description = "tools for building conda packages",
    long_description = open('README.rst').read(),
    packages = ['conda_build'],
    # Thin wrapper entry points installed into the environment's bin/.
    scripts = [
        'bin/conda-build',
        'bin/conda-convert',
        'bin/conda-index',
        'bin/conda-skeleton',
        'bin/conda-pipbuild',
        'bin/conda-metapackage',
        'bin/conda-develop',
        'bin/conda-inspect',
    ],
    install_requires = ['conda'],
    # Ship recipe templates and the Windows CLI launcher stubs with the package.
    package_data={'conda_build': ['templates/*', 'cli-*.exe']},
)
| {
"content_hash": "651d1dc7b43102169fb41ee2d1a7c989",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 30.88888888888889,
"alnum_prop": 0.6145083932853717,
"repo_name": "takluyver/conda-build",
"id": "2e002ed496e08e597816575c4c839914739771ea",
"size": "1690",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10"
},
{
"name": "Python",
"bytes": "243266"
},
{
"name": "Shell",
"bytes": "9009"
}
],
"symlink_target": ""
} |
import rospy
from std_msgs.msg import String
def callback(data):
    """Log every std_msgs/String message received on the subscribed topic."""
    message = '{}: I heard {}'.format(rospy.get_name(), data.data)
    rospy.loginfo(message)
def listener():
    """Start an anonymous ``listener`` node subscribed to ``chatter``.

    Blocks in rospy.spin() until the node is shut down.
    """
    # anonymous=True appends a random suffix so several listeners can coexist.
    rospy.init_node('listener', anonymous=True)
    rospy.Subscriber('chatter', String, callback)
    rospy.spin()
if __name__ == '__main__':
    # Run the subscriber node only when executed directly, not on import.
    listener()
| {
"content_hash": "b1300e7fbfb1867a93d8ca15a2060a88",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 70,
"avg_line_length": 20.933333333333334,
"alnum_prop": 0.6496815286624203,
"repo_name": "wkentaro/beginner_tutorials",
"id": "7416dfe18872dde4dcf52588a08b64ab1d528842",
"size": "336",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/listener.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3766"
},
{
"name": "Python",
"bytes": "4250"
},
{
"name": "XML",
"bytes": "2575"
}
],
"symlink_target": ""
} |
def scale_dataset(x_train):
    """Identity transform: this benchmark's inputs require no scaling.

    Kept so the benchmark harness can call a uniform preprocessing hook.
    """
    return x_train
def build_model(weights_path=None):
    """Build the imdb_lstm benchmark network and load its trained weights.

    :param weights_path: optional path to an HDF5 weights file; when None,
        the weights bundled with this package under networks/keras/imdb_lstm.h5
        are used.
    :return: a keras Sequential model with weights loaded.
    """
    import os
    from keras.models import Sequential
    from keras.layers import Dense, Embedding
    from keras.layers import LSTM

    # Embedding over a 20000-word vocabulary for 80-token sequences, one LSTM
    # layer, then a single sigmoid unit (binary sentiment classification).
    model = Sequential()
    model.add(Embedding(20000, 128, input_length=80))
    model.add(LSTM(128, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(1, activation='sigmoid'))

    # Fall back to the weights shipped alongside this module.
    # (Replaces the original ``if weights_path is not None: pass / else:``
    # inverted no-op branch with the direct condition.)
    if weights_path is None:
        this_dir = os.path.dirname(os.path.abspath(__file__))
        weights_path = os.path.join(this_dir, 'networks', 'keras', 'imdb_lstm.h5')
    model.load_weights(weights_path)
    return model
| {
"content_hash": "878b25d255af1994a3178ecf8b277644",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 82,
"avg_line_length": 28.82608695652174,
"alnum_prop": 0.6666666666666666,
"repo_name": "plaidml/plaidml",
"id": "2e77525dec60d270b8573eb63fa6c9a62e74e0b8",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/plaidml-v1",
"path": "plaidbench/plaidbench/networks/keras/imdb_lstm.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "12908"
},
{
"name": "C++",
"bytes": "2299440"
},
{
"name": "CMake",
"bytes": "85677"
},
{
"name": "HTML",
"bytes": "2745"
},
{
"name": "MLIR",
"bytes": "339818"
},
{
"name": "Makefile",
"bytes": "607"
},
{
"name": "Python",
"bytes": "588389"
},
{
"name": "TeX",
"bytes": "2194"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
import os
import sys
from abc import abstractmethod
from contextlib import contextmanager
from hashlib import sha1
from twitter.common.collections.orderedset import OrderedSet
from pants.base.exceptions import TaskError
from pants.base.fingerprint_strategy import TaskIdentityFingerprintStrategy
from pants.base.worker_pool import Work
from pants.cache.artifact_cache import UnreadableArtifact, call_insert, call_use_cached_files
from pants.cache.cache_setup import CacheSetup
from pants.invalidation.build_invalidator import BuildInvalidator, CacheKeyGenerator
from pants.invalidation.cache_manager import InvalidationCacheManager, InvalidationCheck
from pants.option.optionable import Optionable
from pants.option.options_fingerprinter import OptionsFingerprinter
from pants.option.scope import ScopeInfo
from pants.reporting.reporting_utils import items_to_report_element
from pants.subsystem.subsystem_client_mixin import SubsystemClientMixin
from pants.util.meta import AbstractClass
class TaskBase(SubsystemClientMixin, Optionable, AbstractClass):
  """Defines a lifecycle that prepares a task for execution and provides the base machinery
  needed to execute it.
  Provides the base lifecycle methods that allow a task to interact with the command line, other
  tasks and the user. The lifecycle is linear and run via the following sequence:
  1. register_options - declare options configurable via cmd-line flag or config file.
  2. product_types - declare the product types your task is capable of producing.
  3. alternate_target_roots - propose a different set of target roots to use than those specified
     via the CLI for the active pants run.
  4. prepare - request any products needed from other tasks.
  5. __init__ - distill configuration into the information needed to execute.
  Provides access to the current run context for scoping work.
  Also provides the basic facilities for doing work efficiently including providing a work directory
  for scratch space on disk, an invalidator for checking which targets need work done on, and an
  artifact cache for re-using previously cached work.
  #TODO(John Sirois): Lifecycle is currently split between TaskBase and Task and lifecycle
  (interface) and helpers (utility) are currently conflated. Tease these apart and narrow the scope
  of the helpers. Ideally console tasks don't inherit a workdir, invalidator or build cache for
  example.
  """
  # Identifies this Optionable's scope as a task scope.
  options_scope_category = ScopeInfo.TASK
  # Tests may override this to provide a stable name despite the class name being a unique,
  # synthetic name.
  _stable_name = None
  @classmethod
  def stable_name(cls):
    """The stable name of this task type.
    We synthesize subclasses of the task types at runtime, and these synthesized subclasses
    may have random names (e.g., in tests), so this gives us a stable name to use across runs,
    e.g., in artifact cache references.
    """
    return cls._stable_name or cls._compute_stable_name()
  @classmethod
  def _compute_stable_name(cls):
    # e.g. 'pants_backend_core_tasks_task_TaskBase' — dots replaced so the
    # name is safe to use in file system paths.
    return '{}_{}'.format(cls.__module__, cls.__name__).replace('.', '_')
  @classmethod
  def global_subsystems(cls):
    """The global subsystems this task uses.
    A tuple of subsystem types.
    """
    return tuple()
  @classmethod
  def task_subsystems(cls):
    """The private, per-task subsystems this task uses.
    A tuple of subsystem types.
    """
    return (CacheSetup,)
  @classmethod
  def product_types(cls):
    """The list of products this Task produces. Set the product type(s) for this
    task i.e. the product type(s) this task creates e.g ['classes'].
    By default, each task is considered as creating a unique product type(s).
    Subclasses that create products, should override this to specify their unique product type(s).
    """
    return []
  @classmethod
  def known_scope_infos(cls):
    """Yields ScopeInfo for all known scopes for this task, in no particular order."""
    # The task's own scope.
    yield cls.get_scope_info()
    # The scopes of any task-specific subsystems it uses.
    for dep in cls.subsystem_dependencies_iter():
      if not dep.is_global():
        yield dep.subsystem_cls.get_scope_info(subscope=dep.scope)
  @classmethod
  def supports_passthru_args(cls):
    """Subclasses may override to indicate that they can use passthru args."""
    return False
  @classmethod
  def _scoped_options(cls, options):
    # Narrow the full options object down to this task's own scope.
    return options[cls.options_scope]
  @classmethod
  def _alternate_target_roots(cls, options, address_mapper, build_graph):
    # Subclasses should not generally need to override this method.
    # TODO(John Sirois): Kill when killing GroupTask as part of RoundEngine parallelization.
    return cls.alternate_target_roots(cls._scoped_options(options), address_mapper, build_graph)
  @classmethod
  def alternate_target_roots(cls, options, address_mapper, build_graph):
    """Allows a Task to propose alternate target roots from those specified on the CLI.
    At most 1 unique proposal is allowed amongst all tasks involved in the run. If more than 1
    unique list of target roots is proposed an error is raised during task scheduling.
    :returns list: The new target roots to use or none to accept the CLI specified target roots.
    """
  @classmethod
  def _prepare(cls, options, round_manager):
    # Subclasses should not generally need to override this method.
    # TODO(John Sirois): Kill when killing GroupTask as part of RoundEngine parallelization.
    return cls.prepare(cls._scoped_options(options), round_manager)
  @classmethod
  def prepare(cls, options, round_manager):
    """Prepares a task for execution.
    Called before execution and prior to any tasks that may be (indirectly) depended upon.
    Typically a task that requires products from other goals would register interest in those
    products here and then retrieve the requested product mappings when executed.
    """
  def __init__(self, context, workdir):
    """Subclass __init__ methods, if defined, *must* follow this idiom:
    class MyTask(Task):
      def __init__(self, *args, **kwargs):
        super(MyTask, self).__init__(*args, **kwargs)
        ...
    This allows us to change Task.__init__()'s arguments without
    changing every subclass. If the subclass does not need its own
    initialization, this method can (and should) be omitted entirely.
    """
    super(TaskBase, self).__init__()
    self.context = context
    self._workdir = workdir
    # TODO: It would be nice to use self.get_options().cache_key_gen_version here, because then
    # we could have a separate value for each scope if we really wanted to. However we can't
    # access per-task options in Task.__init__ because GroupTask.__init__ calls it with the
    # group task's scope, which isn't currently in the known scopes we generate options for.
    self._cache_key_generator = CacheKeyGenerator(
        self.context.options.for_global_scope().cache_key_gen_version)
    self._cache_key_errors = set()
    # Per-task invalidation state lives under a directory keyed by stable name.
    self._build_invalidator_dir = os.path.join(
        self.context.options.for_global_scope().pants_workdir,
        'build_invalidator',
        self.stable_name())
    self._cache_factory = CacheSetup.create_cache_factory_for_task(self)
    self._options_fingerprinter = OptionsFingerprinter(self.context.build_graph)
    self._fingerprint = None
  def get_options(self):
    """Returns the option values for this task's scope."""
    return self.context.options.for_scope(self.options_scope)
  def get_passthru_args(self):
    # Raises unless the subclass opted in via supports_passthru_args().
    if not self.supports_passthru_args():
      raise TaskError('{0} Does not support passthru args.'.format(self.stable_name()))
    else:
      return self.context.options.passthru_args_for_scope(self.options_scope)
  @property
  def workdir(self):
    """A scratch-space for this task that will be deleted by `clean-all`.
    It's not guaranteed that the workdir exists, just that no other task has been given this
    workdir path to use.
    """
    return self._workdir
  def _options_fingerprint(self, scope):
    # Hash every fingerprintable option value in the given scope into one digest.
    pairs = self.context.options.get_fingerprintable_for_scope(scope)
    hasher = sha1()
    for (option_type, option_val) in pairs:
      fp = self._options_fingerprinter.fingerprint(option_type, option_val)
      if fp is not None:
        hasher.update(fp)
    return hasher.hexdigest()
  @property
  def fingerprint(self):
    """Returns a fingerprint for the identity of the task.
    A task fingerprint is composed of the options the task is currently running under.
    Useful for invalidating unchanging targets being executed beneath changing task
    options that affect outputted artifacts.
    A task's fingerprint is only valid after the task has been fully initialized.
    """
    # Computed lazily on first access, then memoized for the task's lifetime.
    if not self._fingerprint:
      hasher = sha1()
      hasher.update(self._options_fingerprint(self.options_scope))
      for dep in self.subsystem_dependencies_iter():
        hasher.update(self._options_fingerprint(dep.options_scope()))
      self._fingerprint = str(hasher.hexdigest())
    return self._fingerprint
  def artifact_cache_reads_enabled(self):
    return self._cache_factory.read_cache_available()
  def artifact_cache_writes_enabled(self):
    return self._cache_factory.write_cache_available()
  def invalidate(self):
    """Invalidates all targets for this task."""
    BuildInvalidator(self._build_invalidator_dir).force_invalidate_all()
  def create_cache_manager(self, invalidate_dependents, fingerprint_strategy=None):
    """Creates a cache manager that can be used to invalidate targets on behalf of this task.
    Use this if you need to check for invalid targets but can't use the contextmanager created by
    invalidated(), e.g., because you don't want to mark the targets as valid when done.
    invalidate_dependents: If True then any targets depending on changed targets are invalidated.
    fingerprint_strategy: A FingerprintStrategy instance, which can do per task, finer grained
      fingerprinting of a given Target.
    """
    return InvalidationCacheManager(self._cache_key_generator,
                                    self._build_invalidator_dir,
                                    invalidate_dependents,
                                    fingerprint_strategy=fingerprint_strategy,
                                    invalidation_report=self.context.invalidation_report,
                                    task_name=type(self).__name__)
  @property
  def cache_target_dirs(self):
    """Whether to cache files in VersionedTarget's results_dir after exiting an invalidated block.
    Subclasses may override this method to return True if they wish to use this style
    of "automated" caching, where each VersionedTarget is given an associated results directory,
    which will automatically be uploaded to the cache. Tasks should place the output files
    for each VersionedTarget in said results directory. It is highly suggested to follow this
    schema for caching, rather than manually making updates to the artifact cache.
    """
    return False
  @property
  def incremental(self):
    """Whether this Task implements incremental building of individual targets.
    Incremental tasks with `cache_target_dirs` set will have the results_dir of the previous build
    for a target cloned into the results_dir for the current build (where possible). This
    copy-on-write behaviour allows for immutability of the results_dir once a target has been
    marked valid.
    """
    return False
  @property
  def cache_incremental(self):
    """For incremental tasks, indicates whether the results of incremental builds should be cached.
    Deterministic per-target incremental compilation is a relatively difficult thing to implement,
    so this property provides an escape hatch to avoid caching things in that riskier case.
    """
    return False
  @contextmanager
  def invalidated(self,
                  targets,
                  invalidate_dependents=False,
                  partition_size_hint=sys.maxint,  # NOTE: sys.maxint is Python 2 only.
                  silent=False,
                  locally_changed_targets=None,
                  fingerprint_strategy=None,
                  topological_order=False):
    """Checks targets for invalidation, first checking the artifact cache.
    Subclasses call this to figure out what to work on.
    :param targets: The targets to check for changes.
    :param invalidate_dependents: If True then any targets depending on changed targets are invalidated.
    :param partition_size_hint: Each VersionedTargetSet in the yielded list will represent targets
                                containing roughly this number of source files, if possible. Set to
                                sys.maxint for a single VersionedTargetSet. Set to 0 for one
                                VersionedTargetSet per target. It is up to the caller to do the right
                                thing with whatever partitioning it asks for.
    :param locally_changed_targets: Targets that we've edited locally. If specified, and there aren't too
                                    many of them, we keep these in separate partitions from other targets,
                                    as these are more likely to have build errors, and so to be rebuilt over
                                    and over, and partitioning them separately is a performance win.
    :param fingerprint_strategy: A FingerprintStrategy instance, which can do per task, finer grained
                                 fingerprinting of a given Target.
    If no exceptions are thrown by work in the block, the build cache is updated for the targets.
    Note: the artifact cache is not updated. That must be done manually.
    :returns: Yields an InvalidationCheck object reflecting the (partitioned) targets.
    :rtype: InvalidationCheck
    """
    # TODO(benjy): Compute locally_changed_targets here instead of passing it in? We currently pass
    # it in because JvmCompile already has the source->target mapping for other reasons, and also
    # to selectively enable this feature.
    fingerprint_strategy = fingerprint_strategy or TaskIdentityFingerprintStrategy(self)
    cache_manager = self.create_cache_manager(invalidate_dependents,
                                              fingerprint_strategy=fingerprint_strategy)
    # We separate locally-modified targets from others by coloring them differently.
    # This can be a performance win, because these targets are more likely to be iterated
    # over, and this preserves "chunk stability" for them.
    colors = {}
    # But we only do so if there aren't too many, or this optimization will backfire.
    locally_changed_target_limit = 10
    if locally_changed_targets and len(locally_changed_targets) < locally_changed_target_limit:
      for t in targets:
        if t in locally_changed_targets:
          colors[t] = 'locally_changed'
        else:
          colors[t] = 'not_locally_changed'
    invalidation_check = cache_manager.check(targets, partition_size_hint, colors, topological_order=topological_order)
    if invalidation_check.invalid_vts and self.artifact_cache_reads_enabled():
      with self.context.new_workunit('cache'):
        cached_vts, uncached_vts = \
          self.check_artifact_cache(self.check_artifact_cache_for(invalidation_check))
      if cached_vts:
        cached_targets = [vt.target for vt in cached_vts]
        for t in cached_targets:
          self.context.run_tracker.artifact_cache_stats.add_hit('default', t)
        if not silent:
          self._report_targets('Using cached artifacts for ', cached_targets, '.')
      if uncached_vts:
        uncached_targets = [vt.target for vt in uncached_vts]
        for t in uncached_targets:
          self.context.run_tracker.artifact_cache_stats.add_miss('default', t)
        if not silent:
          self._report_targets('No cached artifacts for ', uncached_targets, '.')
      # Now that we've checked the cache, re-partition whatever is still invalid.
      invalidation_check = \
        InvalidationCheck(invalidation_check.all_vts, uncached_vts, partition_size_hint, colors)
    self._maybe_create_results_dirs(invalidation_check.all_vts)
    if not silent:
      targets = []
      num_invalid_partitions = len(invalidation_check.invalid_vts_partitioned)
      for vt in invalidation_check.invalid_vts_partitioned:
        targets.extend(vt.targets)
      if len(targets):
        msg_elements = ['Invalidated ',
                        items_to_report_element([t.address.reference() for t in targets], 'target')]
        if num_invalid_partitions > 1:
          msg_elements.append(' in {} target partitions'.format(num_invalid_partitions))
        msg_elements.append('.')
        self.context.log.info(*msg_elements)
    invalidation_report = self.context.invalidation_report
    if invalidation_report:
      for vts in invalidation_check.all_vts:
        invalidation_report.add_vts(cache_manager, vts.targets, vts.cache_key, vts.valid,
                                    phase='pre-check')
    # Yield the result, and then mark the targets as up to date.
    yield invalidation_check
    if invalidation_report:
      for vts in invalidation_check.all_vts:
        invalidation_report.add_vts(cache_manager, vts.targets, vts.cache_key, vts.valid,
                                    phase='post-check')
    for vt in invalidation_check.invalid_vts:
      vt.update()  # In case the caller doesn't update.
    write_to_cache = (self.cache_target_dirs
                      and self.artifact_cache_writes_enabled()
                      and invalidation_check.invalid_vts)
    if write_to_cache:
      pairs = []
      for vt in invalidation_check.invalid_vts:
        if self._should_cache(vt):
          pairs.append((vt, [vt.results_dir]))
      self.update_artifact_cache(pairs)
  def _should_cache(self, vt):
    """Return true if the given vt should be written to a cache (if configured)."""
    if vt.target.has_label('no_cache'):
      return False
    elif not vt.is_incremental or self.cache_incremental:
      return True
    else:
      return False
  def _maybe_create_results_dirs(self, vts):
    """If `cache_target_dirs`, create results_dirs for the given versioned targets."""
    if self.cache_target_dirs:
      for vt in vts:
        vt.create_results_dir(self.workdir, allow_incremental=self.incremental)
  def check_artifact_cache_for(self, invalidation_check):
    """Decides which VTS to check the artifact cache for.
    By default we check for each invalid target. Can be overridden, e.g., to
    instead check only for a single artifact for the entire target set.
    """
    return invalidation_check.invalid_vts
  def check_artifact_cache(self, vts):
    """Checks the artifact cache for the specified list of VersionedTargetSets.
    Returns a pair (cached, uncached) of VersionedTargets that were
    satisfied/unsatisfied from the cache.
    """
    return self.do_check_artifact_cache(vts)
  def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
    """Checks the artifact cache for the specified list of VersionedTargetSets.
    Returns a pair (cached, uncached) of VersionedTargets that were
    satisfied/unsatisfied from the cache.
    """
    if not vts:
      return [], []
    cached_vts = []
    uncached_vts = OrderedSet(vts)
    read_cache = self._cache_factory.get_read_cache()
    items = [(read_cache, vt.cache_key, vt.results_dir if vt.has_results_dir else None)
             for vt in vts]
    res = self.context.subproc_map(call_use_cached_files, items)
    for vt, was_in_cache in zip(vts, res):
      if was_in_cache:
        cached_vts.append(vt)
        uncached_vts.discard(vt)
      elif isinstance(was_in_cache, UnreadableArtifact):
        # NOTE(review): set.update(...) iterates its argument; if
        # `was_in_cache.key` is a single cache key, `.add(...)` may be
        # intended here — confirm against UnreadableArtifact's definition.
        self._cache_key_errors.update(was_in_cache.key)
    self._maybe_create_results_dirs(vts)
    # Note that while the input vts may represent multiple targets (for tasks that overrride
    # check_artifact_cache_for), the ones we return must represent single targets.
    def flatten(vts):
      return list(itertools.chain.from_iterable([vt.versioned_targets for vt in vts]))
    all_cached_vts, all_uncached_vts = flatten(cached_vts), flatten(uncached_vts)
    if post_process_cached_vts:
      post_process_cached_vts(all_cached_vts)
    for vt in all_cached_vts:
      vt.update()
    return all_cached_vts, all_uncached_vts
  def update_artifact_cache(self, vts_artifactfiles_pairs):
    """Write to the artifact cache, if we're configured to.
    vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
      - vts is single VersionedTargetSet.
      - artifactfiles is a list of absolute paths to artifacts for the VersionedTargetSet.
    """
    update_artifact_cache_work = self._get_update_artifact_cache_work(vts_artifactfiles_pairs)
    if update_artifact_cache_work:
      self.context.submit_background_work_chain([update_artifact_cache_work],
                                                parent_workunit_name='cache')
  def _get_update_artifact_cache_work(self, vts_artifactfiles_pairs):
    """Create a Work instance to update an artifact cache, if we're configured to.
    vts_artifactfiles_pairs - a list of pairs (vts, artifactfiles) where
      - vts is single VersionedTargetSet.
      - artifactfiles is a list of paths to artifacts for the VersionedTargetSet.
    """
    cache = self._cache_factory.get_write_cache()
    if cache:
      if len(vts_artifactfiles_pairs) == 0:
        return None
      # Do some reporting.
      targets = set()
      for vts, _ in vts_artifactfiles_pairs:
        targets.update(vts.targets)
      self._report_targets('Caching artifacts for ', list(targets), '.')
      always_overwrite = self._cache_factory.overwrite()
      # Cache the artifacts.
      args_tuples = []
      for vts, artifactfiles in vts_artifactfiles_pairs:
        # Re-insert artifacts whose previous read failed, even without --overwrite.
        overwrite = always_overwrite or vts.cache_key in self._cache_key_errors
        args_tuples.append((cache, vts.cache_key, artifactfiles, overwrite))
      return Work(lambda x: self.context.subproc_map(call_insert, x), [(args_tuples,)], 'insert')
    else:
      return None
  def _report_targets(self, prefix, targets, suffix):
    self.context.log.info(
        prefix,
        items_to_report_element([t.address.reference() for t in targets], 'target'),
        suffix)
  def require_single_root_target(self):
    """If a single target was specified on the cmd line, returns that target.
    Otherwise throws TaskError.
    """
    target_roots = self.context.target_roots
    if len(target_roots) == 0:
      raise TaskError('No target specified.')
    elif len(target_roots) > 1:
      raise TaskError('Multiple targets specified: {}'
                      .format(', '.join([repr(t) for t in target_roots])))
    return target_roots[0]
class Task(TaskBase):
  """An executable task.
  Tasks form the atoms of work done by pants and when executed generally produce artifacts as a
  side effect whether these be files on disk (for example compilation outputs) or characters output
  to the terminal (for example dependency graph metadata).
  """
  @abstractmethod
  def execute(self):
    """Executes this task.

    Must be implemented by concrete subclasses.
    """
class QuietTaskMixin(object):
  """A mixin to signal that pants shouldn't print verbose progress information for this task."""
  # Pure marker mixin: carries no behavior of its own.
  pass
| {
"content_hash": "d777581084c2607076d2fbc6159f949a",
"timestamp": "",
"source": "github",
"line_count": 552,
"max_line_length": 119,
"avg_line_length": 42.84057971014493,
"alnum_prop": 0.6940544654939107,
"repo_name": "megaserg/pants",
"id": "84dac0b281a79c8960805931b40a0096f54027af",
"size": "23795",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/python/pants/backend/core/tasks/task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "11572"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1437"
},
{
"name": "HTML",
"bytes": "64029"
},
{
"name": "Java",
"bytes": "307373"
},
{
"name": "JavaScript",
"bytes": "28962"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4127534"
},
{
"name": "Scala",
"bytes": "85457"
},
{
"name": "Shell",
"bytes": "49640"
},
{
"name": "Thrift",
"bytes": "2898"
}
],
"symlink_target": ""
} |
import sys
# This works just like /bin/false, but Windows users might not have that.
# Exit status 1 signals failure to the caller.
sys.exit(1)
| {
"content_hash": "9c2820b72766008c59911e6220320962",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 14.142857142857142,
"alnum_prop": 0.7373737373737373,
"repo_name": "tdyas/pants",
"id": "384035975266c2a5dac5171047f6b24b8984b291",
"size": "231",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/backend/jvm/tasks/false.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "655"
},
{
"name": "C++",
"bytes": "2010"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Dockerfile",
"bytes": "5596"
},
{
"name": "GAP",
"bytes": "1283"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "2765"
},
{
"name": "HTML",
"bytes": "44381"
},
{
"name": "Java",
"bytes": "518180"
},
{
"name": "JavaScript",
"bytes": "22906"
},
{
"name": "Python",
"bytes": "7955590"
},
{
"name": "Rust",
"bytes": "1031208"
},
{
"name": "Scala",
"bytes": "106520"
},
{
"name": "Shell",
"bytes": "109904"
},
{
"name": "Starlark",
"bytes": "502255"
},
{
"name": "Thrift",
"bytes": "2953"
}
],
"symlink_target": ""
} |
"""Python abstract syntax node definitions
This file is automatically generated.
"""
from types import TupleType, ListType
from consts import CO_VARARGS, CO_VARKEYWORDS
def flatten(list):
    """Recursively flatten nested tuples and lists into one flat list.

    Non-sequence elements are appended unchanged, preserving order.
    """
    # NOTE(review): the parameter shadows the builtin ``list``; the
    # TupleType/ListType identity checks below sidestep needing that builtin.
    l = []
    for elt in list:
        t = type(elt)
        # Exact-type check: subclasses of tuple/list are NOT flattened.
        if t is TupleType or t is ListType:
            for elt2 in flatten(elt):
                l.append(elt2)
        else:
            l.append(elt)
    return l
def flatten_nodes(list):
    """Return only the Node instances from the flattened *list*."""
    return [n for n in flatten(list) if isinstance(n, Node)]
def asList(nodearg):
    """Recursively convert *nodearg* into a list of plain values.

    Items with an ``asList`` method are converted by calling it; nested
    tuples/lists (exact types only) are converted recursively and emitted
    as tuples; everything else is passed through unchanged.
    """
    l = []
    for item in nodearg:
        if hasattr(item, "asList"):
            l.append(item.asList())
        else:
            # Use the builtin tuple/list directly instead of the deprecated
            # types.TupleType/types.ListType aliases (they are the very same
            # objects in Python 2, and the aliases are gone in Python 3).
            # Exact-type identity is preserved: subclasses pass through.
            t = type(item)
            if t is tuple or t is list:
                l.append(tuple(asList(item)))
            else:
                l.append(item)
    return l
nodes = {}
class Node: # an abstract base class
    """Abstract base class for all AST node types."""
    lineno = None # provide a lineno for nodes that don't have one
    def getType(self):
        pass # implemented by subclass
    def getChildren(self):
        pass # implemented by subclasses
    def asList(self):
        # Recursively convert this node's children to nested tuples.
        return tuple(asList(self.getChildren()))
    def getChildNodes(self):
        pass # implemented by subclasses
class EmptyNode(Node):
    """A placeholder node carrying no content of its own."""
    pass
class Slice(Node):
    """Slicing operation ``expr[lower:upper]`` with access *flags*."""
    nodes["slice"] = "Slice"
    def __init__(self, expr, flags, lower, upper):
        self.expr = expr
        self.flags = flags
        self.lower = lower
        self.upper = upper
    def getChildren(self):
        return (self.expr, self.flags, self.lower, self.upper)
    def getChildNodes(self):
        # lower/upper bounds are optional and skipped when absent.
        children = [self.expr]
        children.extend([n for n in (self.lower, self.upper) if n is not None])
        return tuple(children)
    def __repr__(self):
        return "Slice(%s, %s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.lower), repr(self.upper))
class Const(Node):
    """A constant (literal) value."""
    nodes["const"] = "Const"
    def __init__(self, value):
        self.value = value
    def getChildren(self):
        return (self.value,)
    def getChildNodes(self):
        # A constant never has child nodes.
        return ()
    def __repr__(self):
        return "Const(%s)" % repr(self.value)
class Raise(Node):
    """A ``raise`` statement with up to three optional expressions."""
    nodes["raise"] = "Raise"
    def __init__(self, expr1, expr2, expr3):
        self.expr1 = expr1
        self.expr2 = expr2
        self.expr3 = expr3
    def getChildren(self):
        return (self.expr1, self.expr2, self.expr3)
    def getChildNodes(self):
        # Only the expressions actually present are child nodes.
        present = [e for e in (self.expr1, self.expr2, self.expr3)
                   if e is not None]
        return tuple(present)
    def __repr__(self):
        return "Raise(%s, %s, %s)" % (repr(self.expr1), repr(self.expr2), repr(self.expr3))
class For(Node):
    """A ``for assign in list: body`` statement with optional ``else``."""
    nodes["for"] = "For"
    def __init__(self, assign, list, body, else_):
        self.assign = assign
        self.list = list
        self.body = body
        self.else_ = else_
    def getChildren(self):
        return (self.assign, self.list, self.body, self.else_)
    def getChildNodes(self):
        # assign/list/body are mandatory; the else clause is optional.
        result = [self.assign, self.list, self.body]
        if self.else_ is not None:
            result.append(self.else_)
        return tuple(result)
    def __repr__(self):
        return "For(%s, %s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.body), repr(self.else_))
class AssTuple(Node):
    """Tuple assignment target, e.g. ``(a, b) = ...``."""
    nodes["asstuple"] = "AssTuple"
    def __init__(self, nodes):
        self.nodes = nodes
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        return tuple(flatten_nodes(self.nodes))
    def __repr__(self):
        return "AssTuple(%s)" % repr(self.nodes)
class Mul(Node):
    """Binary multiplication ``left * right``."""
    nodes["mul"] = "Mul"
    def __init__(self, leftright):
        # Was ``def __init__(self, (left, right))``: tuple parameter
        # unpacking is Python 2 only (removed by PEP 3113).  Callers pass a
        # single (left, right) pair either way, so this is call-compatible.
        self.left, self.right = leftright
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "Mul((%s, %s))" % (repr(self.left), repr(self.right))
class Invert(Node):
    """Unary bitwise inversion ``~expr``."""
    nodes["invert"] = "Invert"
    def __init__(self, expr):
        self.expr = expr
    def getChildren(self):
        return (self.expr,)
    def getChildNodes(self):
        return (self.expr,)
    def __repr__(self):
        return "Invert(%s)" % repr(self.expr)
class RightShift(Node):
    """Binary right shift ``left >> right``."""
    nodes["rightshift"] = "RightShift"
    def __init__(self, leftright):
        # Was ``def __init__(self, (left, right))``: tuple parameter
        # unpacking is Python 2 only (removed by PEP 3113).  Callers pass a
        # single (left, right) pair either way, so this is call-compatible.
        self.left, self.right = leftright
    def getChildren(self):
        return self.left, self.right
    def getChildNodes(self):
        return self.left, self.right
    def __repr__(self):
        return "RightShift((%s, %s))" % (repr(self.left), repr(self.right))
class AssList(Node):
    """List assignment target, e.g. ``[a, b] = ...``."""
    nodes["asslist"] = "AssList"
    def __init__(self, nodes):
        self.nodes = nodes
    def getChildren(self):
        return tuple(flatten(self.nodes))
    def getChildNodes(self):
        return tuple(flatten_nodes(self.nodes))
    def __repr__(self):
        return "AssList(%s)" % repr(self.nodes)
class From(Node):
    """A ``from modname import names`` statement."""
    nodes["from"] = "From"
    def __init__(self, modname, names):
        self.modname = modname
        self.names = names
    def getChildren(self):
        return (self.modname, self.names)
    def getChildNodes(self):
        # modname/names hold plain values, never Node children.
        return ()
    def __repr__(self):
        return "From(%s, %s)" % (repr(self.modname), repr(self.names))
class Getattr(Node):
    """Attribute access ``expr.attrname``."""
    nodes["getattr"] = "Getattr"
    def __init__(self, expr, attrname):
        self.expr = expr
        self.attrname = attrname
    def getChildren(self):
        return (self.expr, self.attrname)
    def getChildNodes(self):
        # attrname is a plain string; only expr is a node.
        return (self.expr,)
    def __repr__(self):
        return "Getattr(%s, %s)" % (repr(self.expr), repr(self.attrname))
class Dict(Node):
    """Dictionary display; *items* holds its entries."""
    nodes["dict"] = "Dict"
    def __init__(self, items):
        self.items = items
    def getChildren(self):
        return tuple(flatten(self.items))
    def getChildNodes(self):
        return tuple(flatten_nodes(self.items))
    def __repr__(self):
        return "Dict(%s)" % repr(self.items)
class Module(Node):
    """Top-level module: docstring *doc* plus the module body node."""
    nodes["module"] = "Module"
    def __init__(self, doc, node):
        self.doc = doc
        self.node = node
    def getChildren(self):
        return (self.doc, self.node)
    def getChildNodes(self):
        # doc is a plain string (or None); only node is a child node.
        return (self.node,)
    def __repr__(self):
        return "Module(%s, %s)" % (repr(self.doc), repr(self.node))
class Expression(Node):
    """Artificial root node class used to support ``eval`` compilation."""
    nodes["expression"] = "Expression"
    def __init__(self, node):
        self.node = node
    def getChildren(self):
        return (self.node,)
    def getChildNodes(self):
        return (self.node,)
    def __repr__(self):
        return "Expression(%s)" % repr(self.node)
class UnaryAdd(Node):
    """Unary plus ``+expr``."""
    nodes["unaryadd"] = "UnaryAdd"
    def __init__(self, expr):
        self.expr = expr
    def getChildren(self):
        return (self.expr,)
    def getChildNodes(self):
        return (self.expr,)
    def __repr__(self):
        return "UnaryAdd(%s)" % repr(self.expr)
class Ellipsis(Node):
    """The ``...`` subscript token; carries no data."""
    nodes["ellipsis"] = "Ellipsis"
    def __init__(self):
        pass
    def getChildren(self):
        return ()
    def getChildNodes(self):
        return ()
    def __repr__(self):
        return "Ellipsis()"
class Print(Node):
    """A ``print`` statement: values *nodes* plus optional destination *dest*."""
    nodes["print"] = "Print"
    def __init__(self, nodes, dest):
        self.nodes = nodes
        self.dest = dest
    def getChildren(self):
        # dest is always included here, even when None.
        return tuple(flatten(self.nodes)) + (self.dest,)
    def getChildNodes(self):
        result = flatten_nodes(self.nodes)
        if self.dest is not None:
            result.append(self.dest)
        return tuple(result)
    def __repr__(self):
        return "Print(%s, %s)" % (repr(self.nodes), repr(self.dest))
class Import(Node):
    """An ``import names`` statement."""
    nodes["import"] = "Import"
    def __init__(self, names):
        self.names = names
    def getChildNodes(self):
        # names holds plain values, never Node children.
        return ()
    def getChildren(self):
        return (self.names,)
    def __repr__(self):
        return "Import(%s)" % repr(self.names)
class Subscript(Node):
    """Subscript ``expr[subs]`` with access *flags*."""
    nodes["subscript"] = "Subscript"
    def __init__(self, expr, flags, subs):
        self.expr = expr
        self.flags = flags
        self.subs = subs
    def getChildren(self):
        return (self.expr, self.flags) + tuple(flatten(self.subs))
    def getChildNodes(self):
        # flags is a plain value; expr and the subscripts are nodes.
        return tuple([self.expr] + flatten_nodes(self.subs))
    def __repr__(self):
        return "Subscript(%s, %s, %s)" % (repr(self.expr), repr(self.flags), repr(self.subs))
class TryExcept(Node):
nodes["tryexcept"] = "TryExcept"
def __init__(self, body, handlers, else_):
self.body = body
self.handlers = handlers
self.else_ = else_
def getChildren(self):
children = []
children.append(self.body)
children.extend(flatten(self.handlers))
children.append(self.else_)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.body)
nodelist.extend(flatten_nodes(self.handlers))
if self.else_ is not None: nodelist.append(self.else_)
return tuple(nodelist)
def __repr__(self):
return "TryExcept(%s, %s, %s)" % (repr(self.body), repr(self.handlers), repr(self.else_))
class Or(Node):
nodes["or"] = "Or"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Or(%s)" % (repr(self.nodes),)
class Name(Node):
nodes["name"] = "Name"
def __init__(self, name):
self.name = name
def getChildren(self):
return self.name,
def getChildNodes(self):
return ()
def __repr__(self):
return "Name(%s)" % (repr(self.name),)
class Function(Node):
nodes["function"] = "Function"
def __init__(self, name, argnames, defaults, flags, doc, code):
self.name = name
self.argnames = argnames
self.defaults = defaults
self.flags = flags
self.doc = doc
self.code = code
self.varargs = self.kwargs = None
if flags & CO_VARARGS:
self.varargs = 1
if flags & CO_VARKEYWORDS:
self.kwargs = 1
def getChildren(self):
children = []
children.append(self.name)
children.append(self.argnames)
children.extend(flatten(self.defaults))
children.append(self.flags)
children.append(self.doc)
children.append(self.code)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.defaults))
nodelist.append(self.code)
return tuple(nodelist)
def __repr__(self):
return "Function(%s, %s, %s, %s, %s, %s)" % (repr(self.name), repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.doc), repr(self.code))
class Assert(Node):
nodes["assert"] = "Assert"
def __init__(self, test, fail):
self.test = test
self.fail = fail
def getChildren(self):
children = []
children.append(self.test)
children.append(self.fail)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.test)
if self.fail is not None: nodelist.append(self.fail)
return tuple(nodelist)
def __repr__(self):
return "Assert(%s, %s)" % (repr(self.test), repr(self.fail))
class Return(Node):
nodes["return"] = "Return"
def __init__(self, value):
self.value = value
def getChildren(self):
return self.value,
def getChildNodes(self):
return self.value,
def __repr__(self):
return "Return(%s)" % (repr(self.value),)
class Power(Node):
nodes["power"] = "Power"
def __init__(self, (left, right)):
self.left = left
self.right = right
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Power((%s, %s))" % (repr(self.left), repr(self.right))
class Exec(Node):
nodes["exec"] = "Exec"
def __init__(self, expr, locals, globals):
self.expr = expr
self.locals = locals
self.globals = globals
def getChildren(self):
children = []
children.append(self.expr)
children.append(self.locals)
children.append(self.globals)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
if self.locals is not None: nodelist.append(self.locals)
if self.globals is not None: nodelist.append(self.globals)
return tuple(nodelist)
def __repr__(self):
return "Exec(%s, %s, %s)" % (repr(self.expr), repr(self.locals), repr(self.globals))
class Stmt(Node):
nodes["stmt"] = "Stmt"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Stmt(%s)" % (repr(self.nodes),)
class Sliceobj(Node):
nodes["sliceobj"] = "Sliceobj"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Sliceobj(%s)" % (repr(self.nodes),)
class Break(Node):
nodes["break"] = "Break"
def __init__(self, ):
pass
def getChildren(self):
return ()
def getChildNodes(self):
return ()
def __repr__(self):
return "Break()"
class Bitand(Node):
nodes["bitand"] = "Bitand"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Bitand(%s)" % (repr(self.nodes),)
class FloorDiv(Node):
nodes["floordiv"] = "FloorDiv"
def __init__(self, (left, right)):
self.left = left
self.right = right
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "FloorDiv((%s, %s))" % (repr(self.left), repr(self.right))
class TryFinally(Node):
nodes["tryfinally"] = "TryFinally"
def __init__(self, body, final):
self.body = body
self.final = final
def getChildren(self):
return self.body, self.final
def getChildNodes(self):
return self.body, self.final
def __repr__(self):
return "TryFinally(%s, %s)" % (repr(self.body), repr(self.final))
class Not(Node):
nodes["not"] = "Not"
def __init__(self, expr):
self.expr = expr
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Not(%s)" % (repr(self.expr),)
class Class(Node):
nodes["class"] = "Class"
def __init__(self, name, bases, doc, code):
self.name = name
self.bases = bases
self.doc = doc
self.code = code
def getChildren(self):
children = []
children.append(self.name)
children.extend(flatten(self.bases))
children.append(self.doc)
children.append(self.code)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.bases))
nodelist.append(self.code)
return tuple(nodelist)
def __repr__(self):
return "Class(%s, %s, %s, %s)" % (repr(self.name), repr(self.bases), repr(self.doc), repr(self.code))
class Mod(Node):
nodes["mod"] = "Mod"
def __init__(self, (left, right)):
self.left = left
self.right = right
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Mod((%s, %s))" % (repr(self.left), repr(self.right))
class Printnl(Node):
nodes["printnl"] = "Printnl"
def __init__(self, nodes, dest):
self.nodes = nodes
self.dest = dest
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
children.append(self.dest)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
if self.dest is not None: nodelist.append(self.dest)
return tuple(nodelist)
def __repr__(self):
return "Printnl(%s, %s)" % (repr(self.nodes), repr(self.dest))
class Tuple(Node):
nodes["tuple"] = "Tuple"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Tuple(%s)" % (repr(self.nodes),)
class AssAttr(Node):
nodes["assattr"] = "AssAttr"
def __init__(self, expr, attrname, flags):
self.expr = expr
self.attrname = attrname
self.flags = flags
def getChildren(self):
return self.expr, self.attrname, self.flags
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "AssAttr(%s, %s, %s)" % (repr(self.expr), repr(self.attrname), repr(self.flags))
class Keyword(Node):
nodes["keyword"] = "Keyword"
def __init__(self, name, expr):
self.name = name
self.expr = expr
def getChildren(self):
return self.name, self.expr
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Keyword(%s, %s)" % (repr(self.name), repr(self.expr))
class AugAssign(Node):
nodes["augassign"] = "AugAssign"
def __init__(self, node, op, expr):
self.node = node
self.op = op
self.expr = expr
def getChildren(self):
return self.node, self.op, self.expr
def getChildNodes(self):
return self.node, self.expr
def __repr__(self):
return "AugAssign(%s, %s, %s)" % (repr(self.node), repr(self.op), repr(self.expr))
class List(Node):
nodes["list"] = "List"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "List(%s)" % (repr(self.nodes),)
class Yield(Node):
nodes["yield"] = "Yield"
def __init__(self, value):
self.value = value
def getChildren(self):
return self.value,
def getChildNodes(self):
return self.value,
def __repr__(self):
return "Yield(%s)" % (repr(self.value),)
class LeftShift(Node):
nodes["leftshift"] = "LeftShift"
def __init__(self, (left, right)):
self.left = left
self.right = right
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "LeftShift((%s, %s))" % (repr(self.left), repr(self.right))
class AssName(Node):
nodes["assname"] = "AssName"
def __init__(self, name, flags):
self.name = name
self.flags = flags
def getChildren(self):
return self.name, self.flags
def getChildNodes(self):
return ()
def __repr__(self):
return "AssName(%s, %s)" % (repr(self.name), repr(self.flags))
class While(Node):
nodes["while"] = "While"
def __init__(self, test, body, else_):
self.test = test
self.body = body
self.else_ = else_
def getChildren(self):
children = []
children.append(self.test)
children.append(self.body)
children.append(self.else_)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.test)
nodelist.append(self.body)
if self.else_ is not None: nodelist.append(self.else_)
return tuple(nodelist)
def __repr__(self):
return "While(%s, %s, %s)" % (repr(self.test), repr(self.body), repr(self.else_))
class Continue(Node):
nodes["continue"] = "Continue"
def __init__(self, ):
pass
def getChildren(self):
return ()
def getChildNodes(self):
return ()
def __repr__(self):
return "Continue()"
class Backquote(Node):
nodes["backquote"] = "Backquote"
def __init__(self, expr):
self.expr = expr
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Backquote(%s)" % (repr(self.expr),)
class Discard(Node):
nodes["discard"] = "Discard"
def __init__(self, expr):
self.expr = expr
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "Discard(%s)" % (repr(self.expr),)
class Div(Node):
nodes["div"] = "Div"
def __init__(self, (left, right)):
self.left = left
self.right = right
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Div((%s, %s))" % (repr(self.left), repr(self.right))
class Assign(Node):
nodes["assign"] = "Assign"
def __init__(self, nodes, expr):
self.nodes = nodes
self.expr = expr
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
children.append(self.expr)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
nodelist.append(self.expr)
return tuple(nodelist)
def __repr__(self):
return "Assign(%s, %s)" % (repr(self.nodes), repr(self.expr))
class Lambda(Node):
nodes["lambda"] = "Lambda"
def __init__(self, argnames, defaults, flags, code):
self.argnames = argnames
self.defaults = defaults
self.flags = flags
self.code = code
self.varargs = self.kwargs = None
if flags & CO_VARARGS:
self.varargs = 1
if flags & CO_VARKEYWORDS:
self.kwargs = 1
def getChildren(self):
children = []
children.append(self.argnames)
children.extend(flatten(self.defaults))
children.append(self.flags)
children.append(self.code)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.defaults))
nodelist.append(self.code)
return tuple(nodelist)
def __repr__(self):
return "Lambda(%s, %s, %s, %s)" % (repr(self.argnames), repr(self.defaults), repr(self.flags), repr(self.code))
class And(Node):
nodes["and"] = "And"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "And(%s)" % (repr(self.nodes),)
class Compare(Node):
nodes["compare"] = "Compare"
def __init__(self, expr, ops):
self.expr = expr
self.ops = ops
def getChildren(self):
children = []
children.append(self.expr)
children.extend(flatten(self.ops))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
nodelist.extend(flatten_nodes(self.ops))
return tuple(nodelist)
def __repr__(self):
return "Compare(%s, %s)" % (repr(self.expr), repr(self.ops))
class Bitor(Node):
nodes["bitor"] = "Bitor"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Bitor(%s)" % (repr(self.nodes),)
class Bitxor(Node):
nodes["bitxor"] = "Bitxor"
def __init__(self, nodes):
self.nodes = nodes
def getChildren(self):
children = []
children.extend(flatten(self.nodes))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.nodes))
return tuple(nodelist)
def __repr__(self):
return "Bitxor(%s)" % (repr(self.nodes),)
class CallFunc(Node):
nodes["callfunc"] = "CallFunc"
def __init__(self, node, args, star_args = None, dstar_args = None):
self.node = node
self.args = args
self.star_args = star_args
self.dstar_args = dstar_args
def getChildren(self):
children = []
children.append(self.node)
children.extend(flatten(self.args))
children.append(self.star_args)
children.append(self.dstar_args)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.node)
nodelist.extend(flatten_nodes(self.args))
if self.star_args is not None: nodelist.append(self.star_args)
if self.dstar_args is not None: nodelist.append(self.dstar_args)
return tuple(nodelist)
def __repr__(self):
return "CallFunc(%s, %s, %s, %s)" % (repr(self.node), repr(self.args), repr(self.star_args), repr(self.dstar_args))
class Global(Node):
nodes["global"] = "Global"
def __init__(self, names):
self.names = names
def getChildren(self):
return self.names,
def getChildNodes(self):
return ()
def __repr__(self):
return "Global(%s)" % (repr(self.names),)
class Add(Node):
nodes["add"] = "Add"
def __init__(self, (left, right)):
self.left = left
self.right = right
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Add((%s, %s))" % (repr(self.left), repr(self.right))
class ListCompIf(Node):
nodes["listcompif"] = "ListCompIf"
def __init__(self, test):
self.test = test
def getChildren(self):
return self.test,
def getChildNodes(self):
return self.test,
def __repr__(self):
return "ListCompIf(%s)" % (repr(self.test),)
class Sub(Node):
nodes["sub"] = "Sub"
def __init__(self, (left, right)):
self.left = left
self.right = right
def getChildren(self):
return self.left, self.right
def getChildNodes(self):
return self.left, self.right
def __repr__(self):
return "Sub((%s, %s))" % (repr(self.left), repr(self.right))
class Pass(Node):
nodes["pass"] = "Pass"
def __init__(self, ):
pass
def getChildren(self):
return ()
def getChildNodes(self):
return ()
def __repr__(self):
return "Pass()"
class UnarySub(Node):
nodes["unarysub"] = "UnarySub"
def __init__(self, expr):
self.expr = expr
def getChildren(self):
return self.expr,
def getChildNodes(self):
return self.expr,
def __repr__(self):
return "UnarySub(%s)" % (repr(self.expr),)
class If(Node):
nodes["if"] = "If"
def __init__(self, tests, else_):
self.tests = tests
self.else_ = else_
def getChildren(self):
children = []
children.extend(flatten(self.tests))
children.append(self.else_)
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.extend(flatten_nodes(self.tests))
if self.else_ is not None: nodelist.append(self.else_)
return tuple(nodelist)
def __repr__(self):
return "If(%s, %s)" % (repr(self.tests), repr(self.else_))
class ListComp(Node):
nodes["listcomp"] = "ListComp"
def __init__(self, expr, quals):
self.expr = expr
self.quals = quals
def getChildren(self):
children = []
children.append(self.expr)
children.extend(flatten(self.quals))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.expr)
nodelist.extend(flatten_nodes(self.quals))
return tuple(nodelist)
def __repr__(self):
return "ListComp(%s, %s)" % (repr(self.expr), repr(self.quals))
class ListCompFor(Node):
nodes["listcompfor"] = "ListCompFor"
def __init__(self, assign, list, ifs):
self.assign = assign
self.list = list
self.ifs = ifs
def getChildren(self):
children = []
children.append(self.assign)
children.append(self.list)
children.extend(flatten(self.ifs))
return tuple(children)
def getChildNodes(self):
nodelist = []
nodelist.append(self.assign)
nodelist.append(self.list)
nodelist.extend(flatten_nodes(self.ifs))
return tuple(nodelist)
def __repr__(self):
return "ListCompFor(%s, %s, %s)" % (repr(self.assign), repr(self.list), repr(self.ifs))
klasses = globals()
for k in nodes.keys():
nodes[k] = klasses[nodes[k]]
| {
"content_hash": "524d9afd482f57b5f76d0821153c70be",
"timestamp": "",
"source": "github",
"line_count": 1241,
"max_line_length": 162,
"avg_line_length": 25.7767929089444,
"alnum_prop": 0.5731970364812904,
"repo_name": "MalloyPower/parsing-python",
"id": "8d69da6d9907f55e8cefd7031e0a03a67aa0dc8d",
"size": "31989",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/compiler/ast.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import asyncio
from collections import defaultdict
from contextlib import suppress
import logging
import uuid
from .client import Client
from .utils import log_errors, TimeoutError
from .worker import get_worker
from .utils import parse_timedelta
logger = logging.getLogger(__name__)
class EventExtension:
""" An extension for the scheduler to manage Events
This adds the following routes to the scheduler
* event_wait
* event_set
* event_clear
* event_is_set
In principle, the implementation logic is quite simple
as we can reuse the asyncio.Event as much as possible:
we keep a mapping from name to an asyncio.Event and call
every function (wait, set, clear, is_set) directly on these
events.
However, this would cause a memory leak: created events in the
dictionary are never removed.
For this, we also keep a counter for the number of waiters on
a specific event.
If an event is set, we need to keep track of this state so
we can not remove it (the default flag is false).
If it is unset but there are waiters, we can also not remove
it, as those waiters would then have dangling futures.
Therefore the only time we can remove the event from our dict
is when the number of waiters is 0 and the event flag is cleared.
"""
def __init__(self, scheduler):
self.scheduler = scheduler
# Keep track of all current events, identified by their name
self._events = defaultdict(asyncio.Event)
# Keep track on how many waiters are present, so we know when
# we can remove the event
self._waiter_count = defaultdict(int)
self.scheduler.handlers.update(
{
"event_wait": self.event_wait,
"event_set": self.event_set,
"event_clear": self.event_clear,
"event_is_set": self.event_is_set,
}
)
self.scheduler.extensions["events"] = self
async def event_wait(self, comm=None, name=None, timeout=None):
""" Wait until the event is set to true.
Returns false, when this did not happen in the given time
and true otherwise.
"""
with log_errors():
name = self._normalize_name(name)
event = self._events[name]
future = event.wait()
if timeout is not None:
future = asyncio.wait_for(future, timeout)
self._waiter_count[name] += 1
try:
await future
except TimeoutError:
return False
finally:
self._waiter_count[name] -= 1
if not self._waiter_count[name] and not event.is_set():
# No one is waiting for this
# and as the default flag for an event is false
# we can safely remove it
self._delete_event(name)
return True
def event_set(self, comm=None, name=None):
""" Set the event with the given name to true.
All waiters on this event will be notified.
"""
with log_errors():
name = self._normalize_name(name)
# No matter if someone is listening or not,
# we set the event to true
self._events[name].set()
def event_clear(self, comm=None, name=None):
"""Set the event with the given name to false."""
with log_errors():
name = self._normalize_name(name)
if not self._waiter_count[name]:
# No one is waiting for this
# and as the default flag for an event is false
# we can safely remove it
self._delete_event(name)
else:
# There are waiters
# This can happen if an event is "double-cleared"
# In principle, the event should be unset at this point
# (because if it is set, all waiters should have been
# notified). But to prevent race conditions
# due to unlucky timing, we clear anyways
assert name in self._events
event = self._events[name]
event.clear()
def event_is_set(self, comm=None, name=None):
with log_errors():
name = self._normalize_name(name)
# the default flag value is false
# we could also let the defaultdict
# create a new event for us, but that
# could produce many unused events
if name not in self._events:
return False
return self._events[name].is_set()
def _normalize_name(self, name):
""" Helper function to normalize an event name """
if isinstance(name, list):
name = tuple(name)
return name
def _delete_event(self, name):
""" Helper function to delete an event """
# suppress key errors to make calling this method
# also possible if we do not even have such an event
with suppress(KeyError):
del self._waiter_count[name]
with suppress(KeyError):
del self._events[name]
class Event:
""" Distributed Centralized Event equivalent to asyncio.Event
An event stores a single flag, which is set to false on start.
The flag can be set to true (using the set() call) or back to false
(with the clear() call).
Every call to wait() blocks until the event flag is set to true.
Parameters
----------
name: string (optional)
Name of the event. Choosing the same name allows two
disconnected processes to coordinate an event.
If not given, a random name will be generated.
client: Client (optional)
Client to use for communication with the scheduler.
If not given, the default global client will be used.
Examples
--------
>>> event_1 = Event('a') # doctest: +SKIP
>>> event_1.wait(timeout=1) # doctest: +SKIP
>>> # in another process
>>> event_2 = Event('a') # doctest: +SKIP
>>> event_2.set() # doctest: +SKIP
>>> # now event_1 will stop waiting
"""
def __init__(self, name=None, client=None):
try:
self.client = client or Client.current()
except ValueError:
# Initialise new client
self.client = get_worker().client
self.name = name or "event-" + uuid.uuid4().hex
def __await__(self):
""" async constructor
Make it possible to write
>>> event = await Event("x") # doctest: +SKIP
even though no waiting is implied
"""
async def _():
return self
return _().__await__()
def wait(self, timeout=None):
""" Wait until the event is set.
Parameters
----------
timeout : number or string or timedelta, optional
Seconds to wait on the event in the scheduler. This does not
include local coroutine time, network transfer time, etc..
Instead of number of seconds, it is also possible to specify
a timedelta in string format, e.g. "200ms".
Examples
--------
>>> event = Event('a') # doctest: +SKIP
>>> event.wait(timeout="1s") # doctest: +SKIP
Returns
-------
True if the event was set of false, if a timeout happend
"""
timeout = parse_timedelta(timeout)
result = self.client.sync(
self.client.scheduler.event_wait, name=self.name, timeout=timeout,
)
return result
def clear(self):
""" Clear the event (set its flag to false).
All waiters will now block.
"""
return self.client.sync(self.client.scheduler.event_clear, name=self.name)
def set(self):
""" Set the event (set its flag to false).
All waiters will now be released.
"""
result = self.client.sync(self.client.scheduler.event_set, name=self.name,)
return result
def is_set(self):
""" Check if the event is set """
result = self.client.sync(self.client.scheduler.event_is_set, name=self.name,)
return result
def __reduce__(self):
return (Event, (self.name,))
| {
"content_hash": "33ecadee70e8ce708f59d34d1f1bd5d8",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 86,
"avg_line_length": 33.24701195219124,
"alnum_prop": 0.5823846614739365,
"repo_name": "blaze/distributed",
"id": "75661654b86f104cab19da67ce734d5d5a029b0d",
"size": "8345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "distributed/event.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "236"
},
{
"name": "Python",
"bytes": "511624"
},
{
"name": "Shell",
"bytes": "1120"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import sys
import django
__all__ = [
'json',
'BufferIO',
'urlopen',
'urlparse',
'quote',
'quote_plus',
'URLError',
'force_unicode', 'text_type'
]
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
# Django version
if django.VERSION < (1, 5):
from django.utils import simplejson as json
from django.utils.encoding import force_unicode
else:
import json
from django.utils.encoding import force_text as force_unicode
try:
from django.utils.encoding import smart_text
except ImportError:
from django.utils.encoding import smart_unicode as smart_text
try:
# Python >= 2.7
from importlib import import_module
except ImportError:
from django.utils.importlib import import_module
# Python 2 and 3
if PY3:
from urllib.error import URLError
from urllib.request import urlopen
from urllib.parse import quote, quote_plus
import urllib.parse as urlparse
from io import BytesIO as BufferIO
text_type = str
string_type = str
def encode(value, charset='utf-8', errors='ignore'):
if isinstance(value, bytes):
return value
return value.encode(charset, errors)
def urlsplit(url):
return urlparse.urlsplit(url.decode('ascii', 'ignore'))
elif PY2:
from urllib2 import URLError
from urllib2 import urlopen
from urllib import quote, quote_plus
import urlparse
from cStringIO import StringIO as BufferIO
text_type = unicode
string_type = basestring
urlsplit = urlparse.urlsplit
def encode(value, charset='utf-8', errors='ignore'):
if isinstance(value, unicode):
return value.encode(charset, errors)
return unicode(value, errors=errors).encode(charset)
| {
"content_hash": "c40e23e1c9a9bbf8c0fae9b10822446b",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 65,
"avg_line_length": 22.82278481012658,
"alnum_prop": 0.6783139212423738,
"repo_name": "Resmin/sorl-thumbnail",
"id": "4c328dc1f3bfb858d910b84c4703fb7e41d38f14",
"size": "1803",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "sorl/thumbnail/compat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "3522"
},
{
"name": "Python",
"bytes": "133676"
},
{
"name": "Shell",
"bytes": "508"
}
],
"symlink_target": ""
} |
import base64
import errno
import os
from oslo.config import cfg
from kds.common import exception
from kds.openstack.common.crypto import utils as cryptoutils
CONF = cfg.CONF
KEY_SIZE = 16
class CryptoManager(object):
def __init__(self):
self.crypto = cryptoutils.SymmetricCrypto(enctype=CONF.kds.enctype,
hashtype=CONF.kds.hashtype)
self.hkdf = cryptoutils.HKDF(hashtype=CONF.kds.hashtype)
self.mkey = self._load_master_key()
def _load_master_key(self):
"""Load the master key from file, or create one if not available."""
mkey = None
try:
with open(CONF.kds.master_key_file, 'r') as f:
mkey = base64.b64decode(f.read())
except IOError as e:
if e.errno == errno.ENOENT:
flags = os.O_WRONLY | os.O_CREAT | os.O_EXCL
mkey = self.crypto.new_key(KEY_SIZE)
f = None
try:
f = os.open(CONF.kds.master_key_file, flags, 0o600)
os.write(f, base64.b64encode(mkey))
except Exception:
try:
os.remove(CONF.kds.master_key_file)
except OSError:
pass
raise
finally:
if f:
os.close(f)
else:
# the file could be unreadable due to bad permissions
# so just pop up whatever error comes
raise
return mkey
def generate_keys(self, prk, info="", key_size=KEY_SIZE):
"""Generate a new key from an existing key and information.
:param string prk: Existing pseudo-random key
:param string info: Additional information for building a new key
:returns tuple(string, string): raw signature key, raw encryption key
"""
key = self.hkdf.expand(prk, info, 2 * key_size)
return key[:key_size], key[key_size:]
def _get_storage_keys(self, key_id):
"""Get a set of keys that will be used to encrypt the data for this
identity in the database.
:param string key_id: Key Identifier
:returns tuple(string, string): raw signature key, raw encryption key
"""
if not self.mkey:
raise exception.UnexpectedError('Failed to find mkey')
return self.generate_keys(self.mkey, key_id)
def encrypt_keyblock(self, key_id, keyblock):
"""Encrypt a key for storage.
Returns the signature and the encryption key.
"""
skey, ekey = self._get_storage_keys(key_id)
# encrypt the key
try:
enc_key = self.crypto.encrypt(ekey, keyblock, b64encode=False)
except Exception:
raise exception.UnexpectedError('Failed to encrypt key')
# sign it for integrity
try:
sig_key = self.crypto.sign(skey, enc_key, b64encode=False)
except Exception:
raise exception.UnexpectedError('Failed to sign key')
return sig_key, enc_key
def decrypt_keyblock(self, key_id, sig_key, enc_key):
"""Decrypt a key from storage.
Returns the raw key data.
"""
skey, ekey = self._get_storage_keys(key_id)
# signature check
try:
sigc = self.crypto.sign(skey, enc_key, b64encode=False)
except Exception:
raise exception.UnexpectedError('Failed to verify key')
if not sigc == sig_key:
raise exception.UnexpectedError('Signature check failed')
# decrypt the key
try:
plain = self.crypto.decrypt(ekey, enc_key, b64decode=False)
except Exception:
raise exception.UnexpectedError('Failed to decrypt key')
return plain
| {
"content_hash": "1d03937acfb5afd15d025ecf4a769df6",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 77,
"avg_line_length": 31.84426229508197,
"alnum_prop": 0.5675675675675675,
"repo_name": "jamielennox/openstack-kds",
"id": "0e5236df6e069e0c4d4ded897aa59f2934f1fe9b",
"size": "4476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kds/common/crypto.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "219538"
}
],
"symlink_target": ""
} |
import csv
count = 0
with open('submissions.csv', 'rb') as count_file:
csv_reader = csv.reader(count_file)
for row in csv_reader:
count += 1
print count | {
"content_hash": "894486bb8b4a8498d3424920e03859b6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 49,
"avg_line_length": 21.125,
"alnum_prop": 0.650887573964497,
"repo_name": "akshaynagpal/competitive-programming",
"id": "e1ce423afc0399644fdb124c096165d86d3d7e8e",
"size": "169",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hackerearth/ML/will_bill_solve_it/count_rows.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7427"
},
{
"name": "Java",
"bytes": "1896"
},
{
"name": "Python",
"bytes": "23959"
}
],
"symlink_target": ""
} |
""" A script to manage development tasks """
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from os import path as p
from manager import Manager
from subprocess import call
manager = Manager()
_basedir = p.dirname(__file__)
@manager.command
def clean():
    """Delete Python bytecode and build artifacts via the clean helper."""
    helper = p.join(_basedir, 'helpers', 'clean')
    call(helper, shell=True)


@manager.command
def check():
    """Lint the currently staged changes via the check-stage helper."""
    helper = p.join(_basedir, 'helpers', 'check-stage')
    call(helper, shell=True)
@manager.arg('where', 'w', help='Modules to check')
@manager.command
def lint(where=None):
    """Check style with flake8, optionally restricted to `where`."""
    target = where if where else ''
    call('flake8 %s' % target, shell=True)
@manager.command
def pipme():
    """Install the pinned dependencies from requirements.txt."""
    call('pip install -r requirements.txt', shell=True)


@manager.command
def require():
    """Regenerate requirements.txt from the current environment,
    excluding anything listed in dev-requirements.txt."""
    call(
        'pip freeze -l | grep -vxFf dev-requirements.txt > requirements.txt',
        shell=True)
@manager.arg('where', 'w', help='test path', default=None)
@manager.arg(
    'stop', 'x', help='Stop after first error', type=bool, default=False)
@manager.command
def test(where=None, stop=False):
    """Run nose and script tests.

    :param where: optional test path (forwarded to nose's ``-w`` option)
    :param stop: stop after the first error (nose ``-x``)
    """
    # Build argv as a list. The previous code concatenated the flags and the
    # path into one string ('-vw <path>') and passed it as a single argv
    # element, so the space-containing argument reached the helper unsplit
    # and the test path was never honored.
    args = [p.join(_basedir, 'helpers', 'test'), '-xv' if stop else '-v']
    if where:
        args += ['-w', where]
    call(args)
@manager.command
def register():
    """Register this package with PyPI."""
    setup_py = p.join(_basedir, 'setup.py')
    call('python %s register' % setup_py, shell=True)


@manager.command
def release():
    """Build the sdist and wheel, then upload both."""
    sdist()
    wheel()
    upload()


@manager.command
def build():
    """Create a source distribution and wheel package"""
    sdist()
    wheel()


@manager.command
def upload():
    """Upload everything under dist/ with twine."""
    dist_glob = p.join(_basedir, 'dist', '*')
    call('twine upload %s' % dist_glob, shell=True)
@manager.command
def sdist():
    """Create a source distribution package via the srcdist helper."""
    helper = p.join(_basedir, 'helpers', 'srcdist')
    call(helper, shell=True)


@manager.command
def wheel():
    """Create a wheel package via the wheel helper."""
    helper = p.join(_basedir, 'helpers', 'wheel')
    call(helper, shell=True)


if __name__ == '__main__':
    manager.main()
| {
"content_hash": "638f1959debc4f3ad32dcc66f1146914",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 78,
"avg_line_length": 22.948979591836736,
"alnum_prop": 0.6456202756780791,
"repo_name": "reubano/ckanutils",
"id": "5df51241f3779a341f4edc80a540324d0fb0c7da",
"size": "2296",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "981"
},
{
"name": "Python",
"bytes": "35234"
},
{
"name": "Shell",
"bytes": "2708"
}
],
"symlink_target": ""
} |
"""
This script can be used to create a source distribution, binary distribution
or Windows executable files. The output is put in dist/
See
https://github.com/google/transitfeed/wiki/BuildingPythonWindowsExecutables
for help on creating Windows executables.
"""
from distutils.core import setup
import glob
import os.path
from transitfeed import __version__ as VERSION
try:
import py2exe
has_py2exe = True
except ImportError as e:
# Won't be able to generate win32 exe
has_py2exe = False
# py2exe doesn't automatically include pytz dependency because it is optional
options = {'py2exe': {'packages': ['pytz', 'pybcp47']}}
scripts_for_py2exe = ['feedvalidator.py', 'schedule_viewer.py', 'kmlparser.py',
'kmlwriter.py', 'merge.py', 'unusual_trip_filter.py',
'location_editor.py', 'feedvalidator_googletransit.py',
'upgrade_translations.py', 'visualize_pathways.py']
# On Nov 23, 2009 Tom Brown said: I'm not confident that we can include a
# working copy of this script in the py2exe distribution because it depends on
# ogr. I do want it included in the source tar.gz.
scripts_for_source_only = ['shape_importer.py']
kwargs = {}
if has_py2exe:
kwargs['console'] = scripts_for_py2exe
# py2exe seems to ignore package_data and not add marey_graph. This makes it
# work.
kwargs['data_files'] = \
[('schedule_viewer_files',
glob.glob(os.path.join('gtfsscheduleviewer', 'files', '*')))]
options['py2exe'] = {'dist_dir': 'transitfeed-windows-binary-%s' % VERSION}
# Build the distribution. 'options' and the extra 'kwargs' above only take
# effect for the py2exe (Windows) build; a plain sdist ignores them.
setup(
    version=VERSION,
    name='transitfeed',
    url='https://github.com/google/transitfeed/',
    download_url='https://github.com/google/transitfeed/archive/'
                 '%s.tar.gz' % VERSION,
    maintainer='Multiple',
    maintainer_email='transitfeed@googlegroups.com',
    description='GTFS library and tools',
    long_description='This module provides a library for reading, writing and '
    'validating GTFS files. It includes some scripts that validate a feed, '
    'display it using the Google Maps API and the start of a KML importer '
    'and exporter.',
    platforms='OS Independent',
    license='Apache License, Version 2.0',
    packages=['gtfsscheduleviewer', 'transitfeed'],
    # Also need to list package_data contents in MANIFEST.in for it to be
    # included in sdist. See "[Distutils] package_data not used by sdist
    # command" Feb 2, 2007
    package_data={'gtfsscheduleviewer': ['files/*']},
    scripts=scripts_for_py2exe + scripts_for_source_only,
    zip_safe=False,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'Intended Audience :: Information Technology',
        'Intended Audience :: Other Audience',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Scientific/Engineering :: GIS',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    options=options,
    **kwargs
)
if has_py2exe:
    # Some data files are not copied automatically by py2exe into the
    # library.zip file. This concerns mainly files which are loaded by modules
    # using pkg_resources.
    import zipfile
    # Open the library.zip file for appending additional files.
    zipfile_path = os.path.join(options['py2exe']['dist_dir'], 'library.zip')
    z = zipfile.ZipFile(zipfile_path, 'a')

    # Sometime between pytz-2008a and pytz-2008i common_timezones started to
    # include only names of zones with a corresponding data file in zoneinfo.
    # pytz installs the zoneinfo directory tree in the same directory
    # as the pytz/__init__.py file. These data files are loaded using
    # pkg_resources.resource_stream. py2exe does not copy this to library.zip so
    # resource_stream can't find the files and common_timezones is empty when
    # read in the py2exe executable.
    # This manually copies zoneinfo into the zip. See also
    # https://github.com/google/transitfeed/issues/121
    import pytz
    # Make sure the layout of pytz hasn't changed
    assert (pytz.__file__.endswith('__init__.pyc') or
            pytz.__file__.endswith('__init__.py')), pytz.__file__
    zoneinfo_dir = os.path.join(os.path.dirname(pytz.__file__), 'zoneinfo')
    # '..\\Lib\\pytz\\__init__.py' -> '..\\Lib'
    disk_basedir = os.path.dirname(os.path.dirname(pytz.__file__))
    # Mirror the on-disk zoneinfo tree into the zip, preserving relative paths.
    for absdir, directories, filenames in os.walk(zoneinfo_dir):
        assert absdir.startswith(disk_basedir), (absdir, disk_basedir)
        zip_dir = absdir[len(disk_basedir):]
        for f in filenames:
            z.write(os.path.join(absdir, f), os.path.join(zip_dir, f))

    # The custom pybcp47 module included int the googletransit extension reads
    # from a registry file in the resource path. This manually copies the file
    # language-subtag-registry.txt to the library.zip file.
    import extensions.googletransit.pybcp47 as pybcp47_module
    pybcp47_dir = os.path.join(os.path.dirname(pybcp47_module.__file__))
    disk_basedir = os.path.dirname(os.path.dirname(os.path.dirname(pybcp47_dir)))
    zip_dir = pybcp47_dir[len(disk_basedir):]
    z.write(os.path.join(pybcp47_dir, 'language-subtag-registry.txt'),
            os.path.join(zip_dir, 'language-subtag-registry.txt'))

    # Finally close the library.zip file.
    z.close()
| {
"content_hash": "4a20440be13f27ada09ccca79f02d1b8",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 80,
"avg_line_length": 43.577235772357724,
"alnum_prop": 0.692723880597015,
"repo_name": "avilaton/transitfeed",
"id": "732936ba8dfb841c84e38ff19e4706ec92f065e5",
"size": "5963",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2638"
},
{
"name": "HTML",
"bytes": "2164"
},
{
"name": "JavaScript",
"bytes": "108520"
},
{
"name": "Python",
"bytes": "1039665"
},
{
"name": "Visual Basic",
"bytes": "357"
}
],
"symlink_target": ""
} |
import subprocess
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection

# NOTE(review): reload(sys) + setdefaultencoding is a well-known Python 2
# hack that changes the process-wide default encoding; it can mask encoding
# bugs elsewhere.
reload(sys)
sys.setdefaultencoding('utf8')

# Edit Me!
# Each day after you post a signup post, copy its 6-character ID to this array.
signupPageSubmissionIds = [ "476eue", "47cxl7", "47izz3", "47obzs", "47ubpe", "481kzc", "48aipq" ]
flaskport = 8885

app = Flask(__name__)
app.debug = True
# Maps sha1 hexdigest(permalink + body) -> praw comment object; rebuilt on
# every render of /moderatesignups.html and read back by /takeaction.html.
commentHashesAndComments = {}
def loginAndReturnRedditSession():
    """Log in to reddit with the stored username/password and return the session."""
    cfg = ConfigParser()
    cfg.read("../reddit-password-credentials.cfg")
    # TODO: password auth is going away, and we will soon need to do oauth.
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    session.login(cfg.get("Reddit", "user"),
                  cfg.get("Reddit", "password"),
                  disable_warning=True)
    return session
def loginOAuthAndReturnRedditSession():
    """Build a praw session authenticated via OAuth2 and return it."""
    session = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
    oauth = OAuth2Util.OAuth2Util(session, print_log=True, configfile="../reddit-oauth-credentials.cfg")
    oauth.refresh(force=True)
    return session
def getSubmissionsForRedditSession(redditSession):
    """Fetch each configured signup submission and expand all of its comments."""
    submissions = []
    for submissionId in signupPageSubmissionIds:
        submission = redditSession.get_submission(submission_id=submissionId)
        # Expand every "load more comments" stub so the tree is complete.
        submission.replace_more_comments(limit=None, threshold=0)
        submissions.append(submission)
    return submissions
def getCommentsForSubmissions(submissions):
    """Return one flat list containing every comment across the submissions."""
    flattened = []
    for submission in submissions:
        flattened.extend(praw.helpers.flatten_tree(submission.comments))
    return flattened
def retireCommentHash(commentHash):
    """Append the hash to the retired-hashes file so the comment is skipped later."""
    with open("retiredcommenthashes.txt", "a") as retiredFile:
        retiredFile.write(commentHash + '\n')
def retiredCommentHashes():
    """Return the list of comment hashes that have already been handled."""
    with open("retiredcommenthashes.txt", "r") as retiredFile:
        return retiredFile.read().splitlines()
@app.route('/moderatesignups.html')
def moderatesignups():
    """Render the moderation page: every not-yet-retired comment from the
    signup submissions, each with action buttons that POST to /takeaction.html."""
    global commentHashesAndComments
    commentHashesAndComments = {}
    stringio = StringIO()
    stringio.write('<html>\n<head>\n</head>\n\n')
    # redditSession = loginAndReturnRedditSession()
    redditSession = loginOAuthAndReturnRedditSession()
    submissions = getSubmissionsForRedditSession(redditSession)
    flat_comments = getCommentsForSubmissions(submissions)
    retiredHashes = retiredCommentHashes()
    i = 1
    stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
    stringio.write("<h3>")
    stringio.write(os.getcwd())
    stringio.write("<br>\n")
    for submission in submissions:
        stringio.write(submission.title)
        stringio.write("<br>\n")
    stringio.write("</h3>\n\n")
    stringio.write('<form action="copydisplayduringsignuptoclipboard.html" method="post" target="invisibleiframe">')
    stringio.write('<input type="submit" value="Copy display-during-signup.py stdout to clipboard">')
    stringio.write('</form>')
    for comment in flat_comments:
        # print comment.is_root
        # print comment.score
        i += 1
        # A comment is identified by sha1(permalink + body): editing a comment
        # yields a "new" hash, and already-handled hashes are skipped below.
        commentHash = sha1()
        commentHash.update(comment.permalink)
        commentHash.update(comment.body.encode('utf-8'))
        commentHash = commentHash.hexdigest()
        if commentHash not in retiredHashes:
            commentHashesAndComments[commentHash] = comment
            authorName = str(comment.author)  # can be None if author was deleted. So check for that and skip if it's None.
            stringio.write("<hr>\n")
            stringio.write('<font color="blue"><b>')
            stringio.write(authorName)  # can be None if author was deleted. So check for that and skip if it's None.
            stringio.write('</b></font><br>')
            if ParticipantCollection().hasParticipantNamed(authorName):
                stringio.write(' <small><font color="green">(member)</font></small>')
                # if ParticipantCollection().participantNamed(authorName).isStillIn:
                #     stringio.write(' <small><font color="green">(in)</font></small>')
                # else:
                #     stringio.write(' <small><font color="red">(out)</font></small>')
            else:
                stringio.write(' <small><font color="red">(not a member)</font></small>')
            stringio.write('<form action="takeaction.html" method="post" target="invisibleiframe">')
            stringio.write('<input type="submit" name="actiontotake" value="Signup" style="color:white;background-color:green">')
            # stringio.write('<input type="submit" name="actiontotake" value="Signup and checkin">')
            # stringio.write('<input type="submit" name="actiontotake" value="Relapse">')
            # stringio.write('<input type="submit" name="actiontotake" value="Reinstate">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment">')
            stringio.write('<input type="submit" name="actiontotake" value="Skip comment and don\'t upvote">')
            stringio.write('<input type="hidden" name="username" value="' + b64encode(authorName) + '">')
            stringio.write('<input type="hidden" name="commenthash" value="' + commentHash + '">')
            stringio.write('<input type="hidden" name="commentpermalink" value="' + comment.permalink + '">')
            stringio.write('</form>')
            stringio.write(bleach.clean(markdown.markdown(comment.body.encode('utf-8')), tags=['p']))
            stringio.write("\n<br><br>\n\n")
    stringio.write('</html>')
    pageString = stringio.getvalue()
    stringio.close()
    return Response(pageString, mimetype='text/html')
@app.route('/takeaction.html', methods=["POST"])
def takeaction():
    """Handle one moderation button press: run the matching helper script,
    upvote where appropriate, and retire the comment's hash."""
    username = b64decode(request.form["username"])
    commentHash = str(request.form["commenthash"])
    # commentPermalink = request.form["commentpermalink"]
    actionToTake = request.form["actiontotake"]
    # print commentHashesAndComments
    # Look up the live praw comment captured during the last page render.
    comment = commentHashesAndComments[commentHash]
    # print "comment: " + str(comment)
    if actionToTake == 'Signup':
        print "signup - " + username
        subprocess.call(['./signup.py', username])
        comment.upvote()
        retireCommentHash(commentHash)
    # if actionToTake == 'Signup and checkin':
    #     print "signup and checkin - " + username
    #     subprocess.call(['./signup-and-checkin.sh', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    # elif actionToTake == 'Relapse':
    #     print "relapse - " + username
    #     subprocess.call(['./relapse.py', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    # elif actionToTake == 'Reinstate':
    #     print "reinstate - " + username
    #     subprocess.call(['./reinstate.py', username])
    #     comment.upvote()
    #     retireCommentHash(commentHash)
    elif actionToTake == 'Skip comment':
        print "Skip comment - " + username
        comment.upvote()
        retireCommentHash(commentHash)
    elif actionToTake == "Skip comment and don't upvote":
        print "Skip comment and don't upvote - " + username
        retireCommentHash(commentHash)
    return Response("hello", mimetype='text/html')
@app.route('/copydisplayduringsignuptoclipboard.html', methods=["POST"])
def copydisplayduringsignuptoclipboard():
    """Run the display-during-signup helper script (clipboard copy is a TODO)."""
    print "TODO: Copy display to clipboard"
    subprocess.call(['./display-during-signup.py'])
    return Response("hello", mimetype='text/html')


if __name__ == '__main__':
    # Bind to localhost only — the tool is not exposed to the network.
    app.run(host='127.0.0.1', port=flaskport)
| {
"content_hash": "4534aa5c42fa586b16d96a30a65a1d2a",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 129,
"avg_line_length": 41.58247422680412,
"alnum_prop": 0.6736085285731994,
"repo_name": "foobarbazblarg/stayclean",
"id": "a6640f99f18febc20cde2ad8eacff0ebba1404e4",
"size": "8086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stayclean-2016-march/serve-signups-with-flask.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
} |
# Presumably auto-generated by the catkin build system (empty template
# placeholders left as "") — do not edit by hand; TODO confirm generator.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "selected_points_publisher"
PROJECT_SPACE_DIR = "/home/mike/catkin_ws/devel"
PROJECT_VERSION = "1.0.0"
| {
"content_hash": "f2aa90ef52e38e83d3271bafa7ff847a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 68,
"avg_line_length": 46.857142857142854,
"alnum_prop": 0.6402439024390244,
"repo_name": "mikewrock/phd_backup_full",
"id": "4b62648c7f89454c920aa73ba684d9c6d587665e",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/selected_points_publisher/catkin_generated/pkg.develspace.context.pc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1749931"
},
{
"name": "C++",
"bytes": "4946476"
},
{
"name": "CMake",
"bytes": "1090857"
},
{
"name": "CSS",
"bytes": "31449"
},
{
"name": "Common Lisp",
"bytes": "434689"
},
{
"name": "HTML",
"bytes": "23955"
},
{
"name": "LOLCODE",
"bytes": "3316"
},
{
"name": "Lua",
"bytes": "6855"
},
{
"name": "MATLAB",
"bytes": "1183"
},
{
"name": "Makefile",
"bytes": "8352565"
},
{
"name": "NewLisp",
"bytes": "345"
},
{
"name": "Python",
"bytes": "642925"
},
{
"name": "Shell",
"bytes": "12715"
},
{
"name": "TeX",
"bytes": "1623552"
}
],
"symlink_target": ""
} |
"""
Types used in the opt_einsum package
"""
from typing import Any, Collection, FrozenSet, List, Optional, Tuple, Union
PathType = Collection[Tuple[int, ...]]
ArrayType = Any # TODO
ArrayIndexType = FrozenSet[str]
TensorShapeType = Tuple[int, ...]
ContractionListType = List[Tuple[Any, ArrayIndexType, str, Optional[Tuple[str, ...]], Union[str, bool]]]
| {
"content_hash": "f10e353864b05cc20f80055166810acd",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 104,
"avg_line_length": 32.45454545454545,
"alnum_prop": 0.7198879551820728,
"repo_name": "dgasmith/opt_einsum",
"id": "3fb22ce375f146861ab345d3950099301a9d12ce",
"size": "357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opt_einsum/typing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "674"
},
{
"name": "Python",
"bytes": "323514"
},
{
"name": "TeX",
"bytes": "2362"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import pipes
import string
import sys
GDB_RUNNER_SCRIPT = string.Template("""#!/usr/bin/env python
import os
env = ${b64env}
for k, v in env.items():
os.environ[k] = v
os.system("${gdb} --fullname --args ${test_args}")
""")
if __name__ == '__main__':
    # argv: <gdb binary> <output path for wrapper> <test binary and its args...>
    gdb = sys.argv[1]
    generated_path = sys.argv[2]
    test_args = sys.argv[3:]
    test_args[0] = os.path.abspath(test_args[0])
    quoted_args = ' '.join(pipes.quote(arg) for arg in test_args)
    script = GDB_RUNNER_SCRIPT.substitute(
        b64env=str(dict(os.environ)),
        gdb=gdb,
        test_args=quoted_args)
    with open(generated_path, 'w') as f:
        f.write(script)
    # To make bazel consider the test a failure we exit non-zero.
    print('Test was not run, instead a gdb wrapper script was produced in %s' % generated_path)
    sys.exit(1)
| {
"content_hash": "ef5e0017c3325900b8bc68d412e3e7d6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 93,
"avg_line_length": 25.90625,
"alnum_prop": 0.6332931242460796,
"repo_name": "dnoe/envoy",
"id": "a23d19fa6c5755ffcaf061bd85ad519cac8c1e2a",
"size": "1163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/gen_gdb_wrapper_script.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9173"
},
{
"name": "C++",
"bytes": "12933900"
},
{
"name": "Dockerfile",
"bytes": "245"
},
{
"name": "Emacs Lisp",
"bytes": "966"
},
{
"name": "Go",
"bytes": "836"
},
{
"name": "PowerShell",
"bytes": "4285"
},
{
"name": "PureBasic",
"bytes": "472"
},
{
"name": "Python",
"bytes": "946275"
},
{
"name": "Shell",
"bytes": "98909"
},
{
"name": "Thrift",
"bytes": "748"
}
],
"symlink_target": ""
} |
import os
import json

BASE_DIR = os.path.dirname(os.path.dirname(__file__))
CONFIG_FILE_NAME = '/etc/randomcab/main.ini'
INSTALL_DIR = ""

from configparser import RawConfigParser
config = RawConfigParser()

# Production mode when the system-wide config exists; otherwise fall back to a
# development setup that reads the same config file from the working tree.
if os.path.exists(CONFIG_FILE_NAME):
    DEBUG = False
    config.read(CONFIG_FILE_NAME)
    INSTALL_DIR = config.get('directories', 'INSTALL_DIR')
else:
    DEBUG = True
    INSTALL_DIR = "/".join(os.getcwd().split('/')[:-1]);
    config.read(INSTALL_DIR+CONFIG_FILE_NAME)

if INSTALL_DIR is None:
    raise Exception("INSTALL_DIR not defined")
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Admin', 'info@randomcab.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'sqlite.db',
    }
}

# Third-party API credentials come from the ini file, never from source.
FLICKR_API_KEY = config.get('secrets', 'FLICKR_API_KEY')
FLICKR_API_SECRET = config.get('secrets', 'FLICKR_API_SECRET')

DEFAULT_FROM_EMAIL='info@randomcab.com'
LOG_EMAIL=config.get('mail', 'LOG_EMAIL')
SERVER_EMAIL='django@randomcab.com'
# Custom backend that fails over across the servers listed below.
EMAIL_BACKEND = "randomcab.backends.mail.MailFailOverBackend"
EMAIL_BACKEND_LIST = json.loads(config.get('mail', 'EMAIL_BACKEND_LIST'))['mailservers']

ALLOWED_HOSTS = [".randomcab.com"]

TIME_ZONE = 'Europe/Amsterdam'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
USE_L10N = True
USE_TZ = True

STATIC_ROOT = ''
STATIC_URL = '/static/'
STATICFILES_DIRS = (
    INSTALL_DIR + "/resources/static",
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

SECRET_KEY = config.get('secrets', 'SECRET_KEY')

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'randomcab.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'randomcab.wsgi.application'

TEMPLATE_DIRS = (
    INSTALL_DIR + "/resources/templates",
)

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #3rd party
    'widget_tweaks',
    #randomcab
    'randomcab.home',
)

SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| {
"content_hash": "b7f448db39666bf227851b5fbcfd5218",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 88,
"avg_line_length": 25.345070422535212,
"alnum_prop": 0.666851903306474,
"repo_name": "bjarnoldus/randomcab",
"id": "79e61f2ef1a42b10dee5c1eb00333711c4ed751c",
"size": "3599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/randomcab/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3414"
},
{
"name": "Python",
"bytes": "76778"
},
{
"name": "Shell",
"bytes": "211"
}
],
"symlink_target": ""
} |
import triton as TT
from arybo.tools import tritonast2arybo

# Symbolically execute one instruction (xor rax, rbx) with rax/rbx treated as
# symbolic inputs, then print Triton's AST for rax and its arybo translation.
TT.setArchitecture(TT.ARCH.X86_64)
TT.convertRegisterToSymbolicVariable(TT.REG.RAX)
TT.convertRegisterToSymbolicVariable(TT.REG.RBX)

inst = TT.Instruction()
inst.setOpcodes("\x48\x31\xd8")  # xor rax, rbx
TT.processing(inst)

rax_ast = TT.buildSymbolicRegister(TT.REG.RAX)
rax_ast = TT.getFullAst(rax_ast)
print(rax_ast)

e = tritonast2arybo(rax_ast)
print(e)
| {
"content_hash": "96d24c2184c20c6730be1cf6884421e8",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 48,
"avg_line_length": 23.5,
"alnum_prop": 0.7801418439716312,
"repo_name": "quarkslab/arybo",
"id": "6ea7d8d7fc20e8ff1cdbd4a34e3d835466c63c63",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/triton_ast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "314"
},
{
"name": "C",
"bytes": "4404"
},
{
"name": "C++",
"bytes": "254464"
},
{
"name": "CMake",
"bytes": "18931"
},
{
"name": "Python",
"bytes": "133372"
}
],
"symlink_target": ""
} |
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import codecs
import doctest
import os
import re
import shutil
import sys
import tempfile
import time
import traceback
from extra.beep.beep import beep
from lib.controller.controller import start
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import getUnicode
from lib.core.common import randomStr
from lib.core.common import readXmlFile
from lib.core.data import conf
from lib.core.data import logger
from lib.core.data import paths
from lib.core.exception import SqlmapBaseException
from lib.core.exception import SqlmapNotVulnerableException
from lib.core.log import LOGGER_HANDLER
from lib.core.option import init
from lib.core.option import initOptions
from lib.core.option import setVerbosity
from lib.core.optiondict import optDict
from lib.core.settings import UNICODE_ENCODING
from lib.parse.cmdline import cmdLineParser
# Module-level failure state shared between the live-test driver and runCase().
failedItem = None        # parse item that failed to match, if any
failedParseOn = None     # text the failed parse ran against
failedTraceBack = None   # formatted traceback of a handled/unhandled exception
def smokeTest():
"""
Runs the basic smoke testing of a program
"""
retVal = True
count, length = 0, 0
for root, _, files in os.walk(paths.SQLMAP_ROOT_PATH):
if any(_ in root for _ in ("thirdparty", "extra")):
continue
for ifile in files:
length += 1
for root, _, files in os.walk(paths.SQLMAP_ROOT_PATH):
if any(_ in root for _ in ("thirdparty", "extra")):
continue
for ifile in files:
if os.path.splitext(ifile)[1].lower() == ".py" and ifile != "__init__.py":
path = os.path.join(root, os.path.splitext(ifile)[0])
path = path.replace(paths.SQLMAP_ROOT_PATH, '.')
path = path.replace(os.sep, '.').lstrip('.')
try:
__import__(path)
module = sys.modules[path]
except Exception, msg:
retVal = False
dataToStdout("\r")
errMsg = "smoke test failed at importing module '%s' (%s):\n%s" % (path, os.path.join(root, ifile), msg)
logger.error(errMsg)
else:
# Run doc tests
# Reference: http://docs.python.org/library/doctest.html
(failure_count, test_count) = doctest.testmod(module)
if failure_count > 0:
retVal = False
count += 1
status = '%d/%d (%d%%) ' % (count, length, round(100.0 * count / length))
dataToStdout("\r[%s] [INFO] complete: %s" % (time.strftime("%X"), status))
clearConsoleLine()
if retVal:
logger.info("smoke test final result: PASSED")
else:
logger.error("smoke test final result: FAILED")
return retVal
def adjustValueType(tagName, value):
    """
    Casts the string attribute 'value' to the Python type that optDict
    declares for the option 'tagName' (boolean/integer/float). Returns the
    value unchanged when the option is unknown or needs no cast.
    """
    for family in optDict.keys():
        for name, type_ in optDict[family].items():
            # Some entries store the type as a tuple; the first item is the type.
            if isinstance(type_, tuple):
                type_ = type_[0]
            if tagName == name:
                if type_ == "boolean":
                    value = (value == "True")
                elif type_ == "integer":
                    value = int(value)
                elif type_ == "float":
                    value = float(value)
                # Return immediately: the previous 'break' only left the inner
                # loop, so an option name present in more than one family was
                # converted twice (e.g. an already-cast bool re-compared to
                # the string "True", silently flipping it to False).
                return value
    return value
def liveTest():
    """
    Runs the test of a program against the live testing environment:
    parses the live-tests XML, runs each <case> through runCase() and
    reports/records per-case failure artifacts.
    """
    global failedItem
    global failedParseOn
    global failedTraceBack

    retVal = True
    count = 0
    global_ = {}
    vars_ = {}

    livetests = readXmlFile(paths.LIVE_TESTS_XML)
    length = len(livetests.getElementsByTagName("case"))

    # <global> children become default switches applied to every case.
    element = livetests.getElementsByTagName("global")
    if element:
        for item in element:
            for child in item.childNodes:
                if child.nodeType == child.ELEMENT_NODE and child.hasAttribute("value"):
                    global_[child.tagName] = adjustValueType(child.tagName, child.getAttribute("value"))

    # <vars> children define substitution variables; "random" yields a fresh token.
    element = livetests.getElementsByTagName("vars")
    if element:
        for item in element:
            for child in item.childNodes:
                if child.nodeType == child.ELEMENT_NODE and child.hasAttribute("value"):
                    var = child.getAttribute("value")
                    vars_[child.tagName] = randomStr(6) if var == "random" else var

    for case in livetests.getElementsByTagName("case"):
        parse_from_console_output = False
        count += 1
        name = None
        parse = []
        switches = dict(global_)
        value = ""
        vulnerable = True
        result = None

        if case.hasAttribute("name"):
            name = case.getAttribute("name")

        # --run-case filter: by 1-based index or by regex on the case name.
        if conf.runCase and ((conf.runCase.isdigit() and conf.runCase != count) or not re.search(conf.runCase, name, re.DOTALL)):
            continue

        if case.getElementsByTagName("switches"):
            for child in case.getElementsByTagName("switches")[0].childNodes:
                if child.nodeType == child.ELEMENT_NODE and child.hasAttribute("value"):
                    value = replaceVars(child.getAttribute("value"), vars_)
                    switches[child.tagName] = adjustValueType(child.tagName, value)

        if case.getElementsByTagName("parse"):
            for item in case.getElementsByTagName("parse")[0].getElementsByTagName("item"):
                if item.hasAttribute("value"):
                    value = replaceVars(item.getAttribute("value"), vars_)

                if item.hasAttribute("console_output"):
                    parse_from_console_output = bool(item.getAttribute("console_output"))

                parse.append((value, parse_from_console_output))

        conf.verbose = global_.get("verbose", 1)
        setVerbosity()

        msg = "running live test case: %s (%d/%d)" % (name, count, length)
        logger.info(msg)

        initCase(switches, count)

        test_case_fd = codecs.open(os.path.join(paths.SQLMAP_OUTPUT_PATH, "test_case"), "wb", UNICODE_ENCODING)
        test_case_fd.write("%s\n" % name)

        try:
            result = runCase(parse)
        except SqlmapNotVulnerableException:
            vulnerable = False
        finally:
            conf.verbose = global_.get("verbose", 1)
            setVerbosity()

        if result is True:
            logger.info("test passed")
            cleanCase()
        else:
            errMsg = "test failed "

            if failedItem:
                errMsg += "at parsing item \"%s\" " % failedItem

            errMsg += "- scan folder: %s " % paths.SQLMAP_OUTPUT_PATH
            errMsg += "- traceback: %s" % bool(failedTraceBack)

            if not vulnerable:
                errMsg += " - SQL injection not detected"

            logger.error(errMsg)
            test_case_fd.write("%s\n" % errMsg)

            # Preserve the console output / traceback of the failed case so
            # it can be inspected later (the output folder is kept on failure).
            if failedParseOn:
                console_output_fd = codecs.open(os.path.join(paths.SQLMAP_OUTPUT_PATH, "console_output"), "wb", UNICODE_ENCODING)
                console_output_fd.write(failedParseOn)
                console_output_fd.close()

            if failedTraceBack:
                traceback_fd = codecs.open(os.path.join(paths.SQLMAP_OUTPUT_PATH, "traceback"), "wb", UNICODE_ENCODING)
                traceback_fd.write(failedTraceBack)
                traceback_fd.close()

            beep()

            if conf.stopFail is True:
                return retVal

        test_case_fd.close()
        retVal &= bool(result)

    dataToStdout("\n")

    if retVal:
        logger.info("live test final result: PASSED")
    else:
        logger.error("live test final result: FAILED")

    return retVal
def initCase(switches, count):
    """
    Prepare the environment for one live test case: reset the per-case
    failure markers, create a dedicated temporary output directory and
    redirect logging/stdout into a spooled temporary file.

    switches: dict of command line option overrides taken from the XML
              case definition, applied on top of the default parsed options
    count: ordinal number of the test case (used in the temp dir prefix)
    """
    global failedItem
    global failedParseOn
    global failedTraceBack

    # Reset per-case failure state that runCase() fills in
    failedItem = None
    failedParseOn = None
    failedTraceBack = None

    # Every case gets its own scratch output directory tree
    paths.SQLMAP_OUTPUT_PATH = tempfile.mkdtemp(prefix="sqlmaptest-%d-" % count)
    paths.SQLMAP_DUMP_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "dump")
    paths.SQLMAP_FILES_PATH = os.path.join(paths.SQLMAP_OUTPUT_PATH, "%s", "files")
    logger.debug("using output directory '%s' for this test case" % paths.SQLMAP_OUTPUT_PATH)

    # Capture all console output (logger + stdout) so runCase() can parse it
    # later; runCase()'s finally block restores sys.__stdout__
    LOGGER_HANDLER.stream = sys.stdout = tempfile.SpooledTemporaryFile(max_size=0, mode="w+b", prefix="sqlmapstdout-")

    cmdLineOptions = cmdLineParser()

    # Only overwrite options that actually exist on the parsed option object
    if switches:
        for key, value in switches.items():
            if key in cmdLineOptions.__dict__:
                cmdLineOptions.__dict__[key] = value

    initOptions(cmdLineOptions, True)
    init()
def cleanCase():
    """Remove the current test case's output directory, ignoring errors."""
    shutil.rmtree(paths.SQLMAP_OUTPUT_PATH, ignore_errors=True)
def runCase(parse):
    """
    Run the sqlmap engine for the current test case and validate its output.

    parse: list of (item, parse_from_console_output) tuples; each item is
           either a literal substring or an "r'...'"-wrapped regular
           expression that must match either the captured console output
           or the dumper output file

    Returns True on success, False when no SQL injection was detected and
    None when an exception occurred or a parse item failed to match
    (details are stored in the failedItem/failedParseOn/failedTraceBack
    globals for the caller to report).
    """
    global failedItem
    global failedParseOn
    global failedTraceBack

    retVal = True
    handled_exception = None
    unhandled_exception = None
    result = False
    console = ""

    try:
        result = start()
    except KeyboardInterrupt:
        pass
    except SqlmapBaseException, e:
        handled_exception = e
    except Exception, e:
        unhandled_exception = e
    finally:
        # initCase() pointed sys.stdout at a temp file; read back everything
        # written during the run, then restore the real stdout
        sys.stdout.seek(0)
        console = sys.stdout.read()
        LOGGER_HANDLER.stream = sys.stdout = sys.__stdout__

    if unhandled_exception:
        failedTraceBack = "unhandled exception: %s" % str(traceback.format_exc())
        retVal = None
    elif handled_exception:
        failedTraceBack = "handled exception: %s" % str(traceback.format_exc())
        retVal = None
    elif result is False:  # this means no SQL injection has been detected - if None, ignore
        retVal = False

    console = getUnicode(console, system=True)

    if parse and retVal:
        with codecs.open(conf.dumper.getOutputFile(), "rb", UNICODE_ENCODING) as f:
            content = f.read()

        for item, parse_from_console_output in parse:
            parse_on = console if parse_from_console_output else content

            # items wrapped as r'...' are treated as regular expressions
            if item.startswith("r'") and item.endswith("'"):
                if not re.search(item[2:-1], parse_on, re.DOTALL):
                    retVal = None
                    failedItem = item
                    break
            elif item not in parse_on:
                retVal = None
                failedItem = item
                break

        # Keep the console output around so the failure can be dumped to disk
        if failedItem is not None:
            failedParseOn = console

    elif retVal is False:
        failedParseOn = console

    return retVal
def replaceVars(item, vars_):
    """
    Substitute "${name}" placeholders in item with values from vars_.

    item: string possibly containing "${name}" placeholders (may be None)
    vars_: dict mapping placeholder names to replacement strings (may be
           None/empty, in which case item is returned unchanged)

    Returns the string with every known placeholder replaced; placeholders
    without a matching entry in vars_ are left intact.
    """
    retVal = item

    if item and vars_:
        # Raw string fixes the invalid/deprecated escape sequences the
        # previous non-raw pattern ("\$\{([^}]+)\}") relied on.
        for var in re.findall(r"\$\{([^}]+)\}", item):
            if var in vars_:
                retVal = retVal.replace("${%s}" % var, vars_[var])

    return retVal
| {
"content_hash": "14b76fc9123db6ed709fb6d595b09081",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 129,
"avg_line_length": 32.171171171171174,
"alnum_prop": 0.5890973583496686,
"repo_name": "JeyZeta/Dangerous",
"id": "d09fd486c71fb1c29d25ff43efe0c9e014896226",
"size": "10736",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Dangerous/Golismero/tools/sqlmap/lib/core/testing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "13260"
},
{
"name": "C",
"bytes": "12851"
},
{
"name": "C++",
"bytes": "3174"
},
{
"name": "CSS",
"bytes": "267451"
},
{
"name": "HTML",
"bytes": "2686153"
},
{
"name": "JavaScript",
"bytes": "1356956"
},
{
"name": "Lua",
"bytes": "14436"
},
{
"name": "Makefile",
"bytes": "11190"
},
{
"name": "Objective-C",
"bytes": "998"
},
{
"name": "PHP",
"bytes": "619"
},
{
"name": "PLpgSQL",
"bytes": "536"
},
{
"name": "Perl",
"bytes": "263365"
},
{
"name": "Python",
"bytes": "16669102"
},
{
"name": "Roff",
"bytes": "9828"
},
{
"name": "Ruby",
"bytes": "503"
},
{
"name": "Shell",
"bytes": "6691"
}
],
"symlink_target": ""
} |
def normalize_doc(doc):
    """Normalize *doc*: empty strings collapse to NIL, other plain
    strings pass through unchanged, and Doc instances delegate to
    their own ``normalize`` method."""
    if not isinstance(doc, str):
        return doc.normalize()
    return NIL if doc == '' else doc
class Doc:
    """Base class for all Docs; plain ``str`` instances are used unboxed.

    A Doc is a tree describing every possible layout of its contents.
    The layout algorithm walks the tree and, constrained by parameters
    such as the total and ribbon widths, narrows the choices down to a
    single layout, emitted as a stream of SDocs (simple Docs).
    """

    __slots__ = ()

    def normalize(self):
        # Default: the node is already in normal form.
        return self
class Annotated(Doc):
    """Attaches an arbitrary annotation object to a wrapped Doc."""

    __slots__ = ('doc', 'annotation')

    def __init__(self, doc, annotation):
        self.doc = doc
        self.annotation = annotation

    def __repr__(self):
        # Fix: the annotation field was previously omitted from the repr,
        # making annotated docs indistinguishable when debugging.
        return f'Annotated({repr(self.doc)}, annotation={repr(self.annotation)})'
class Nil(Doc):
    """The empty Doc: contributes nothing to any layout."""

    def __repr__(self):
        return 'NIL'


# Shared singleton; normalization compares against it by identity
# (``doc is NIL``), so always use this instance rather than Nil().
NIL = Nil()
class Concat(Doc):
    """Concatenation of a sequence of Docs, laid out one after another."""

    __slots__ = ('docs', )

    def __init__(self, docs):
        self.docs = list(docs)

    def normalize(self):
        """Flatten nested Concats, drop NILs, and hoist AlwaysBreak out."""
        flattened = []
        must_break = False

        for child in self.docs:
            child = normalize_doc(child)
            if child is NIL:
                # Empty docs contribute nothing to the concatenation.
                continue
            if isinstance(child, Concat):
                # A normalized Concat's children are already normalized,
                # so they can be spliced in directly.
                flattened.extend(child.docs)
            elif isinstance(child, AlwaysBreak):
                # Record the break requirement, keep only the payload.
                must_break = True
                flattened.append(child.doc)
            else:
                flattened.append(child)

        if not flattened:
            return NIL

        result = flattened[0] if len(flattened) == 1 else Concat(flattened)
        return AlwaysBreak(result) if must_break else result

    def __repr__(self):
        parts = ', '.join(repr(child) for child in self.docs)
        return f"Concat({parts})"
class Nest(Doc):
    """Renders the wrapped Doc with its indentation increased by ``indent``."""

    __slots__ = ('indent', 'doc')

    def __init__(self, indent, doc):
        assert isinstance(indent, int)
        assert isinstance(doc, Doc)
        self.indent = indent
        self.doc = doc

    def normalize(self):
        inner_normalized = normalize_doc(self.doc)
        if isinstance(inner_normalized, AlwaysBreak):
            # Hoist AlwaysBreak above the Nest so it keeps propagating
            # upwards through the tree.
            return AlwaysBreak(
                Nest(self.indent, inner_normalized.doc)
            )
        # Fix: reuse the normalization computed above instead of running
        # normalize_doc(self.doc) a second time (redundant full re-walk
        # of the subtree in the original).
        return Nest(self.indent, inner_normalized)

    def __repr__(self):
        return f'Nest({repr(self.indent)}, {repr(self.doc)})'
class FlatChoice(Doc):
    """A choice between two layouts: ``when_broken`` if the enclosing
    group breaks, ``when_flat`` if the content fits on one line."""

    __slots__ = ('when_broken', 'when_flat')

    def __init__(self, when_broken, when_flat):
        self.when_broken = when_broken
        self.when_flat = when_flat

    def normalize(self):
        broken = normalize_doc(self.when_broken)
        if isinstance(broken, AlwaysBreak):
            return broken

        flat = normalize_doc(self.when_flat)
        if isinstance(flat, AlwaysBreak):
            # Even the "flat" variant forces a break, so the choice
            # collapses to the broken layout.
            return broken

        return FlatChoice(broken, flat)

    def __repr__(self):
        return (
            f'FlatChoice(when_broken={repr(self.when_broken)}, '
            f'when_flat={repr(self.when_flat)})'
        )
class Contextual(Doc):
    """A Doc produced lazily by calling the stored function with
    layout context."""

    __slots__ = ('fn', )

    def __init__(self, fn):
        self.fn = fn

    def __repr__(self):
        return f'Contextual({repr(self.fn)})'
class HardLine(Doc):
    """A mandatory line break that is never flattened away."""

    def __repr__(self):
        return 'HardLine()'


# Shared layout primitives built from the classes above.
HARDLINE = HardLine()
# A break that flattens to a single space.
LINE = FlatChoice(HARDLINE, ' ')
# A break that flattens to nothing at all.
SOFTLINE = FlatChoice(HARDLINE, NIL)
class Group(Doc):
    """Marks a subtree the layouter may render flat if it fits the line."""

    __slots__ = ('doc', )

    def __init__(self, doc):
        assert isinstance(doc, Doc)
        self.doc = doc

    def normalize(self):
        inner = normalize_doc(self.doc)
        if isinstance(inner, AlwaysBreak):
            # A Group offers a flat-or-broken choice; with a guaranteed
            # break there is no choice left, so the Group is dropped.
            return inner
        if inner is NIL:
            return NIL
        return Group(inner)

    def __repr__(self):
        return f'Group({repr(self.doc)})'
class AlwaysBreak(Doc):
    """Forces every enclosing Group of this subtree to render broken."""

    __slots__ = ('doc', )

    def __init__(self, doc):
        assert isinstance(doc, Doc)
        self.doc = doc

    def normalize(self):
        inner = normalize_doc(self.doc)
        # Collapse nested AlwaysBreak wrappers into a single one.
        if isinstance(inner, AlwaysBreak):
            return inner
        return AlwaysBreak(inner)

    def __repr__(self):
        return f'AlwaysBreak({repr(self.doc)})'
class Fill(Doc):
    """Like Concat, but the layouter fills lines, breaking only where
    the next element no longer fits."""

    __slots__ = ('docs', )

    def __init__(self, docs):
        self.docs = list(docs)

    def normalize(self):
        # NOTE(review): unlike Concat.normalize, children here are NOT
        # passed through normalize_doc() -- presumably intentional since
        # Fill elements are laid out individually, but worth confirming.
        kept = []
        must_break = False

        for child in self.docs:
            if isinstance(child, AlwaysBreak):
                # Record the break requirement and keep only the payload.
                must_break = True
                child = child.doc
            if child is not NIL:
                kept.append(child)

        if not kept:
            return NIL

        result = Fill(kept)
        return AlwaysBreak(result) if must_break else result

    def __repr__(self):
        parts = ', '.join(repr(child) for child in self.docs)
        return f"Fill([{parts}])"
| {
"content_hash": "71bc789093040cbf3e5984b9e387a80b",
"timestamp": "",
"source": "github",
"line_count": 218,
"max_line_length": 69,
"avg_line_length": 24.71559633027523,
"alnum_prop": 0.5567928730512249,
"repo_name": "tommikaikkonen/peprint",
"id": "d3deb88a4708bb22e1ebb102bcf98641b43fb391",
"size": "5388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "peprint/doc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2269"
},
{
"name": "Python",
"bytes": "95629"
}
],
"symlink_target": ""
} |
import json
import logging
import re
import time
from nose.plugins.skip import SkipTest
from nose.tools import assert_equal, assert_true
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from desktop.lib.i18n import smart_str
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import add_to_group, grant_access
from hadoop.pseudo_hdfs4 import is_live_cluster
from notebook.api import _save_notebook
from notebook.connectors.hiveserver2 import HS2Api
from notebook.models import make_notebook, Notebook
from beeswax.server import dbms
from beeswax.test_base import BeeswaxSampleProvider, get_query_server_config, is_hive_on_spark
LOG = logging.getLogger(__name__)
class TestHiveserver2Api(object):
def setUp(self):
    """Create a logged-in, non-superuser test client with notebook access
    and bind a HS2 API instance to that user."""
    self.client = make_logged_in_client(username="test", groupname="test", recreate=False, is_superuser=False)
    self.user = User.objects.get(username='test')

    add_to_group('test')
    grant_access("test", "test", "notebook")

    self.db = dbms.get(self.user, get_query_server_config())
    self.api = HS2Api(self.user)
def test_prepare_hql_query(self):
    """_prepare_hql_query() must fold the session's files, functions and
    settings into the HQL query object and emit matching ADD JAR /
    CREATE TEMPORARY FUNCTION configuration statements."""
    statement = "SELECT myUpper(description) FROM sample_07 LIMIT 10"

    # Snippet payload as the editor frontend would send it; only the
    # statement is templated in.
    snippet_json = """
{
"status": "running",
"database": "default",
"properties": {
"files": [],
"functions": [{
"class_name": "org.hue.udf.MyUpper",
"name": "myUpper"
}],
"settings": []
},
"result": {
"handle": {
"log_context": null,
"statements_count": 1,
"statement_id": 0,
"has_more_statements": false,
"secret": "UVZXF/qtTQumumz0Q8tNDQ==",
"has_result_set": true,
"operation_type": 0,
"modified_row_count": null,
"guid": "ZxOd4IjqTeK1PUTq+MdcDA=="
},
"type": "table",
"id": "ae81b805-dcf1-9692-0452-797681e997ed"
},
"statement": "%(statement)s",
"type": "hive",
"id": "9b50e364-f7b2-303d-e924-db8b0bd9866d"
}
""" % {'statement': statement}

    # Session payload carrying the resources (jar file, UDF, settings)
    # that should be folded into the prepared query.
    session_json = """
{
"type": "hive",
"properties": [
{
"multiple": true,
"value": [
{
"path": "/user/test/myudfs.jar",
"type": "jar"
}
],
"nice_name": "Files",
"key": "files",
"help_text": "Add one or more files, jars, or archives to the list of resources.",
"type": "hdfs-files"
},
{
"multiple": true,
"value": [
{
"class_name": "org.hue.udf.MyUpper",
"name": "myUpper"
}
],
"nice_name": "Functions",
"key": "functions",
"help_text": "Add one or more registered UDFs (requires function name and fully-qualified class name).",
"type": "functions"
},
{
"multiple": true,
"value": [
{
"value": "spark",
"key": "hive.execution.engine"
}
],
"nice_name": "Settings",
"key": "settings",
"help_text": "Hive and Hadoop configuration properties.",
"type": "settings",
"options": [
"hive.map.aggr",
"hive.exec.compress.output",
"hive.exec.parallel",
"hive.execution.engine",
"mapreduce.job.queuename"
]
}
],
"id": 30
}
"""

    snippet = json.loads(snippet_json)
    session = json.loads(session_json)

    hql_query = self.api._prepare_hql_query(snippet, statement, session)

    # Session resources must be carried over onto the query object.
    assert_equal([{'key': 'hive.execution.engine', 'value': 'spark'}], hql_query.settings)
    assert_equal([{'type': 'jar', 'path': '/user/test/myudfs.jar'}], hql_query.file_resources)
    assert_equal([{'name': 'myUpper', 'class_name': 'org.hue.udf.MyUpper'}], hql_query.functions)

    config_statements = ', '.join(hql_query.get_configuration_statements())

    # The jar path gets fully qualified with the HDFS authority at runtime,
    # so match it with a pattern rather than a fixed string.
    pattern = re.compile("ADD JAR hdfs://[A-Za-z0-9.:_-]+/user/test/myudfs.jar")
    assert_true(pattern.search(config_statements), config_statements)
    assert_true("CREATE TEMPORARY FUNCTION myUpper AS 'org.hue.udf.MyUpper'" in config_statements, config_statements)
def test_upgrade_properties(self):
    """upgrade_properties() must convert legacy flat key/value settings
    into the structured properties format, defaulting when the input is
    unrecognizable and passing already-upgraded input through unchanged."""
    properties = None

    # Verify that upgrade will return defaults if current properties not formatted as settings
    upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
    assert_equal(upgraded_props, self.api.get_properties(lang='hive'))

    # Verify that upgrade will save old properties and new settings
    properties = [
        {
            'key': 'hive.execution.engine',
            'value': 'mr'
        },
        {
            'key': 'hive.exec.compress.output',
            'value': False
        }
    ]
    upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
    settings = next((prop for prop in upgraded_props if prop['key'] == 'settings'), None)
    assert_equal(settings['value'], properties)

    # Verify that already upgraded properties will be unchanged
    properties = [
        {
            "multiple": True,
            "value": [],
            "nice_name": "Files",
            "key": "files",
            "help_text": "Add one or more files, jars, or archives to the list of resources.",
            "type": "hdfs-files"
        },
        {
            "multiple": True,
            "value": [],
            "nice_name": "Functions",
            "key": "functions",
            "help_text": "Add one or more registered UDFs (requires function name and fully-qualified class name).",
            "type": "functions"
        },
        {
            "multiple": True,
            "value": [
                {
                    "key": "hive.execution.engine",
                    "value": "spark"
                }
            ],
            "nice_name": "Settings",
            "key": "settings",
            "help_text": "Hive and Hadoop configuration properties.",
            "type": "settings",
            "options": [
                "hive.map.aggr",
                "hive.exec.compress.output",
                "hive.exec.parallel",
                "hive.execution.engine",
                "mapreduce.job.queuename"
            ]
        }
    ]
    upgraded_props = self.api.upgrade_properties(lang='hive', properties=properties)
    assert_equal(upgraded_props, properties)
def test_progress(self):
    """progress() must derive a percentage from raw engine logs: Hive
    progress from the compile/execute/job phases, Impala progress from
    the latest '<pct>% Complete' marker."""
    # Hive snippet with two statements pending.
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 2,
"has_more_statements": true
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': "SELECT * FROM sample_07;"}
    )

    logs = """INFO : Compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Semantic Analysis Completed
INFO : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null)
INFO : Completed compiling command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb); Time taken: 0.116 seconds
INFO : Executing command(queryId=hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Query ID = hive_20160620133030_7e69739c-a00b-4170-8717-9eee331130eb
INFO : Total jobs = 2
INFO : Launching Job 1 out of 2
INFO : Starting task [Stage-1:MAPRED] in serial mode
INFO : Number of reduce tasks not specified. Estimated from input data size: 1
INFO : In order to change the average load for a reducer (in bytes):
INFO : set hive.exec.reducers.bytes.per.reducer=<number>
INFO : In order to limit the maximum number of reducers:
INFO : set hive.exec.reducers.max=<number>
INFO : In order to set a constant number of reducers:
INFO : set mapreduce.job.reduces=<number>
INFO : number of splits:1
INFO : Submitting tokens for job: job_1466104358744_0003
INFO : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/
"""

    # Compiled and submitted, but no map/reduce output yet -> 5%.
    assert_equal(self.api.progress(snippet, logs), 5)

    logs += """INFO : Starting Job = job_1466104358744_0003, Tracking URL = http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466104358744_0003/
INFO : Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1466104358744_0003
INFO : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
INFO : 2016-06-20 13:30:34,494 Stage-1 map = 0%, reduce = 0%
INFO : 2016-06-20 13:30:47,081 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 3.13 sec
INFO : 2016-06-20 13:30:58,606 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 5.59 sec
INFO : MapReduce Total cumulative CPU time: 5 seconds 590 msec
INFO : Ended Job = job_1466104358744_0003
"""

    # First of the two jobs finished -> 50%.
    assert_equal(self.api.progress(snippet, logs), 50)

    # Same scenario for an Impala snippet.
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 2,
"has_more_statements": true
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "impala",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': "SELECT * FROM sample_07;"}
    )

    logs = "Query 734a81444c85be66:d05f3bb1a6c2d0a5: 0% Complete (1 out of 4693)"

    assert_equal(self.api.progress(snippet, logs), 0)

    # Impala progress should track the most recent percentage reported.
    logs += """Query 734a81444c85be66:d05f3bb1a6c2d0a5: 20% Complete (4 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 30% Complete (7 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 40% Complete (7 out of 4693)
Query 734a81444c85be66:d05f3bb1a6c2d0a5: 50% Complete (234 out of 4693)
"""

    assert_equal(self.api.progress(snippet, logs), 50)
def test_get_jobs(self):
    """get_jobs() must extract the Hadoop job ids from Hive logs together
    with their started/finished state and a tracking URL."""
    notebook = json.loads("""
{
"uuid": "f5d6394d-364f-56e8-6dd3-b1c5a4738c52",
"id": 1234,
"sessions": [{"type": "hive", "properties": [], "id": "1234"}],
"type": "query-hive",
"name": "Test Hiveserver2 Editor",
"isSaved": false,
"parentUuid": null
}
""")

    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 2,
"has_more_statements": true
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': "SELECT * FROM sample_07;"}
    )

    logs = """INFO : Compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Semantic Analysis Completed
INFO : Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:app, type:string, comment:null), FieldSchema(name:avg_bytes, type:double, comment:null)], properties:null)
INFO : Completed compiling command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13); Time taken: 0.073 seconds
INFO : Executing command(queryId=hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13): SELECT app,
AVG(bytes) AS avg_bytes
FROM web_logs
GROUP BY app
HAVING app IS NOT NULL
ORDER BY avg_bytes DESC
INFO : Query ID = hive_20160624155555_c81f8b95-af22-45fd-8e2c-fb012f530f13
INFO : Total jobs = 2
INFO : Launching Job 1 out of 2
INFO : Starting task [Stage-1:MAPRED] in serial mode
INFO : Number of reduce tasks not specified. Estimated from input data size: 1
INFO : In order to change the average load for a reducer (in bytes):
INFO : set hive.exec.reducers.bytes.per.reducer=<number>
INFO : In order to limit the maximum number of reducers:
INFO : set hive.exec.reducers.max=<number>
INFO : In order to set a constant number of reducers:
INFO : set mapreduce.job.reduces=<number>
INFO : number of splits:1
INFO : Submitting tokens for job: job_1466630204796_0059
INFO : The url to track the job: http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
INFO : Starting Job = job_1466630204796_0059, Tracking URL = http://jennykim-1.vpc.cloudera.com:8088/proxy/application_1466630204796_0059/
INFO : Kill Command = /usr/lib/hadoop/bin/hadoop job -kill job_1466630204796_0059
"""

    jobs = self.api.get_jobs(notebook, snippet, logs)

    # The job has started but has not been marked finished yet.
    assert_true(isinstance(jobs, list))
    assert_true(len(jobs), 1)
    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
    assert_equal(jobs[0]['started'], True)
    assert_equal(jobs[0]['finished'], False)
    assert_true('url' in jobs[0])

    logs += """INFO : Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
INFO : 2016-06-24 15:55:51,125 Stage-1 map = 0%, reduce = 0%
INFO : 2016-06-24 15:56:00,410 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.12 sec
INFO : 2016-06-24 15:56:09,709 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 4.04 sec
INFO : MapReduce Total cumulative CPU time: 4 seconds 40 msec
INFO : Ended Job = job_1466630204796_0059
INFO : Launching Job 2 out of 2
"""

    jobs = self.api.get_jobs(notebook, snippet, logs)

    # After "Ended Job" appears the same job must be reported finished.
    assert_true(len(jobs), 1)
    assert_equal(jobs[0]['name'], 'job_1466630204796_0059')
    assert_equal(jobs[0]['started'], True)
    assert_equal(jobs[0]['finished'], True)
def test_get_current_statement(self):
    """_get_current_statement() must compute a stable hash of the previous
    statement, including statements containing non-ASCII characters."""
    snippet = json.loads("""
{
"status": "running",
"database": "default",
"id": "d70d31ee-a62a-4854-b2b1-b852f6a390f5",
"result": {
"type": "table",
"handle": {
"statement_id": 0,
"statements_count": 1,
"has_more_statements": false
},
"id": "ca11fcb1-11a5-f534-8200-050c8e1e57e3"
},
"statement": "%(statement)s",
"type": "hive",
"properties": {
"files": [],
"functions": [],
"settings": []
}
}
""" % {'statement': u"SELECT 'Привет', '你好';"}
    )

    statement = self.api._get_current_statement(MockDb(), snippet)

    # Expected digest of the unicode statement above.
    assert_equal('086ecec9a8b89b1b47cce358bdbb343be23b1f8b54ca76bc81927e27', statement['previous_statement_hash'])
class MockDb(object):
    """Minimal stand-in for a dbms client: _get_current_statement() only
    needs close_operation(), which is a no-op here.

    Fix: this was previously declared as a *function* containing a nested
    def, so MockDb() evaluated to None and exposed no close_operation().
    """

    def close_operation(self, handle):
        # Nothing to clean up for the mock.
        pass
class TestHiveserver2ApiWithHadoop(BeeswaxSampleProvider):
@classmethod
def setup_class(cls):
    """Bring up the shared Beeswax/Hadoop fixtures without loading sample
    data; individual tests install the examples they need."""
    super(TestHiveserver2ApiWithHadoop, cls).setup_class(load_data=False)
def setUp(self):
    """Install the example tables, grant the test user notebook and impala
    access, create its HDFS home dir and bind a HS2 API instance."""
    self.client.post('/beeswax/install_examples')

    self.user = User.objects.get(username='test')
    add_to_group('test')
    grant_access("test", "test", "notebook")
    grant_access("test", "test", "impala")

    self.db = dbms.get(self.user, get_query_server_config())
    self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/test')
    self.api = HS2Api(self.user)

    # Default statement reused by several tests below.
    self.statement = 'SELECT description, salary FROM sample_07 WHERE (sample_07.salary > 100000) ORDER BY salary DESC LIMIT 1000'
def create_query_document(self, owner, query_type='hive', database='default',
                          name='Test Query', description='Test Query', statement='',
                          files=None, functions=None, settings=None):
    """
    Creates and returns a query Document2 object
    :param owner: owner of doc
    :param query_type: hive, impala or spark
    :param database: database name
    :param name: name of document
    :param description: description of document
    :param statement: SQL statement (can be multi-query statement)
    :param files: list of dicts representing files
    :param functions: list of dicts representing functions
    :param settings: list of dicts representing settings
    :return: Document2 object representing query
    :raises ValueError: if query_type is not hive, impala or spark
    """
    if query_type not in ('hive', 'impala', 'spark'):
        raise ValueError("Invalid query_type: %s" % query_type)

    notebook = make_notebook(name=name, description=description, editor_type=query_type, statement=statement,
                             status='ready', database=database, files=files, functions=functions, settings=settings)
    notebook_doc, save_as = _save_notebook(notebook.get_data(), owner)
    return notebook_doc
def get_snippet(self, notebook, snippet_idx=0):
    """Return the snippet at *snippet_idx* from the notebook's data,
    guaranteeing it carries 'result' and 'result.handle' dicts."""
    snippet = notebook.get_data()['snippets'][snippet_idx]
    snippet.setdefault('result', {})
    snippet['result'].setdefault('handle', {})
    return snippet
def execute_and_wait(self, query_doc, snippet_idx=0, timeout=30.0, wait=1.0):
    """
    Execute the given query document and poll check_status until the
    result is available.

    :param query_doc: Document2 query object to execute
    :param snippet_idx: index of the snippet to run
    :param timeout: maximum seconds to poll before giving up
    :param wait: seconds to sleep between status polls
    :return: the snippet dict with its result handle populated
    :raises Exception: if the query is not 'available' within the timeout
    """
    notebook = Notebook(document=query_doc)
    snippet = self.get_snippet(notebook, snippet_idx=snippet_idx)

    curr = time.time()
    end = curr + timeout
    status = 'ready'

    response = self.client.post(reverse('notebook:execute'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    # Re-read the snippet and attach the freshly created execution handle.
    notebook = Notebook(document=query_doc)
    snippet = self.get_snippet(notebook, snippet_idx=snippet_idx)
    data = json.loads(response.content)
    snippet['result']['handle'] = data['handle']

    # Poll until the query reports results or the timeout elapses.
    while status != 'available' and curr <= end:
        response = self.client.post(reverse('notebook:check_status'),
                                    {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
        data = json.loads(response.content)
        status = data['query_status']['status']
        snippet['status'] = status
        time.sleep(wait)
        curr = time.time()

    if status != 'available':
        raise Exception('Query failed to complete or return results.')

    return snippet
def test_query_with_unicode(self):
    """Executing a statement containing non-ASCII characters must succeed
    and echo the statement back in the query logs."""
    statement = "SELECT * FROM sample_07 WHERE code='한';"

    doc = self.create_query_document(owner=self.user, statement=statement)
    notebook = Notebook(document=doc)
    snippet = self.get_snippet(notebook, snippet_idx=0)

    response = self.client.post(reverse('notebook:execute'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)

    snippet['result']['handle'] = data['handle']

    response = self.client.post(reverse('notebook:get_logs'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    # The unicode statement must appear intact in the captured logs.
    assert_true("SELECT * FROM sample_07 WHERE code='한'" in smart_str(data['logs']))
def test_get_current_statement(self):
    """Executing a multi-statement snippet twice must step through the
    statements, tracking id/count/position correctly each time."""
    multi_statement = "SELECT description, salary FROM sample_07 LIMIT 20;\r\nSELECT AVG(salary) FROM sample_07;"

    doc = self.create_query_document(owner=self.user, statement=multi_statement)
    notebook = Notebook(document=doc)
    snippet = self.get_snippet(notebook, snippet_idx=0)

    response = self.client.post(reverse('notebook:execute'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
    data = json.loads(response.content)

    # First execute runs statement 0 of 2 and reports more to come.
    assert_equal(0, data['status'], data)
    assert_equal(0, data['handle']['statement_id'], data)
    assert_equal(2, data['handle']['statements_count'], data)
    assert_equal(True, data['handle']['has_more_statements'], data)
    assert_equal({'row': 0, 'column': 0}, data['handle']['start'], data)
    assert_equal({'row': 0, 'column': 51}, data['handle']['end'], data)

    snippet['result']['handle'] = data['handle']

    response = self.client.post(reverse('notebook:execute'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
    data = json.loads(response.content)

    # Second execute advances to statement 1, the last one.
    assert_equal(0, data['status'], data)
    assert_equal(1, data['handle']['statement_id'], data)
    assert_equal(2, data['handle']['statements_count'], data)
    assert_equal(False, data['handle']['has_more_statements'], data)
    assert_equal({'row': 1, 'column': 0}, data['handle']['start'], data)
    assert_equal({'row': 1, 'column': 33}, data['handle']['end'], data)
def test_explain(self):
    """The explain endpoint must return the Hive plan (STAGE DEPENDENCIES)
    and echo back the original statement."""
    # Hive 2 with Tez set hive.explain.user to true by default, but this test is expecting output when this setting
    # is set to false.
    doc = self.create_query_document(owner=self.user, statement=self.statement)
    notebook = Notebook(document=doc)
    snippet = self.get_snippet(notebook, snippet_idx=0)
    snippet['properties']['settings'].append({"key": "hive.explain.user", "value": "false"})

    response = self.client.post(reverse('notebook:explain'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)

    assert_equal(0, data['status'], data)
    assert_true('STAGE DEPENDENCIES' in data['explanation'], data)
    assert_equal(self.statement, data['statement'], data)
def test_download(self):
    """Downloading query results as CSV must respond 200 with an attachment
    named after the query document."""
    statement = "SELECT 'hello world';"

    doc = self.create_query_document(owner=self.user, statement=statement)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0)

    response = self.client.post(reverse('notebook:download'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'format': 'csv'})

    assert_equal(200, response.status_code)
    assert_equal(('Content-Disposition', 'attachment; filename=Test Query.csv'), response._headers['content-disposition'])
def test_get_sample(self):
    """The sample-data endpoints must return non-empty headers and rows for
    both a whole table and a single column."""
    doc = self.create_query_document(owner=self.user, statement=self.statement)
    notebook = Notebook(document=doc)
    snippet = self.get_snippet(notebook, snippet_idx=0)

    # Table-level sample.
    response = self.client.post(reverse('notebook:api_sample_data',
                                        kwargs={'database': 'default', 'table': 'sample_07'}),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
    data = json.loads(response.content)

    assert_equal(0, data['status'], data)
    assert_true('headers' in data)
    assert_true('rows' in data)
    assert_true(len(data['rows']) > 0)

    # Single-column sample; headers collapse to just that column.
    response = self.client.post(reverse('notebook:api_sample_data_column',
                                        kwargs={'database': 'default', 'table': 'sample_07', 'column': 'code'}),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})
    data = json.loads(response.content)

    assert_equal(0, data['status'], data)
    assert_true('headers' in data)
    assert_equal(['code'], data['headers'])
    assert_true('rows' in data)
    assert_true(len(data['rows']) > 0)
def test_fetch_result_size_mr(self):
    """fetch_result_size must report rows/size for MapReduce-backed Hive
    queries, and None for queries that launch no job."""
    if not is_live_cluster():  # Mini-cluster does not have JHS
        raise SkipTest

    # Assert that a query with no job will return no rows or size
    statement = "SELECT 'hello world';"

    settings = [
        {
            'key': 'hive.execution.engine',
            'value': 'mr'
        }
    ]
    doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0)

    response = self.client.post(reverse('notebook:fetch_result_size'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_true('result' in data)
    assert_true('rows' in data['result'])
    assert_true('size' in data['result'])
    assert_equal(None, data['result']['rows'])
    assert_equal(None, data['result']['size'])

    # Assert that a query with map & reduce task returns rows
    statement = "SELECT DISTINCT code FROM sample_07;"
    doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)

    response = self.client.post(reverse('notebook:fetch_result_size'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_true('result' in data)
    assert_true('rows' in data['result'])
    assert_true('size' in data['result'])
    assert_equal(823, data['result']['rows'])
    assert_true(data['result']['size'] > 0, data['result'])

    # Assert that a query with multiple jobs returns rows
    statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
    doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)

    response = self.client.post(reverse('notebook:fetch_result_size'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_true('result' in data)
    assert_true('rows' in data['result'])
    assert_equal(23, data['result']['rows'])
    assert_true(data['result']['size'] > 0, data['result'])
def test_fetch_result_size_spark(self):
    """fetch_result_size must report rows/size for Hive-on-Spark queries,
    and None for queries that launch no job."""
    if not is_live_cluster() or not is_hive_on_spark():
        raise SkipTest

    # TODO: Add session cleanup here so we don't have orphan spark sessions

    # Assert that a query with no job will return no rows or size
    statement = "SELECT 'hello world';"

    settings = [
        {
            'key': 'hive.execution.engine',
            'value': 'spark'
        }
    ]
    doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0)

    response = self.client.post(reverse('notebook:fetch_result_size'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_true('result' in data)
    assert_true('rows' in data['result'])
    assert_true('size' in data['result'])
    assert_equal(None, data['result']['rows'])
    assert_equal(None, data['result']['size'])

    # Assert that a query that runs a job will return rows and size
    statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
    doc = self.create_query_document(owner=self.user, statement=statement, settings=settings)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)

    response = self.client.post(reverse('notebook:fetch_result_size'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_true('result' in data)
    assert_true('rows' in data['result'])
    assert_true('size' in data['result'])
    assert_equal(23, data['result']['rows'])
    assert_true(data['result']['size'] > 0)
def test_fetch_result_size_impala(self):
  """Verify fetch_result_size for Impala queries.

  The assertions below expect Impala to report a row count but leave
  `size` as None, both for aggregate and full-table scans.
  """
  if not is_live_cluster():
    raise SkipTest

  # Create session so that session object is saved to DB for server URL lookup
  session = self.api.create_session(lang='impala')

  try:
    # Assert that a query that runs a job will return rows
    statement = "SELECT app, COUNT(1) AS count FROM web_logs GROUP BY app ORDER BY count DESC;"
    doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=2.0)

    # NOTE(review): fetch_result_data is issued before fetch_result_size —
    # presumably stats are only populated after a read; confirm in connector.
    self.client.post(reverse('notebook:fetch_result_data'),
                     {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})

    response = self.client.post(reverse('notebook:fetch_result_size'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_true('result' in data)
    assert_true('rows' in data['result'])
    assert_true('size' in data['result'])
    # assumes the web_logs fixture has 23 distinct apps — TODO confirm
    assert_equal(23, data['result']['rows'])
    # Size is expected to stay unset for Impala results here.
    assert_equal(None, data['result']['size'])

    # Assert that selecting all from partitioned table works
    statement = "SELECT * FROM web_logs;"
    doc = self.create_query_document(owner=self.user, query_type='impala', statement=statement)
    notebook = Notebook(document=doc)
    snippet = self.execute_and_wait(doc, snippet_idx=0, timeout=60.0, wait=5.0)

    self.client.post(reverse('notebook:fetch_result_data'),
                     {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})

    response = self.client.post(reverse('notebook:fetch_result_size'),
                                {'notebook': notebook.get_json(), 'snippet': json.dumps(snippet)})

    data = json.loads(response.content)
    assert_equal(0, data['status'], data)
    assert_true('result' in data)
    assert_true('rows' in data['result'])
    assert_equal(1000, data['result']['rows'])
  finally:
    # Always release the Impala session, even if an assertion failed.
    self.api.close_session(session)
def test_fetch_result_abbreviated(self):
  """Abbreviated row counts (e.g. 1.00K) must still resolve to exact numbers."""
  if not is_live_cluster():
    raise SkipTest

  # A session must exist in the DB so the server URL can be looked up.
  impala_session = self.api.create_session(lang='impala')

  try:
    query = "SELECT * FROM web_logs;"
    query_doc = self.create_query_document(owner=self.user, query_type='impala', statement=query)
    editor = Notebook(document=query_doc)
    snippet = self.execute_and_wait(query_doc, snippet_idx=0, timeout=60.0, wait=5.0)

    # Read a page of results first, then ask for the result size.
    self.client.post(reverse('notebook:fetch_result_data'),
                     {'notebook': editor.get_json(), 'snippet': json.dumps(snippet), 'rows': 100, 'startOver': 'false'})

    size_response = self.client.post(reverse('notebook:fetch_result_size'),
                                     {'notebook': editor.get_json(), 'snippet': json.dumps(snippet)})
    payload = json.loads(size_response.content)

    # Even if the server displays an abbreviated count, the API must
    # return the exact integer row count.
    assert_equal(0, payload['status'], payload)
    assert_true('result' in payload)
    assert_true('rows' in payload['result'])
    assert_equal(1000, payload['result']['rows'])
  finally:
    self.api.close_session(impala_session)
| {
"content_hash": "6e302ad809b1129d57908495aae870ae",
"timestamp": "",
"source": "github",
"line_count": 840,
"max_line_length": 187,
"avg_line_length": 40.43571428571428,
"alnum_prop": 0.5809927574633457,
"repo_name": "todaychi/hue",
"id": "6761b5d1e6c5a768930f13366fc65ea02d465641",
"size": "34796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/libs/notebook/src/notebook/connectors/tests/tests_hiveserver2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2717013"
},
{
"name": "C++",
"bytes": "199945"
},
{
"name": "CSS",
"bytes": "691188"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "23983570"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "5432201"
},
{
"name": "Lex",
"bytes": "39802"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "146585"
},
{
"name": "Mako",
"bytes": "3525679"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "31565"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "45877726"
},
{
"name": "Roff",
"bytes": "16669"
},
{
"name": "Shell",
"bytes": "46975"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "521413"
},
{
"name": "Yacc",
"bytes": "353353"
}
],
"symlink_target": ""
} |
"""RNN helpers for TensorFlow models.
@@bidirectional_dynamic_rnn
@@dynamic_rnn
@@raw_rnn
@@static_rnn
@@static_state_saving_rnn
@@static_bidirectional_rnn
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.util import nest
# pylint: disable=protected-access
_concat = rnn_cell_impl._concat
_like_rnncell = rnn_cell_impl._like_rnncell
# pylint: enable=protected-access
def _transpose_batch_time(x):
  """Swap the leading (batch, time) axes of `x`, keeping static shape info.

  Args:
    x: A tensor of rank 2 or higher.

  Returns:
    `x` with its first two dimensions transposed.

  Raises:
    ValueError: if `x` is rank 1 or lower.
  """
  static_shape = x.get_shape()
  if static_shape.ndims is not None and static_shape.ndims < 2:
    raise ValueError(
        "Expected input tensor %s to have rank at least 2, but saw shape: %s" %
        (x, static_shape))

  # Build the permutation [1, 0, 2, 3, ...] dynamically so any trailing
  # dimensions are carried through unchanged.
  dynamic_rank = array_ops.rank(x)
  permutation = array_ops.concat(
      ([1, 0], math_ops.range(2, dynamic_rank)), axis=0)
  transposed = array_ops.transpose(x, permutation)

  # Re-attach the static shape we know, with the first two entries swapped.
  leading = tensor_shape.TensorShape(
      [static_shape[1].value, static_shape[0].value])
  transposed.set_shape(leading.concatenate(static_shape[2:]))
  return transposed
def _best_effort_input_batch_size(flat_input):
"""Get static input batch size if available, with fallback to the dynamic one.
Args:
flat_input: An iterable of time major input Tensors of shape [max_time,
batch_size, ...]. All inputs should have compatible batch sizes.
Returns:
The batch size in Python integer if available, or a scalar Tensor otherwise.
Raises:
ValueError: if there is any input with an invalid shape.
"""
for input_ in flat_input:
shape = input_.shape
if shape.ndims is None:
continue
if shape.ndims < 2:
raise ValueError(
"Expected input tensor %s to have rank at least 2" % input_)
batch_size = shape[1].value
if batch_size is not None:
return batch_size
# Fallback to the dynamic batch size of the first input.
return array_ops.shape(flat_input[0])[1]
def _infer_state_dtype(explicit_dtype, state):
"""Infer the dtype of an RNN state.
Args:
explicit_dtype: explicitly declared dtype or None.
state: RNN's hidden state. Must be a Tensor or a nested iterable containing
Tensors.
Returns:
dtype: inferred dtype of hidden state.
Raises:
ValueError: if `state` has heterogeneous dtypes or is empty.
"""
if explicit_dtype is not None:
return explicit_dtype
elif nest.is_sequence(state):
inferred_dtypes = [element.dtype for element in nest.flatten(state)]
if not inferred_dtypes:
raise ValueError("Unable to infer dtype from empty state.")
all_same = all([x == inferred_dtypes[0] for x in inferred_dtypes])
if not all_same:
raise ValueError(
"State has tensors of different inferred_dtypes. Unable to infer a "
"single representative dtype.")
return inferred_dtypes[0]
else:
return state.dtype
# pylint: disable=unused-argument
def _rnn_step(
    time, sequence_length, min_sequence_length, max_sequence_length,
    zero_output, state, call_cell, state_size, skip_conditionals=False):
  """Calculate one step of a dynamic RNN minibatch.

  Returns an (output, state) pair conditioned on the sequence_lengths.
  When skip_conditionals=False, the pseudocode is something like:

  if t >= max_sequence_length:
    return (zero_output, state)
  if t < min_sequence_length:
    return call_cell()

  # Selectively output zeros or output, old state or new state depending
  # on if we've finished calculating each row.
  new_output, new_state = call_cell()
  final_output = np.vstack([
    zero_output if time >= sequence_lengths[r] else new_output_r
    for r, new_output_r in enumerate(new_output)
  ])
  final_state = np.vstack([
    state[r] if time >= sequence_lengths[r] else new_state_r
    for r, new_state_r in enumerate(new_state)
  ])
  return (final_output, final_state)

  Args:
    time: Python int, the current time step
    sequence_length: int32 `Tensor` vector of size [batch_size]
    min_sequence_length: int32 `Tensor` scalar, min of sequence_length
    max_sequence_length: int32 `Tensor` scalar, max of sequence_length
    zero_output: `Tensor` vector of shape [output_size]
    state: Either a single `Tensor` matrix of shape `[batch_size, state_size]`,
      or a list/tuple of such tensors.
    call_cell: lambda returning tuple of (new_output, new_state) where
      new_output is a `Tensor` matrix of shape `[batch_size, output_size]`.
      new_state is a `Tensor` matrix of shape `[batch_size, state_size]`.
    state_size: The `cell.state_size` associated with the state.
    skip_conditionals: Python bool, whether to skip using the conditional
      calculations. This is useful for `dynamic_rnn`, where the input tensor
      matches `max_sequence_length`, and using conditionals just slows
      everything down.

  Returns:
    A tuple of (`final_output`, `final_state`) as given by the pseudocode above:
      final_output is a `Tensor` matrix of shape [batch_size, output_size]
      final_state is either a single `Tensor` matrix, or a tuple of such
        matrices (matching length and shapes of input `state`).

  Raises:
    ValueError: If the cell returns a state tuple whose length does not match
      that returned by `state_size`.
  """

  # Convert state to a list for ease of use
  flat_state = nest.flatten(state)
  flat_zero_output = nest.flatten(zero_output)

  def _copy_one_through(output, new_output):
    # If the state contains a scalar value we simply pass it through.
    if output.shape.ndims == 0:
      return new_output
    # Rows whose sequence already ended keep `output`; still-running rows
    # take `new_output`.
    copy_cond = (time >= sequence_length)
    with ops.colocate_with(new_output):
      return array_ops.where(copy_cond, output, new_output)

  def _copy_some_through(flat_new_output, flat_new_state):
    # Use broadcasting select to determine which values should get
    # the previous state & zero output, and which values should get
    # a calculated state & output.
    flat_new_output = [
        _copy_one_through(zero_output, new_output)
        for zero_output, new_output in zip(flat_zero_output, flat_new_output)]
    flat_new_state = [
        _copy_one_through(state, new_state)
        for state, new_state in zip(flat_state, flat_new_state)]
    return flat_new_output + flat_new_state

  def _maybe_copy_some_through():
    """Run RNN step. Pass through either no or some past state."""
    new_output, new_state = call_cell()

    nest.assert_same_structure(state, new_state)

    flat_new_state = nest.flatten(new_state)
    flat_new_output = nest.flatten(new_output)
    return control_flow_ops.cond(
        # if t < min_seq_len: calculate and return everything
        time < min_sequence_length, lambda: flat_new_output + flat_new_state,
        # else copy some of it through
        lambda: _copy_some_through(flat_new_output, flat_new_state))

  # TODO(ebrevdo): skipping these conditionals may cause a slowdown,
  # but benefits from removing cond() and its gradient. We should
  # profile with and without this switch here.
  if skip_conditionals:
    # Instead of using conditionals, perform the selective copy at all time
    # steps. This is faster when max_seq_len is equal to the number of unrolls
    # (which is typical for dynamic_rnn).
    new_output, new_state = call_cell()
    nest.assert_same_structure(state, new_state)
    new_state = nest.flatten(new_state)
    new_output = nest.flatten(new_output)
    final_output_and_state = _copy_some_through(new_output, new_state)
  else:
    empty_update = lambda: flat_zero_output + flat_state
    final_output_and_state = control_flow_ops.cond(
        # if t >= max_seq_len: copy all state through, output zeros
        time >= max_sequence_length, empty_update,
        # otherwise calculation is required: copy some or all of it through
        _maybe_copy_some_through)

  # Both branches return outputs and states concatenated into one flat list;
  # sanity-check the length before splitting it back apart.
  if len(final_output_and_state) != len(flat_zero_output) + len(flat_state):
    raise ValueError("Internal error: state and output were not concatenated "
                     "correctly.")
  final_output = final_output_and_state[:len(flat_zero_output)]
  final_state = final_output_and_state[len(flat_zero_output):]

  # Re-attach the known static shapes from the zero-output/state templates.
  for output, flat_output in zip(final_output, flat_zero_output):
    output.set_shape(flat_output.get_shape())
  for substate, flat_substate in zip(final_state, flat_state):
    substate.set_shape(flat_substate.get_shape())

  # Restore the caller's (possibly nested) structures.
  final_output = nest.pack_sequence_as(
      structure=zero_output, flat_sequence=final_output)
  final_state = nest.pack_sequence_as(
      structure=state, flat_sequence=final_state)

  return final_output, final_state
def _reverse_seq(input_seq, lengths):
  """Reverse a list of Tensors up to specified lengths.

  Args:
    input_seq: Sequence of seq_len tensors of dimension (batch_size, n_features)
      or nested tuples of tensors.
    lengths: A `Tensor` of dimension batch_size, containing lengths for each
      sequence in the batch. If "None" is specified, simply reverses
      the list.

  Returns:
    time-reversed sequence
  """
  if lengths is None:
    # No per-row lengths: a plain reversal of the list suffices.
    return list(reversed(input_seq))

  # Flatten each (possibly nested) time step so every component can be
  # reversed independently.
  flat_input_seq = tuple(nest.flatten(input_) for input_ in input_seq)

  flat_results = [[] for _ in range(len(input_seq))]
  for sequence in zip(*flat_input_seq):
    # Merge the static shapes of this component across all time steps and
    # stamp the merged shape back onto each tensor.
    input_shape = tensor_shape.unknown_shape(
        ndims=sequence[0].get_shape().ndims)
    for input_ in sequence:
      input_shape.merge_with(input_.get_shape())
      input_.set_shape(input_shape)

    # Join into (time, batch_size, depth)
    s_joined = array_ops.stack(sequence)

    # Reverse along dimension 0
    s_reversed = array_ops.reverse_sequence(s_joined, lengths, 0, 1)

    # Split again into list
    result = array_ops.unstack(s_reversed)
    for r, flat_result in zip(result, flat_results):
      r.set_shape(input_shape)
      flat_result.append(r)

  # Re-nest each time step to match the structure of the original inputs.
  results = [nest.pack_sequence_as(structure=input_, flat_sequence=flat_result)
             for input_, flat_result in zip(input_seq, flat_results)]
  return results
def bidirectional_dynamic_rnn(cell_fw, cell_bw, inputs, sequence_length=None,
                              initial_state_fw=None, initial_state_bw=None,
                              dtype=None, parallel_iterations=None,
                              swap_memory=False, time_major=False, scope=None):
  """Creates a dynamic version of bidirectional recurrent neural network.

  Takes input and builds independent forward and backward RNNs. The input_size
  of forward and backward cell must match. The initial state for both directions
  is zero by default (but can be set optionally) and no intermediate states are
  ever returned -- the network is fully unrolled for the given (passed in)
  length(s) of the sequence(s) or completely unrolled if length(s) is not
  given.

  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: The RNN inputs.
      If time_major == False (default), this must be a tensor of shape:
        `[batch_size, max_time, ...]`, or a nested tuple of such elements.
      If time_major == True, this must be a tensor of shape:
        `[max_time, batch_size, ...]`, or a nested tuple of such elements.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences in the batch.
      If not provided, all batch entries are assumed to be full sequences; and
      time reversal is applied from time `0` to `max_time` for each sequence.
    initial_state_fw: (optional) An initial state for the forward RNN.
      This must be a tensor of appropriate type and shape
      `[batch_size, cell_fw.state_size]`.
      If `cell_fw.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using
      the corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial states and expected output.
      Required if initial_states are not provided or RNN states have a
      heterogeneous dtype.
    parallel_iterations: (Default: 32). The number of iterations to run in
      parallel. Those operations which do not have any temporal dependency
      and can be run in parallel, will be. This parameter trades off
      time for space. Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU. This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors.
      If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
      If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
      Using `time_major = True` is a bit more efficient because it avoids
      transposes at the beginning and end of the RNN calculation. However,
      most TensorFlow data is batch-major, so by default this function
      accepts input and emits output in batch-major form.
    scope: VariableScope for the created subgraph; defaults to
      "bidirectional_rnn"

  Returns:
    A tuple (outputs, output_states) where:
      outputs: A tuple (output_fw, output_bw) containing the forward and
        the backward rnn output `Tensor`.
        If time_major == False (default),
          output_fw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[batch_size, max_time, cell_bw.output_size]`.
        If time_major == True,
          output_fw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_fw.output_size]`
          and output_bw will be a `Tensor` shaped:
          `[max_time, batch_size, cell_bw.output_size]`.
        It returns a tuple instead of a single concatenated `Tensor`, unlike
        in the `bidirectional_rnn`. If the concatenated one is preferred,
        the forward and backward outputs can be concatenated as
        `tf.concat(outputs, 2)`.
      output_states: A tuple (output_state_fw, output_state_bw) containing
        the forward and the backward final states of bidirectional rnn.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
  """
  if not _like_rnncell(cell_fw):
    raise TypeError("cell_fw must be an instance of RNNCell")
  if not _like_rnncell(cell_bw):
    raise TypeError("cell_bw must be an instance of RNNCell")

  with vs.variable_scope(scope or "bidirectional_rnn"):
    # Forward direction
    with vs.variable_scope("fw") as fw_scope:
      output_fw, output_state_fw = dynamic_rnn(
          cell=cell_fw, inputs=inputs, sequence_length=sequence_length,
          initial_state=initial_state_fw, dtype=dtype,
          parallel_iterations=parallel_iterations, swap_memory=swap_memory,
          time_major=time_major, scope=fw_scope)

    # Backward direction
    # Which axis is time and which is batch depends on the layout.
    if not time_major:
      time_dim = 1
      batch_dim = 0
    else:
      time_dim = 0
      batch_dim = 1

    def _reverse(input_, seq_lengths, seq_dim, batch_dim):
      # Reverse along the time axis, honoring per-row sequence lengths when
      # they are provided.
      if seq_lengths is not None:
        return array_ops.reverse_sequence(
            input=input_, seq_lengths=seq_lengths,
            seq_dim=seq_dim, batch_dim=batch_dim)
      else:
        return array_ops.reverse(input_, axis=[seq_dim])

    with vs.variable_scope("bw") as bw_scope:
      # Feed the backward cell the time-reversed inputs...
      inputs_reverse = _reverse(
          inputs, seq_lengths=sequence_length,
          seq_dim=time_dim, batch_dim=batch_dim)
      tmp, output_state_bw = dynamic_rnn(
          cell=cell_bw, inputs=inputs_reverse, sequence_length=sequence_length,
          initial_state=initial_state_bw, dtype=dtype,
          parallel_iterations=parallel_iterations, swap_memory=swap_memory,
          time_major=time_major, scope=bw_scope)

  # ...then reverse its outputs again so they line up with the forward pass.
  output_bw = _reverse(
      tmp, seq_lengths=sequence_length,
      seq_dim=time_dim, batch_dim=batch_dim)

  outputs = (output_fw, output_bw)
  output_states = (output_state_fw, output_state_bw)

  return (outputs, output_states)
def dynamic_rnn(cell, inputs, sequence_length=None, initial_state=None,
                dtype=None, parallel_iterations=None, swap_memory=False,
                time_major=False, scope=None):
  """Creates a recurrent neural network specified by RNNCell `cell`.

  Performs fully dynamic unrolling of `inputs`.

  Example:

  ```python
  # create a BasicRNNCell
  rnn_cell = tf.nn.rnn_cell.BasicRNNCell(hidden_size)

  # 'outputs' is a tensor of shape [batch_size, max_time, cell_state_size]

  # defining initial state
  initial_state = rnn_cell.zero_state(batch_size, dtype=tf.float32)

  # 'state' is a tensor of shape [batch_size, cell_state_size]
  outputs, state = tf.nn.dynamic_rnn(rnn_cell, input_data,
                                     initial_state=initial_state,
                                     dtype=tf.float32)
  ```

  ```python
  # create 2 LSTMCells
  rnn_layers = [tf.nn.rnn_cell.LSTMCell(size) for size in [128, 256]]

  # create a RNN cell composed sequentially of a number of RNNCells
  multi_rnn_cell = tf.nn.rnn_cell.MultiRNNCell(rnn_layers)

  # 'outputs' is a tensor of shape [batch_size, max_time, 256]
  # 'state' is a N-tuple where N is the number of LSTMCells containing a
  # tf.contrib.rnn.LSTMStateTuple for each cell
  outputs, state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,
                                     inputs=data,
                                     dtype=tf.float32)
  ```

  Args:
    cell: An instance of RNNCell.
    inputs: The RNN inputs.
      If `time_major == False` (default), this must be a `Tensor` of shape:
        `[batch_size, max_time, ...]`, or a nested tuple of such
        elements.
      If `time_major == True`, this must be a `Tensor` of shape:
        `[max_time, batch_size, ...]`, or a nested tuple of such
        elements.
      This may also be a (possibly nested) tuple of Tensors satisfying
      this property. The first two dimensions must match across all the inputs,
      but otherwise the ranks and other shape components may differ.
      In this case, input to `cell` at each time-step will replicate the
      structure of these tuples, except for the time dimension (from which the
      time is taken).
      The input to `cell` at each time step will be a `Tensor` or (possibly
      nested) tuple of Tensors each with dimensions `[batch_size, ...]`.
    sequence_length: (optional) An int32/int64 vector sized `[batch_size]`.
      Used to copy-through state and zero-out outputs when past a batch
      element's sequence length. So it's more for correctness than performance.
    initial_state: (optional) An initial state for the RNN.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a heterogeneous
      dtype.
    parallel_iterations: (Default: 32). The number of iterations to run in
      parallel. Those operations which do not have any temporal dependency
      and can be run in parallel, will be. This parameter trades off
      time for space. Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU. This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    time_major: The shape format of the `inputs` and `outputs` Tensors.
      If true, these `Tensors` must be shaped `[max_time, batch_size, depth]`.
      If false, these `Tensors` must be shaped `[batch_size, max_time, depth]`.
      Using `time_major = True` is a bit more efficient because it avoids
      transposes at the beginning and end of the RNN calculation. However,
      most TensorFlow data is batch-major, so by default this function
      accepts input and emits output in batch-major form.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where:

    outputs: The RNN output `Tensor`.

      If time_major == False (default), this will be a `Tensor` shaped:
        `[batch_size, max_time, cell.output_size]`.

      If time_major == True, this will be a `Tensor` shaped:
        `[max_time, batch_size, cell.output_size]`.

      Note, if `cell.output_size` is a (possibly nested) tuple of integers
      or `TensorShape` objects, then `outputs` will be a tuple having the
      same structure as `cell.output_size`, containing Tensors having shapes
      corresponding to the shape data in `cell.output_size`.

    state: The final state. If `cell.state_size` is an int, this
      will be shaped `[batch_size, cell.state_size]`. If it is a
      `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
      If it is a (possibly nested) tuple of ints or `TensorShape`, this will
      be a tuple having the corresponding shapes. If cells are `LSTMCells`
      `state` will be a tuple containing a `LSTMStateTuple` for each cell.

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If inputs is None or an empty list.
  """
  if not _like_rnncell(cell):
    raise TypeError("cell must be an instance of RNNCell")

  # By default, time_major==False and inputs are batch-major: shaped
  #   [batch, time, depth]
  # For internal calculations, we transpose to [time, batch, depth]
  flat_input = nest.flatten(inputs)

  if not time_major:
    # (B,T,D) => (T,B,D)
    flat_input = [ops.convert_to_tensor(input_) for input_ in flat_input]
    flat_input = tuple(_transpose_batch_time(input_) for input_ in flat_input)

  parallel_iterations = parallel_iterations or 32
  if sequence_length is not None:
    sequence_length = math_ops.to_int32(sequence_length)
    if sequence_length.get_shape().ndims not in (None, 1):
      raise ValueError(
          "sequence_length must be a vector of length batch_size, "
          "but saw shape: %s" % sequence_length.get_shape())
    sequence_length = array_ops.identity(  # Just to find it in the graph.
        sequence_length, name="sequence_length")

  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "rnn") as varscope:
    if context.in_graph_mode():
      if varscope.caching_device is None:
        varscope.set_caching_device(lambda op: op.device)

    batch_size = _best_effort_input_batch_size(flat_input)

    if initial_state is not None:
      state = initial_state
    else:
      # Without an initial state a dtype is mandatory to build the zeros.
      if not dtype:
        raise ValueError("If there is no initial_state, you must give a dtype.")
      state = cell.zero_state(batch_size, dtype)

    def _assert_has_shape(x, shape):
      # Graph-mode runtime assertion that `x`'s dynamic shape matches `shape`.
      x_shape = array_ops.shape(x)
      packed_shape = array_ops.stack(shape)
      return control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(x_shape, packed_shape)),
          ["Expected shape for Tensor %s is " % x.name,
           packed_shape, " but saw shape: ", x_shape])

    if context.in_graph_mode() and sequence_length is not None:
      # Perform some shape validation
      with ops.control_dependencies(
          [_assert_has_shape(sequence_length, [batch_size])]):
        sequence_length = array_ops.identity(
            sequence_length, name="CheckSeqLen")

    inputs = nest.pack_sequence_as(structure=inputs, flat_sequence=flat_input)

    (outputs, final_state) = _dynamic_rnn_loop(
        cell,
        inputs,
        state,
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory,
        sequence_length=sequence_length,
        dtype=dtype)

    # Outputs of _dynamic_rnn_loop are always shaped [time, batch, depth].
    # If we are performing batch-major calculations, transpose output back
    # to shape [batch, time, depth]
    if not time_major:
      # (T,B,D) => (B,T,D)
      outputs = nest.map_structure(_transpose_batch_time, outputs)

    return (outputs, final_state)
def _dynamic_rnn_loop(cell,
                      inputs,
                      initial_state,
                      parallel_iterations,
                      swap_memory,
                      sequence_length=None,
                      dtype=None):
  """Internal implementation of Dynamic RNN.

  Args:
    cell: An instance of RNNCell.
    inputs: A `Tensor` of shape [time, batch_size, input_size], or a nested
      tuple of such elements.
    initial_state: A `Tensor` of shape `[batch_size, state_size]`, or if
      `cell.state_size` is a tuple, then this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    parallel_iterations: Positive Python int.
    swap_memory: A Python boolean
    sequence_length: (optional) An `int32` `Tensor` of shape [batch_size].
    dtype: (optional) Expected dtype of output. If not specified, inferred from
      initial_state.

  Returns:
    Tuple `(final_outputs, final_state)`.
    final_outputs:
      A `Tensor` of shape `[time, batch_size, cell.output_size]`. If
      `cell.output_size` is a (possibly nested) tuple of ints or `TensorShape`
      objects, then this returns a (possibly nested) tuple of Tensors matching
      the corresponding shapes.
    final_state:
      A `Tensor`, or possibly nested tuple of Tensors, matching in length
      and shapes to `initial_state`.

  Raises:
    ValueError: If the input depth cannot be inferred via shape inference
      from the inputs.
  """
  state = initial_state
  assert isinstance(parallel_iterations, int), "parallel_iterations must be int"

  state_size = cell.state_size

  flat_input = nest.flatten(inputs)
  flat_output_size = nest.flatten(cell.output_size)

  # Construct an initial output
  input_shape = array_ops.shape(flat_input[0])
  time_steps = input_shape[0]
  batch_size = _best_effort_input_batch_size(flat_input)

  inputs_got_shape = tuple(input_.get_shape().with_rank_at_least(3)
                           for input_ in flat_input)

  const_time_steps, const_batch_size = inputs_got_shape[0].as_list()[:2]

  # Every input must agree on (time, batch) and have a fully-defined depth.
  for shape in inputs_got_shape:
    if not shape[2:].is_fully_defined():
      raise ValueError(
          "Input size (depth of inputs) must be accessible via shape inference,"
          " but saw value None.")
    got_time_steps = shape[0].value
    got_batch_size = shape[1].value
    if const_time_steps != got_time_steps:
      raise ValueError(
          "Time steps is not the same for all the elements in the input in a "
          "batch.")
    if const_batch_size != got_batch_size:
      raise ValueError(
          "Batch_size is not the same for all the elements in the input.")

  # Prepare dynamic conditional copying of state & output
  def _create_zero_arrays(size):
    # Zero tensor of shape [batch_size] + size, in the state's dtype.
    size = _concat(batch_size, size)
    return array_ops.zeros(
        array_ops.stack(size), _infer_state_dtype(dtype, state))

  flat_zero_output = tuple(_create_zero_arrays(output)
                           for output in flat_output_size)
  zero_output = nest.pack_sequence_as(structure=cell.output_size,
                                      flat_sequence=flat_zero_output)

  if sequence_length is not None:
    min_sequence_length = math_ops.reduce_min(sequence_length)
    max_sequence_length = math_ops.reduce_max(sequence_length)

  time = array_ops.constant(0, dtype=dtypes.int32, name="time")

  with ops.name_scope("dynamic_rnn") as scope:
    base_name = scope

  def _create_ta(name, dtype):
    return tensor_array_ops.TensorArray(dtype=dtype,
                                        size=time_steps,
                                        tensor_array_name=base_name + name)

  in_graph_mode = context.in_graph_mode()
  if in_graph_mode:
    # Graph mode: buffer per-step inputs/outputs in TensorArrays.
    output_ta = tuple(_create_ta("output_%d" % i,
                                 _infer_state_dtype(dtype, state))
                      for i in range(len(flat_output_size)))
    input_ta = tuple(_create_ta("input_%d" % i, flat_input[i].dtype)
                     for i in range(len(flat_input)))
    input_ta = tuple(ta.unstack(input_)
                     for ta, input_ in zip(input_ta, flat_input))
  else:
    # Eager mode: plain Python lists stand in for TensorArrays.
    output_ta = tuple([0 for _ in range(time_steps.numpy())]
                      for i in range(len(flat_output_size)))
    input_ta = flat_input

  def _time_step(time, output_ta_t, state):
    """Take a time step of the dynamic RNN.

    Args:
      time: int32 scalar Tensor.
      output_ta_t: List of `TensorArray`s that represent the output.
      state: nested tuple of vector tensors that represent the state.

    Returns:
      The tuple (time + 1, output_ta_t with updated flow, new_state).
    """
    if in_graph_mode:
      input_t = tuple(ta.read(time) for ta in input_ta)
      # Restore some shape information
      for input_, shape in zip(input_t, inputs_got_shape):
        input_.set_shape(shape[1:])
    else:
      input_t = tuple(ta[time.numpy()] for ta in input_ta)

    input_t = nest.pack_sequence_as(structure=inputs, flat_sequence=input_t)
    call_cell = lambda: cell(input_t, state)

    if sequence_length is not None:
      # Respect per-row sequence lengths: copy state through / zero outputs
      # for rows that have already finished.
      (output, new_state) = _rnn_step(
          time=time,
          sequence_length=sequence_length,
          min_sequence_length=min_sequence_length,
          max_sequence_length=max_sequence_length,
          zero_output=zero_output,
          state=state,
          call_cell=call_cell,
          state_size=state_size,
          skip_conditionals=True)
    else:
      (output, new_state) = call_cell()

    # Pack state if using state tuples
    output = nest.flatten(output)

    if in_graph_mode:
      output_ta_t = tuple(
          ta.write(time, out) for ta, out in zip(output_ta_t, output))
    else:
      for ta, out in zip(output_ta_t, output):
        ta[time.numpy()] = out

    return (time + 1, output_ta_t, new_state)

  _, output_final_ta, final_state = control_flow_ops.while_loop(
      cond=lambda time, *_: time < time_steps,
      body=_time_step,
      loop_vars=(time, output_ta, state),
      parallel_iterations=parallel_iterations,
      swap_memory=swap_memory)

  # Unpack final output if not using output tuples.
  if in_graph_mode:
    final_outputs = tuple(ta.stack() for ta in output_final_ta)
    # Restore some shape information
    for output, output_size in zip(final_outputs, flat_output_size):
      shape = _concat(
          [const_time_steps, const_batch_size], output_size, static=True)
      output.set_shape(shape)
  else:
    final_outputs = output_final_ta

  final_outputs = nest.pack_sequence_as(
      structure=cell.output_size, flat_sequence=final_outputs)
  if not in_graph_mode:
    # Eager mode collected a Python list of per-step outputs; stack them
    # along the time axis to match graph-mode output shape.
    final_outputs = array_ops.stack(final_outputs, axis=0)

  return (final_outputs, final_state)
def raw_rnn(cell, loop_fn,
            parallel_iterations=None, swap_memory=False, scope=None):
  """Creates an `RNN` specified by RNNCell `cell` and loop function `loop_fn`.

  **NOTE: This method is still in testing, and the API may change.**

  This function is a more primitive version of `dynamic_rnn` that provides
  more direct access to the inputs each iteration. It also provides more
  control over when to start and finish reading the sequence, and
  what to emit for the output.

  For example, it can be used to implement the dynamic decoder of a seq2seq
  model.

  Instead of working with `Tensor` objects, most operations work with
  `TensorArray` objects directly.

  The operation of `raw_rnn`, in pseudo-code, is basically the following:

  ```python
  time = tf.constant(0, dtype=tf.int32)
  (finished, next_input, initial_state, _, loop_state) = loop_fn(
      time=time, cell_output=None, cell_state=None, loop_state=None)
  emit_ta = TensorArray(dynamic_size=True, dtype=initial_state.dtype)
  state = initial_state
  while not all(finished):
    (output, cell_state) = cell(next_input, state)
    (next_finished, next_input, next_state, emit, loop_state) = loop_fn(
        time=time + 1, cell_output=output, cell_state=cell_state,
        loop_state=loop_state)
    # Emit zeros and copy forward state for minibatch entries that are finished.
    state = tf.where(finished, state, next_state)
    emit = tf.where(finished, tf.zeros_like(emit), emit)
    emit_ta = emit_ta.write(time, emit)
    # If any new minibatch entries are marked as finished, mark these.
    finished = tf.logical_or(finished, next_finished)
    time += 1
  return (emit_ta, state, loop_state)
  ```

  with the additional properties that output and state may be (possibly nested)
  tuples, as determined by `cell.output_size` and `cell.state_size`, and
  as a result the final `state` and `emit_ta` may themselves be tuples.

  A simple implementation of `dynamic_rnn` via `raw_rnn` looks like this:

  ```python
  inputs = tf.placeholder(shape=(max_time, batch_size, input_depth),
                          dtype=tf.float32)
  sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)
  inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
  inputs_ta = inputs_ta.unstack(inputs)

  cell = tf.contrib.rnn.LSTMCell(num_units)

  def loop_fn(time, cell_output, cell_state, loop_state):
    emit_output = cell_output  # == None for time == 0
    if cell_output is None:  # time == 0
      next_cell_state = cell.zero_state(batch_size, tf.float32)
    else:
      next_cell_state = cell_state
    elements_finished = (time >= sequence_length)
    finished = tf.reduce_all(elements_finished)
    next_input = tf.cond(
        finished,
        lambda: tf.zeros([batch_size, input_depth], dtype=tf.float32),
        lambda: inputs_ta.read(time))
    next_loop_state = None
    return (elements_finished, next_input, next_cell_state,
            emit_output, next_loop_state)

  outputs_ta, final_state, _ = raw_rnn(cell, loop_fn)
  outputs = outputs_ta.stack()
  ```

  Args:
    cell: An instance of RNNCell.
    loop_fn: A callable that takes inputs
      `(time, cell_output, cell_state, loop_state)`
      and returns the tuple
      `(finished, next_input, next_cell_state, emit_output, next_loop_state)`.
      Here `time` is an int32 scalar `Tensor`, `cell_output` is a
      `Tensor` or (possibly nested) tuple of tensors as determined by
      `cell.output_size`, and `cell_state` is a `Tensor`
      or (possibly nested) tuple of tensors, as determined by the `loop_fn`
      on its first call (and should match `cell.state_size`).
      The outputs are: `finished`, a boolean `Tensor` of
      shape `[batch_size]`, `next_input`: the next input to feed to `cell`,
      `next_cell_state`: the next state to feed to `cell`,
      and `emit_output`: the output to store for this iteration.
      Note that `emit_output` should be a `Tensor` or (possibly nested)
      tuple of tensors with shapes and structure matching `cell.output_size`
      and `cell_output` above. The parameter `cell_state` and output
      `next_cell_state` may be either a single or (possibly nested) tuple
      of tensors. The parameter `loop_state` and
      output `next_loop_state` may be either a single or (possibly nested) tuple
      of `Tensor` and `TensorArray` objects. This last parameter
      may be ignored by `loop_fn` and the return value may be `None`. If it
      is not `None`, then the `loop_state` will be propagated through the RNN
      loop, for use purely by `loop_fn` to keep track of its own state.
      The `next_loop_state` parameter returned may be `None`.

      The first call to `loop_fn` will be `time = 0`, `cell_output = None`,
      `cell_state = None`, and `loop_state = None`. For this call:
      The `next_cell_state` value should be the value with which to initialize
      the cell's state. It may be a final state from a previous RNN or it
      may be the output of `cell.zero_state()`. It should be a
      (possibly nested) tuple structure of tensors.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a `TensorShape`, this must be a `Tensor` of
      appropriate type and shape `[batch_size] + cell.state_size`.
      If `cell.state_size` is a (possibly nested) tuple of ints or
      `TensorShape`, this will be a tuple having the corresponding shapes.
      The `emit_output` value may be either `None` or a (possibly nested)
      tuple structure of tensors, e.g.,
      `(tf.zeros(shape_0, dtype=dtype_0), tf.zeros(shape_1, dtype=dtype_1))`.
      If this first `emit_output` return value is `None`,
      then the `emit_ta` result of `raw_rnn` will have the same structure and
      dtypes as `cell.output_size`. Otherwise `emit_ta` will have the same
      structure, shapes (prepended with a `batch_size` dimension), and dtypes
      as `emit_output`. The actual values returned for `emit_output` at this
      initializing call are ignored. Note, this emit structure must be
      consistent across all time steps.
    parallel_iterations: (Default: 32). The number of iterations to run in
      parallel. Those operations which do not have any temporal dependency
      and can be run in parallel, will be. This parameter trades off
      time for space. Values >> 1 use more memory but take less time,
      while smaller values use less memory but computations take longer.
    swap_memory: Transparently swap the tensors produced in forward inference
      but needed for back prop from GPU to CPU. This allows training RNNs
      which would typically not fit on a single GPU, with very minimal (or no)
      performance penalty.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A tuple `(emit_ta, final_state, final_loop_state)` where:

    `emit_ta`: The RNN output `TensorArray`.
      If `loop_fn` returns a (possibly nested) set of Tensors for
      `emit_output` during initialization, (inputs `time = 0`,
      `cell_output = None`, and `loop_state = None`), then `emit_ta` will
      have the same structure, dtypes, and shapes as `emit_output` instead.
      If `loop_fn` returns `emit_output = None` during this call,
      the structure of `cell.output_size` is used:
      If `cell.output_size` is a (possibly nested) tuple of integers
      or `TensorShape` objects, then `emit_ta` will be a tuple having the
      same structure as `cell.output_size`, containing TensorArrays whose
      elements' shapes correspond to the shape data in `cell.output_size`.

    `final_state`: The final cell state. If `cell.state_size` is an int, this
      will be shaped `[batch_size, cell.state_size]`. If it is a
      `TensorShape`, this will be shaped `[batch_size] + cell.state_size`.
      If it is a (possibly nested) tuple of ints or `TensorShape`, this will
      be a tuple having the corresponding shapes.

    `final_loop_state`: The final loop state as returned by `loop_fn`.

  Raises:
    TypeError: If `cell` is not an instance of RNNCell, or `loop_fn` is not
      a `callable`.
  """
  # Validate arguments up front so graph construction fails fast.
  if not _like_rnncell(cell):
    raise TypeError("cell must be an instance of RNNCell")
  if not callable(loop_fn):
    raise TypeError("loop_fn must be a callable")

  parallel_iterations = parallel_iterations or 32

  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "rnn") as varscope:
    if context.in_graph_mode():
      if varscope.caching_device is None:
        varscope.set_caching_device(lambda op: op.device)

    time = constant_op.constant(0, dtype=dtypes.int32)
    # Initial call to loop_fn (cell_output/cell_state/loop_state all None):
    # yields the first input, the initial cell state and the emit structure.
    (elements_finished, next_input, initial_state, emit_structure,
     init_loop_state) = loop_fn(
         time, None, None, None)  # time, cell_output, cell_state, loop_state
    flat_input = nest.flatten(next_input)

    # Need a surrogate loop state for the while_loop if none is available.
    loop_state = (init_loop_state if init_loop_state is not None
                  else constant_op.constant(0, dtype=dtypes.int32))

    input_shape = [input_.get_shape() for input_ in flat_input]
    static_batch_size = input_shape[0][0]

    for input_shape_i in input_shape:
      # Static verification that batch sizes all match
      static_batch_size.merge_with(input_shape_i[0])

    batch_size = static_batch_size.value
    if batch_size is None:
      # Batch size not statically known; read it from the graph at runtime.
      batch_size = array_ops.shape(flat_input[0])[0]

    nest.assert_same_structure(initial_state, cell.state_size)
    state = initial_state
    flat_state = nest.flatten(state)
    flat_state = [ops.convert_to_tensor(s) for s in flat_state]
    state = nest.pack_sequence_as(structure=state,
                                  flat_sequence=flat_state)

    if emit_structure is not None:
      # loop_fn supplied an emit structure: take shapes/dtypes from it.
      flat_emit_structure = nest.flatten(emit_structure)
      flat_emit_size = [emit.shape if emit.shape.is_fully_defined() else
                        array_ops.shape(emit) for emit in flat_emit_structure]
      flat_emit_dtypes = [emit.dtype for emit in flat_emit_structure]
    else:
      # Otherwise fall back to cell.output_size, using the state's dtype.
      emit_structure = cell.output_size
      flat_emit_size = nest.flatten(emit_structure)
      flat_emit_dtypes = [flat_state[0].dtype] * len(flat_emit_size)

    # One dynamically-sized TensorArray per flattened emit component.
    flat_emit_ta = [
        tensor_array_ops.TensorArray(
            dtype=dtype_i, dynamic_size=True, size=0, name="rnn_output_%d" % i)
        for i, dtype_i in enumerate(flat_emit_dtypes)]
    emit_ta = nest.pack_sequence_as(structure=emit_structure,
                                    flat_sequence=flat_emit_ta)
    # Zero emit values, written for batch entries that are already finished.
    flat_zero_emit = [
        array_ops.zeros(_concat(batch_size, size_i), dtype_i)
        for size_i, dtype_i in zip(flat_emit_size, flat_emit_dtypes)]
    zero_emit = nest.pack_sequence_as(structure=emit_structure,
                                      flat_sequence=flat_zero_emit)

    def condition(unused_time, elements_finished, *_):
      # Keep looping while at least one batch entry is unfinished.
      return math_ops.logical_not(math_ops.reduce_all(elements_finished))

    def body(time, elements_finished, current_input,
             emit_ta, state, loop_state):
      """Internal while loop body for raw_rnn.

      Args:
        time: time scalar.
        elements_finished: batch-size vector.
        current_input: possibly nested tuple of input tensors.
        emit_ta: possibly nested tuple of output TensorArrays.
        state: possibly nested tuple of state tensors.
        loop_state: possibly nested tuple of loop state tensors.

      Returns:
        Tuple having the same size as Args but with updated values.
      """
      (next_output, cell_state) = cell(current_input, state)

      nest.assert_same_structure(state, cell_state)
      nest.assert_same_structure(cell.output_size, next_output)

      next_time = time + 1
      (next_finished, next_input, next_state, emit_output,
       next_loop_state) = loop_fn(
           next_time, next_output, cell_state, loop_state)

      nest.assert_same_structure(state, next_state)
      nest.assert_same_structure(current_input, next_input)
      nest.assert_same_structure(emit_ta, emit_output)

      # If loop_fn returns None for next_loop_state, just reuse the
      # previous one.
      loop_state = loop_state if next_loop_state is None else next_loop_state

      def _copy_some_through(current, candidate):
        """Copy some tensors through via array_ops.where."""
        def copy_fn(cur_i, cand_i):
          with ops.colocate_with(cand_i):
            return array_ops.where(elements_finished, cur_i, cand_i)
        return nest.map_structure(copy_fn, current, candidate)

      # Finished entries emit zeros and carry their state forward unchanged.
      emit_output = _copy_some_through(zero_emit, emit_output)
      next_state = _copy_some_through(state, next_state)

      emit_ta = nest.map_structure(
          lambda ta, emit: ta.write(time, emit), emit_ta, emit_output)

      elements_finished = math_ops.logical_or(elements_finished, next_finished)

      return (next_time, elements_finished, next_input,
              emit_ta, next_state, loop_state)

    returned = control_flow_ops.while_loop(
        condition, body, loop_vars=[
            time, elements_finished, next_input,
            emit_ta, state, loop_state],
        parallel_iterations=parallel_iterations,
        swap_memory=swap_memory)

    (emit_ta, final_state, final_loop_state) = returned[-3:]

    # Hide the surrogate loop state (used when loop_fn supplied none).
    if init_loop_state is None:
      final_loop_state = None

    return (emit_ta, final_state, final_loop_state)
def static_rnn(cell,
               inputs,
               initial_state=None,
               dtype=None,
               sequence_length=None,
               scope=None):
  """Creates a recurrent neural network specified by RNNCell `cell`.

  The simplest form of RNN network generated is:

  ```python
    state = cell.zero_state(...)
    outputs = []
    for input_ in inputs:
      output, state = cell(input_, state)
      outputs.append(output)
    return (outputs, state)
  ```

  However, a few other options are available:

  An initial state can be provided.
  If the sequence_length vector is provided, dynamic calculation is performed.
  This method of calculation does not compute the RNN steps past the maximum
  sequence length of the minibatch (thus saving computational time),
  and properly propagates the state at an example's sequence length
  to the final state output.

  The dynamic calculation performed is, at time `t` for batch row `b`,

  ```python
    (output, state)(b, t) =
      (t >= sequence_length(b))
        ? (zeros(cell.output_size), states(b, sequence_length(b) - 1))
        : cell(input(b, t), state(b, t - 1))
  ```

  Args:
    cell: An instance of RNNCell.
    inputs: A length T list of inputs, each a `Tensor` of shape
      `[batch_size, input_size]`, or a nested tuple of such elements.
    initial_state: (optional) An initial state for the RNN.
      If `cell.state_size` is an integer, this must be
      a `Tensor` of appropriate type and shape `[batch_size, cell.state_size]`.
      If `cell.state_size` is a tuple, this should be a tuple of
      tensors having shapes `[batch_size, s] for s in cell.state_size`.
    dtype: (optional) The data type for the initial state and expected output.
      Required if initial_state is not provided or RNN state has a heterogeneous
      dtype.
    sequence_length: Specifies the length of each sequence in inputs.
      An int32 or int64 vector (tensor) size `[batch_size]`, values in `[0, T)`.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where:

    - outputs is a length T list of outputs (one for each input), or a nested
      tuple of such elements.
    - state is the final state

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the input depth
      (column size) cannot be inferred from inputs via shape inference.
  """
  if not _like_rnncell(cell):
    raise TypeError("cell must be an instance of RNNCell")
  if not nest.is_sequence(inputs):
    raise TypeError("inputs must be a sequence")
  if not inputs:
    raise ValueError("inputs must not be empty")

  outputs = []
  # Create a new scope in which the caching device is either
  # determined by the parent scope, or is set to place the cached
  # Variable using the same placement as for the rest of the RNN.
  with vs.variable_scope(scope or "rnn") as varscope:
    if context.in_graph_mode():
      if varscope.caching_device is None:
        varscope.set_caching_device(lambda op: op.device)

    # Obtain the first sequence of the input
    first_input = inputs
    while nest.is_sequence(first_input):
      first_input = first_input[0]

    # Temporarily avoid EmbeddingWrapper and seq2seq badness
    # TODO(lukaszkaiser): remove EmbeddingWrapper
    if first_input.get_shape().ndims != 1:

      input_shape = first_input.get_shape().with_rank_at_least(2)
      fixed_batch_size = input_shape[0]

      flat_inputs = nest.flatten(inputs)
      for flat_input in flat_inputs:
        # All inputs must agree on the static batch size, and the input
        # depth must be statically known so variables can be shaped.
        input_shape = flat_input.get_shape().with_rank_at_least(2)
        batch_size, input_size = input_shape[0], input_shape[1:]
        fixed_batch_size.merge_with(batch_size)
        for i, size in enumerate(input_size):
          if size.value is None:
            raise ValueError(
                "Input size (dimension %d of inputs) must be accessible via "
                "shape inference, but saw value None." % i)
    else:
      fixed_batch_size = first_input.get_shape().with_rank_at_least(1)[0]

    if fixed_batch_size.value:
      batch_size = fixed_batch_size.value
    else:
      # Batch size not statically known; read it from the graph at runtime.
      batch_size = array_ops.shape(first_input)[0]
    if initial_state is not None:
      state = initial_state
    else:
      if not dtype:
        raise ValueError("If no initial_state is provided, "
                         "dtype must be specified")
      state = cell.zero_state(batch_size, dtype)

    if sequence_length is not None:  # Prepare variables
      sequence_length = ops.convert_to_tensor(
          sequence_length, name="sequence_length")
      if sequence_length.get_shape().ndims not in (None, 1):
        raise ValueError(
            "sequence_length must be a vector of length batch_size")

      def _create_zero_output(output_size):
        # convert int to TensorShape if necessary
        size = _concat(batch_size, output_size)
        output = array_ops.zeros(
            array_ops.stack(size), _infer_state_dtype(dtype, state))
        shape = _concat(fixed_batch_size.value, output_size, static=True)
        output.set_shape(tensor_shape.TensorShape(shape))
        return output

      output_size = cell.output_size
      flat_output_size = nest.flatten(output_size)
      # Zero output emitted for batch entries past their sequence length.
      flat_zero_output = tuple(
          _create_zero_output(size) for size in flat_output_size)
      zero_output = nest.pack_sequence_as(
          structure=output_size, flat_sequence=flat_zero_output)

      sequence_length = math_ops.to_int32(sequence_length)
      min_sequence_length = math_ops.reduce_min(sequence_length)
      max_sequence_length = math_ops.reduce_max(sequence_length)

    # Statically unroll the RNN: one cell call per time step, sharing
    # variables across steps via reuse_variables().
    for time, input_ in enumerate(inputs):
      if time > 0:
        varscope.reuse_variables()
      # pylint: disable=cell-var-from-loop
      call_cell = lambda: cell(input_, state)
      # pylint: enable=cell-var-from-loop
      if sequence_length is not None:
        (output, state) = _rnn_step(
            time=time,
            sequence_length=sequence_length,
            min_sequence_length=min_sequence_length,
            max_sequence_length=max_sequence_length,
            zero_output=zero_output,
            state=state,
            call_cell=call_cell,
            state_size=cell.state_size)
      else:
        (output, state) = call_cell()

      outputs.append(output)

    return (outputs, state)
def static_state_saving_rnn(cell,
                            inputs,
                            state_saver,
                            state_name,
                            sequence_length=None,
                            scope=None):
  """RNN that accepts a state saver for time-truncated RNN calculation.

  Reads the initial state from `state_saver`, runs `static_rnn`, and then
  saves the final state back, making the returned outputs depend on the
  save so that evaluating them also persists the state.

  Args:
    cell: An instance of `RNNCell`.
    inputs: A length T list of inputs, each a `Tensor` of shape
      `[batch_size, input_size]`.
    state_saver: A state saver object with methods `state` and `save_state`.
    state_name: Python string or tuple of strings. The name to use with the
      state_saver. If the cell returns tuples of states (i.e.,
      `cell.state_size` is a tuple) then `state_name` should be a tuple of
      strings having the same length as `cell.state_size`. Otherwise it
      should be a single string.
    sequence_length: (optional) An int32/int64 vector size [batch_size].
      See the documentation for rnn() for more details about sequence_length.
    scope: VariableScope for the created subgraph; defaults to "rnn".

  Returns:
    A pair (outputs, state) where outputs is a length T list of outputs
    (one for each input) and state is the final state.

  Raises:
    TypeError: If `cell` is not an instance of RNNCell.
    ValueError: If `inputs` is `None` or an empty list, or if the arity and
      type of `state_name` does not match that of `cell.state_size`.
  """
  size_spec = cell.state_size
  spec_is_tuple = nest.is_sequence(size_spec)
  name_is_tuple = nest.is_sequence(state_name)
  # state_name must mirror the structure of cell.state_size.
  if spec_is_tuple != name_is_tuple:
    raise ValueError("state_name should be the same type as cell.state_size. "
                     "state_name: %s, cell.state_size: %s" % (str(state_name),
                                                              str(size_spec)))
  if spec_is_tuple:
    flat_names = nest.flatten(state_name)
    flat_sizes = nest.flatten(size_spec)
    if len(flat_names) != len(flat_sizes):
      raise ValueError("#elems(state_name) != #elems(state_size): %d vs. %d" %
                       (len(flat_names), len(flat_sizes)))
    # Pull each named sub-state from the saver and repack it.
    initial_state = nest.pack_sequence_as(
        structure=size_spec,
        flat_sequence=[state_saver.state(n) for n in flat_names])
  else:
    initial_state = state_saver.state(state_name)

  outputs, state = static_rnn(
      cell,
      inputs,
      initial_state=initial_state,
      sequence_length=sequence_length,
      scope=scope)

  # Persist the final state back into the saver, sub-state by sub-state.
  if spec_is_tuple:
    save_state = [
        state_saver.save_state(name, sub)
        for name, sub in zip(nest.flatten(state_name), nest.flatten(state))
    ]
  else:
    save_state = [state_saver.save_state(state_name, state)]

  # Tie the save ops to the last output so that fetching the outputs
  # also triggers the state save.
  with ops.control_dependencies(save_state):
    last_output = outputs[-1]
    outputs[-1] = nest.pack_sequence_as(
        structure=last_output,
        flat_sequence=[array_ops.identity(t)
                       for t in nest.flatten(last_output)])

  return (outputs, state)
def static_bidirectional_rnn(cell_fw,
                             cell_bw,
                             inputs,
                             initial_state_fw=None,
                             initial_state_bw=None,
                             dtype=None,
                             sequence_length=None,
                             scope=None):
  """Creates a bidirectional recurrent neural network.

  Runs two independent `static_rnn`s -- one forward over `inputs` and one
  backward over the (sequence-length aware) reversed inputs -- and returns
  the per-step depth-concatenation of their outputs, in the format
  [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size
  of forward and backward cell must match. The initial state for both
  directions is zero by default (but can be set optionally) and no
  intermediate states are ever returned -- the network is fully unrolled
  for the given (passed in) length(s) of the sequence(s) or completely
  unrolled if length(s) is not given.

  Args:
    cell_fw: An instance of RNNCell, to be used for forward direction.
    cell_bw: An instance of RNNCell, to be used for backward direction.
    inputs: A length T list of inputs, each a tensor of shape
      [batch_size, input_size], or a nested tuple of such elements.
    initial_state_fw: (optional) An initial state for the forward RNN.
      This must be a tensor of appropriate type and shape
      `[batch_size, cell_fw.state_size]`. If `cell_fw.state_size` is a
      tuple, this should be a tuple of tensors having shapes
      `[batch_size, s] for s in cell_fw.state_size`.
    initial_state_bw: (optional) Same as for `initial_state_fw`, but using
      the corresponding properties of `cell_bw`.
    dtype: (optional) The data type for the initial state. Required if
      either of the initial states are not provided.
    sequence_length: (optional) An int32/int64 vector, size `[batch_size]`,
      containing the actual lengths for each of the sequences.
    scope: VariableScope for the created subgraph; defaults to
      "bidirectional_rnn"

  Returns:
    A tuple (outputs, output_state_fw, output_state_bw) where outputs is a
    length `T` list of depth-concatenated forward/backward outputs, and
    output_state_fw / output_state_bw are the final states of the forward
    and backward rnn respectively.

  Raises:
    TypeError: If `cell_fw` or `cell_bw` is not an instance of `RNNCell`.
    ValueError: If inputs is None or an empty list.
  """
  # Validate arguments before building any graph ops.
  if not _like_rnncell(cell_fw):
    raise TypeError("cell_fw must be an instance of RNNCell")
  if not _like_rnncell(cell_bw):
    raise TypeError("cell_bw must be an instance of RNNCell")
  if not nest.is_sequence(inputs):
    raise TypeError("inputs must be a sequence")
  if not inputs:
    raise ValueError("inputs must not be empty")

  with vs.variable_scope(scope or "bidirectional_rnn"):
    # Forward pass over the inputs as given.
    with vs.variable_scope("fw") as fw_scope:
      output_fw, output_state_fw = static_rnn(
          cell_fw, inputs, initial_state_fw, dtype, sequence_length,
          scope=fw_scope)
    # Backward pass: reverse each sequence, run the RNN over it.
    with vs.variable_scope("bw") as bw_scope:
      backward_outputs, output_state_bw = static_rnn(
          cell_bw, _reverse_seq(inputs, sequence_length), initial_state_bw,
          dtype, sequence_length, scope=bw_scope)

  # Un-reverse the backward outputs so both directions are time-aligned.
  output_bw = _reverse_seq(backward_outputs, sequence_length)

  # Depth-concatenate the per-step forward and backward outputs.
  paired = zip(nest.flatten(output_fw), nest.flatten(output_bw))
  flat_outputs = tuple(array_ops.concat([fw, bw], 1) for fw, bw in paired)
  outputs = nest.pack_sequence_as(structure=output_fw,
                                  flat_sequence=flat_outputs)

  return (outputs, output_state_fw, output_state_bw)
| {
"content_hash": "a5463d2eb41718fa86202448d72d0222",
"timestamp": "",
"source": "github",
"line_count": 1442,
"max_line_length": 80,
"avg_line_length": 41.237864077669904,
"alnum_prop": 0.6630286723282603,
"repo_name": "dyoung418/tensorflow",
"id": "21c7ed361dc8d613d3332905ded1952dfe34681c",
"size": "60155",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/rnn.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "155915"
},
{
"name": "C++",
"bytes": "9052366"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "763492"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1772913"
},
{
"name": "Protocol Buffer",
"bytes": "110178"
},
{
"name": "Python",
"bytes": "6032114"
},
{
"name": "Shell",
"bytes": "165125"
},
{
"name": "TypeScript",
"bytes": "403037"
}
],
"symlink_target": ""
} |
from jacket.tests.compute.unit.volume.encryptors import test_base
from jacket.compute.volume.encryptors import nop
class NoOpEncryptorTestCase(test_base.VolumeEncryptorTestCase):
    """Tests for the no-op volume encryptor.

    The no-op encryptor performs no encryption, so attach/detach are
    expected to succeed without doing anything.
    """

    def _create(self, connection_info):
        # Hook used by the base class to build the encryptor under test.
        return nop.NoOpEncryptor(connection_info)

    def test_attach_volume(self):
        # attach_volume is a no-op here; the test only checks it does not raise.
        self.encryptor.attach_volume(None)

    def test_detach_volume(self):
        # detach_volume is a no-op here; the test only checks it does not raise.
        self.encryptor.detach_volume()
| {
"content_hash": "d1f5a139dd48c0663ba2e825b03e63f7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 32.53846153846154,
"alnum_prop": 0.75177304964539,
"repo_name": "HybridF5/jacket",
"id": "ddf4651cc9ad95d444ce1b7001182c338b3e7361",
"size": "1098",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jacket/tests/compute/unit/volume/encryptors/test_nop.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26995056"
},
{
"name": "Shell",
"bytes": "28464"
},
{
"name": "Smarty",
"bytes": "291947"
}
],
"symlink_target": ""
} |
from setuptools import setup

# Packaging configuration for the netscramble puzzle game.
# The console_scripts entry point installs a `netscramble` command that
# launches the GUI's main() function.
setup(
    name='netscramble',
    version='0.1',
    description='Simple logic puzzle game',
    url='http://tomdryer.com',
    author='Tom Dryer',
    author_email='tomdryer.com@gmail.com',
    license='BSD',
    packages=['netscramble'],
    entry_points={
        "console_scripts": ["netscramble=netscramble.gui:main"],
    },
    include_package_data=True,
    zip_safe=False,
    test_suite='nose.collector',
    tests_require=['nose'],
)
| {
"content_hash": "da2be0f4719820a5cdd14efce55681ee",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 64,
"avg_line_length": 25.210526315789473,
"alnum_prop": 0.6325678496868476,
"repo_name": "tdryer/netscramble",
"id": "4457f2926b5e40dcffa3636659eed43ed690070d",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "31441"
}
],
"symlink_target": ""
} |
"""Print the Xth day on the Yth week of the current month.

For example, to print the 2nd Sat of the current month:

    ./xth_day_of_yth_week.py 2 5

Week numbers start at 1; day numbers follow datetime.weekday():
0 = Monday ... 6 = Sunday.
"""
import datetime
import sys


def compute_day(reference, week_num, day_num):
    """Return the day_num-th weekday of the week_num-th week of a month.

    Args:
        reference: a datetime.datetime anywhere inside the target month.
        week_num: 1-based index of the requested occurrence of the weekday.
        day_num: weekday index, 0 (Monday) through 6 (Sunday).

    Returns:
        A datetime.datetime for the requested day (time-of-day is carried
        over from ``reference``).
    """
    # 1st day in the current month
    first = reference.replace(day=1)
    # Offset from the 1st to the FIRST occurrence of the requested weekday.
    # The modulo is the bug fix: without it, a weekday earlier in the week
    # than the month's starting weekday produced a negative offset and the
    # result landed in the previous month (e.g. the 1st Monday of a month
    # starting on Saturday), and "2nd X" could actually be the 1st X.
    offset = (day_num - first.weekday()) % 7
    # Jump ahead to the requested week.
    return first + datetime.timedelta(days=offset + 7 * (week_num - 1))


if __name__ == "__main__":
    week_num = int(sys.argv[1])
    day_num = int(sys.argv[2])
    day = compute_day(datetime.datetime.now(), week_num, day_num)
    print(day.strftime("%Y-%m-%d"))
| {
"content_hash": "3ddb50c886b19b960b034384b5c4b54b",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 56,
"avg_line_length": 26.045454545454547,
"alnum_prop": 0.6963350785340314,
"repo_name": "vmiklos/vmexam",
"id": "8f4a7b2f6424baf5a4405c92f0ddc4425cbf4902",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/xth_day_of_yth_week.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1356"
},
{
"name": "C",
"bytes": "207141"
},
{
"name": "C#",
"bytes": "6115"
},
{
"name": "C++",
"bytes": "174284"
},
{
"name": "CMake",
"bytes": "90430"
},
{
"name": "Go",
"bytes": "13344"
},
{
"name": "HTML",
"bytes": "7421"
},
{
"name": "Java",
"bytes": "33479"
},
{
"name": "JavaScript",
"bytes": "15830"
},
{
"name": "JetBrains MPS",
"bytes": "93"
},
{
"name": "Kotlin",
"bytes": "12619"
},
{
"name": "M4",
"bytes": "4410"
},
{
"name": "Makefile",
"bytes": "133045"
},
{
"name": "Objective-C",
"bytes": "6102"
},
{
"name": "PDDL",
"bytes": "2562"
},
{
"name": "PHP",
"bytes": "10859"
},
{
"name": "Perl",
"bytes": "566936"
},
{
"name": "PowerShell",
"bytes": "618"
},
{
"name": "Python",
"bytes": "185940"
},
{
"name": "Rust",
"bytes": "40567"
},
{
"name": "Shell",
"bytes": "74062"
},
{
"name": "TypeScript",
"bytes": "45072"
},
{
"name": "VBA",
"bytes": "3117"
},
{
"name": "Vim Script",
"bytes": "1105"
},
{
"name": "XSLT",
"bytes": "281"
}
],
"symlink_target": ""
} |
import sys
from os import mkdir,sep,path
#import numpy as np
#from cv2 import *
#from hand_grabber import PyOpenNIHandGrabber
#from pose_recognizer import PyPoseRecognizer
import thread
import xml.etree.ElementTree as ET
#import Image
from random import *
import time
#from my_fun import *
#from sklearn.externals import joblib
from robot_hand import *
import base64
import datetime
import socket
from Crypto.Cipher import AES # encryption library
# The character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE.
PADDING = '{'
BLOCK_SIZE = 64


# PEP 8 (E731): named functions instead of lambdas assigned to names.
def pad(s):
    """Right-pad s with PADDING up to the next multiple of BLOCK_SIZE.

    When len(s) is already a multiple of BLOCK_SIZE a full extra block of
    padding is appended (same behavior as the original one-liner).
    """
    return s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING


def DecodeAES(c, e):
    """Base64-decode e, decrypt it with cipher c and strip the padding."""
    return c.decrypt(base64.b64decode(e)).rstrip(PADDING)


# Sign labels the robot hand can perform; anything else is rejected.
SIGN_LIST = ['A', 'B', 'C', 'D', 'F', 'H', 'I', 'K', 'L', 'O', 'P2', 'S1',
             'V', 'W', 'X', 'Y']
SIGN_INDEX = 0
SIGN_SIZE = 16
MAX_POSES = 100

# Communication Parameters
PASSCODE = 'PARLOMA3' * 2  # 16-character AES key (see AES.new(PASSCODE))
SIGN_WINDOW_NUMBER = 5
# Alternative deployment addresses kept for reference:
# IP = 'localhost'
# IP = '10.10.0.1'
IP = '192.168.85.201'
# PORT = 8089
PORT = 9091
MSGLEN = 88  # fixed length of every encrypted message on the wire
class ServerSocket:
    """TCP server that receives AES-encrypted sign labels and drives the hand.

    Listens on (IP, PORT); each accepted client is served on its own thread
    by handController(). Python 2 code (print statements, thread module).
    """

    def __init__(self, IP, PORT, PASSCODE, ser, name):
        # NOTE(review): the PASSCODE parameter is accepted but never used;
        # the AES cipher is built by the caller and passed to start() --
        # confirm whether the parameter can be dropped.
        self.server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        self.server_socket.bind((IP,PORT))
        self.server_socket.listen(1)
        # Robot hand on the given serial port: calibrate it and move to rest.
        self.hand = Hand(ser)
        #self.hand.perform_hardCalibration()
        self.hand.perform_softCalibration()
        self.hand.perform_rest()
        #self.hand = 0
        # Output folder used for this experiment's log files.
        self.name = name

    def start(self, crypt):
        """Accept clients forever; serve each on its own thread with cipher crypt."""
        print "Initializing..."
        #Initializing -> strangely the first sign won't be performed
        res = self.hand.perform_sign('A')
        res = self.hand.perform_sign('REST')
        print 'Ready! Waiting on IP '+IP+' and PORT '+str(PORT)
        #return
        while True:
            client_socket, address = self.server_socket.accept();
            print 'Listening to client, address:'
            print address
            thread.start_new_thread(self.handController, (self.hand, crypt, client_socket, address))

    def handController(self, hand, crypt, client_socket, address, *args):
        """Per-client loop: read MSGLEN-byte messages, decrypt, perform the sign."""
        #actual_sign = 'rest'
        #actual_counter = 0
        while True:
            # Read exactly MSGLEN bytes (recv may return partial chunks);
            # an empty chunk means the peer closed the connection.
            msg = ''
            while len(msg) < MSGLEN:
                chunk = client_socket.recv(MSGLEN-len(msg))
                if chunk == '':
                    print "Connection to Client is DOWN!"
                    print address
                    client_socket.close()
                    return
                msg = msg + chunk
            buf = msg
            if len(buf) != MSGLEN: # client closed or network error
                print 'Client Closed or Communication Error'
                print address
                client_socket.close()
                return
            else:
                # Decrypt the fixed-size message into a sign label.
                buf = DecodeAES(crypt, buf)
                print buf + ' RECEIVED'
                if buf == 'quit':
                    print 'Ok, Quitting'
                    return
                else:
                    x = buf in SIGN_LIST
                    if x == False:
                        # Unknown label: log the rejection, do not move the hand.
                        print 'Invalid sign received'
                        out_file = open(self.name+sep+"resultsPerformedHand.txt","a")
                        out_file.write('Invalid sign ' + buf + ' received! \n')
                        out_file.close()
                    else:
                        # Perform the sign, return to rest, and log the result
                        # string reported by the hand.
                        res = hand.perform_sign(buf)
                        #time.sleep(4)
                        hand.perform_rest()
                        out_file = open(self.name+sep+"resultsPerformedHand.txt","a")
                        out_file.write(res + '\n')
                        out_file.close()
                    # Log every received (non-quit) label.
                    # NOTE(review): placement reconstructed from a dump with
                    # stripped indentation -- confirm this runs for invalid
                    # signs too, not only for performed ones.
                    out_file = open(self.name+sep+"resultsReceivedInternet.txt","a")
                    out_file.write(buf + '\t' + buf + '\n')
                    out_file.close()
                    #if actual_sign == buf:
                        #actual_counter += 1
                    #    if actual_counter == SIGN_WINDOW_NUMBER:
                    #        hand.perform_sign(buf)
                    #        print 'Sign Performed'
                    #else:
                        #actual_sign = buf
                        #actual_counter = 1
#main
# Entry point: "python script_name serial volunteer" -- creates the
# volunteer's output folder and log files, builds the AES cipher, and
# runs the server's accept loop until interrupted.
if __name__=="__main__":
    if len(sys.argv)!=3:
        print("Usage:Client > python script_name serial volunteer")
    else:
        if not path.exists(sys.argv[2]):
            mkdir(sys.argv[2])
            print "New folder created for this experiment"
        # Truncate ("w") both log files and write their headers; the worker
        # threads later re-open them in append mode.
        out_file = open(sys.argv[2]+sep+"resultsPerformedHand.txt","w")
        out_file.write('#Reference pose of PRENSILIA Hand wrt specified poses \n')
        out_file.write('#Joints order middle, ring, little, thumb, thumb_o \n')
        out_file.close()
        out_file = open(sys.argv[2]+sep+"resultsReceivedInternet.txt","w")
        out_file.write('Sign received from internet' + '\t' + 'Actual joints positions' + '\n')
        out_file.close()
        # NOTE(review): the server is constructed with 'P'*16 as PASSCODE but
        # the cipher below uses the module-level PASSCODE -- confirm they are
        # the same key.
        server = ServerSocket(IP, PORT, 'P'*16, sys.argv[1], sys.argv[2])
        crypt = AES.new(PASSCODE)
        server.start(crypt)
        #while True:
        # Accept and dispatch connection from client
        #print 'Waiting on IP '+IP+' and PORT '+str(PORT)
        #(SocketClient, address) = server.server_socket.accept()
        #handController(SocketClient, address, crypt)
| {
"content_hash": "275eee465d8b84efd9d076ee38a2fa31",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 100,
"avg_line_length": 34.96296296296296,
"alnum_prop": 0.548728813559322,
"repo_name": "parloma/Prensilia",
"id": "145fc73e020cebe952dc49c000d57cff6475d182",
"size": "5782",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prensilia/Exp_OUTPUT_WINDOWS.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "18021"
},
{
"name": "Python",
"bytes": "104539"
},
{
"name": "Shell",
"bytes": "81"
}
],
"symlink_target": ""
} |
"""
=============
Keypress demo
=============
This example demonstrates the different keypress-gathering techniques available
in the ExperimentController class.
"""
# Author: Dan McCloy <drmccloy@uw.edu>
#
# License: BSD (3-clause)
from expyfun import ExperimentController, building_doc
import expyfun.analyze as ea
print(__doc__)
isi = 0.5
wait_dur = 3.0 if not building_doc else 0.
msg_dur = 3.0 if not building_doc else 0.
with ExperimentController('KeypressDemo', screen_num=0,
window_size=[640, 480], full_screen=False,
stim_db=0, noise_db=0, output_dir=None,
participant='foo', session='001',
version='dev') as ec:
ec.wait_secs(isi)
###############
# screen_prompt
pressed = ec.screen_prompt('press any key\n\nscreen_prompt('
'max_wait={})'.format(wait_dur),
max_wait=wait_dur, timestamp=True)
ec.write_data_line('screen_prompt', pressed)
if pressed[0] is None:
message = 'no keys pressed'
else:
message = '{} pressed after {} secs'.format(pressed[0],
round(pressed[1], 4))
ec.screen_prompt(message, msg_dur)
ec.wait_secs(isi)
##################
# wait_for_presses
ec.screen_text('press some keys\n\nwait_for_presses(max_wait={})'
''.format(wait_dur))
screenshot = ec.screenshot()
ec.flip()
pressed = ec.wait_for_presses(wait_dur)
ec.write_data_line('wait_for_presses', pressed)
if not len(pressed):
message = 'no keys pressed'
else:
message = ['{} pressed after {} secs\n'
''.format(key, round(time, 4)) for key, time in pressed]
message = ''.join(message)
ec.screen_prompt(message, msg_dur)
ec.wait_secs(isi)
############################################
# wait_for_presses, relative to master clock
ec.screen_text('press some keys\n\nwait_for_presses(max_wait={}, '
'relative_to=0.0)'.format(wait_dur))
ec.flip()
pressed = ec.wait_for_presses(wait_dur, relative_to=0.0)
ec.write_data_line('wait_for_presses relative_to 0.0', pressed)
if not len(pressed):
message = 'no keys pressed'
else:
message = ['{} pressed at {} secs\n'
''.format(key, round(time, 4)) for key, time in pressed]
message = ''.join(message)
ec.screen_prompt(message, msg_dur)
ec.wait_secs(isi)
##########################################
# listen_presses / wait_secs / get_presses
ec.screen_text('press some keys\n\nlisten_presses()\nwait_secs({0})'
'\nget_presses()'.format(wait_dur))
ec.flip()
ec.listen_presses()
ec.wait_secs(wait_dur)
pressed = ec.get_presses() # relative_to=0.0
ec.write_data_line('listen / wait / get_presses', pressed)
if not len(pressed):
message = 'no keys pressed'
else:
message = ['{} pressed after {} secs\n'
''.format(key, round(time, 4)) for key, time in pressed]
message = ''.join(message)
ec.screen_prompt(message, msg_dur)
ec.wait_secs(isi)
####################################################################
# listen_presses / wait_secs / get_presses, relative to master clock
ec.screen_text('press a few keys\n\nlisten_presses()'
'\nwait_secs({0})\nget_presses(relative_to=0.0)'
''.format(wait_dur))
ec.flip()
ec.listen_presses()
ec.wait_secs(wait_dur)
pressed = ec.get_presses(relative_to=0.0)
ec.write_data_line('listen / wait / get_presses relative_to 0.0', pressed)
if not len(pressed):
message = 'no keys pressed'
else:
message = ['{} pressed at {} secs\n'
''.format(key, round(time, 4)) for key, time in pressed]
message = ''.join(message)
ec.screen_prompt(message, msg_dur)
ec.wait_secs(isi)
###########################################
# listen_presses / while loop / get_presses
disp_time = wait_dur
countdown = ec.current_time + disp_time
ec.call_on_next_flip(ec.listen_presses)
ec.screen_text('press some keys\n\nlisten_presses()'
'\nwhile loop {}\nget_presses()'.format(disp_time))
ec.flip()
while ec.current_time < countdown:
cur_time = round(countdown - ec.current_time, 1)
if cur_time != disp_time:
disp_time = cur_time
# redraw text with updated disp_time
ec.screen_text('press some keys\n\nlisten_presses() '
'\nwhile loop {}\nget_presses()'.format(disp_time))
ec.flip()
pressed = ec.get_presses()
ec.write_data_line('listen / while / get_presses', pressed)
if not len(pressed):
message = 'no keys pressed'
else:
message = ['{} pressed after {} secs\n'
''.format(key, round(time, 4)) for key, time in pressed]
message = ''.join(message)
ec.screen_prompt(message, msg_dur)
ec.wait_secs(isi)
#####################################################################
# listen_presses / while loop / get_presses, relative to master clock
disp_time = wait_dur
countdown = ec.current_time + disp_time
ec.call_on_next_flip(ec.listen_presses)
ec.screen_text('press some keys\n\nlisten_presses()\nwhile loop '
'{}\nget_presses(relative_to=0.0)'.format(disp_time))
ec.flip()
while ec.current_time < countdown:
cur_time = round(countdown - ec.current_time, 1)
if cur_time != disp_time:
disp_time = cur_time
# redraw text with updated disp_time
ec.screen_text('press some keys\n\nlisten_presses()\nwhile '
'loop {}\nget_presses(relative_to=0.0)'
''.format(disp_time))
ec.flip()
pressed = ec.get_presses(relative_to=0.0)
ec.write_data_line('listen / while / get_presses relative_to 0.0', pressed)
if not len(pressed):
message = 'no keys pressed'
else:
message = ['{} pressed at {} secs\n'
''.format(key, round(time, 4)) for key, time in pressed]
message = ''.join(message)
ec.screen_prompt(message, msg_dur)
ea.plot_screen(screenshot)
| {
"content_hash": "b50096d59121b623962d5a0175f32cdd",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 38.214285714285715,
"alnum_prop": 0.5468847352024923,
"repo_name": "LABSN/expyfun",
"id": "c0f657aa9864edba64f4ec58f7f2bc8082542403",
"size": "6420",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "examples/experiments/keypress.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1433"
},
{
"name": "PowerShell",
"bytes": "895"
},
{
"name": "Python",
"bytes": "589852"
}
],
"symlink_target": ""
} |
import grpc
from google.cloud.automl_v1.proto import (
annotation_spec_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2,
)
from google.cloud.automl_v1.proto import (
dataset_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2,
)
from google.cloud.automl_v1.proto import (
model_evaluation_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2,
)
from google.cloud.automl_v1.proto import (
model_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2,
)
from google.cloud.automl_v1.proto import (
service_pb2 as google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2,
)
from google.longrunning import (
operations_pb2 as google_dot_longrunning_dot_operations__pb2,
)
# NOTE(review): gRPC-generated client stub (grpcio-tools output); prefer
# regenerating from the .proto over hand-editing.
class AutoMlStub(object):
    """AutoML Server API.

    The resource names are assigned by the server.
    The server never reuses names that it has created after the resources with
    those names are deleted.

    An ID of a resource is the last element of the item's resource name. For
    `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then
    the id for the item is `{dataset_id}`.

    Currently the only supported `location_id` is "us-central1".

    On any input that is documented to expect a string parameter in
    snake_case or kebab-case, either of those cases is accepted.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
          channel: A grpc.Channel.
        """
        # One unary-unary callable per RPC; request/response (de)serializers
        # come from the generated protobuf modules.
        self.CreateDataset = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/CreateDataset",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateDatasetRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetDataset = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/GetDataset",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetDatasetRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.FromString,
        )
        self.ListDatasets = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/ListDatasets",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsResponse.FromString,
        )
        self.UpdateDataset = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/UpdateDataset",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateDatasetRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.FromString,
        )
        self.DeleteDataset = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/DeleteDataset",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteDatasetRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.ImportData = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/ImportData",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ImportDataRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.ExportData = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/ExportData",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportDataRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetAnnotationSpec = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/GetAnnotationSpec",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.FromString,
        )
        self.CreateModel = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/CreateModel",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateModelRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetModel = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/GetModel",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.FromString,
        )
        self.ListModels = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/ListModels",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsResponse.FromString,
        )
        self.DeleteModel = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/DeleteModel",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteModelRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.UpdateModel = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/UpdateModel",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateModelRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.FromString,
        )
        self.DeployModel = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/DeployModel",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeployModelRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.UndeployModel = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/UndeployModel",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UndeployModelRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.ExportModel = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/ExportModel",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportModelRequest.SerializeToString,
            response_deserializer=google_dot_longrunning_dot_operations__pb2.Operation.FromString,
        )
        self.GetModelEvaluation = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/GetModelEvaluation",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2.ModelEvaluation.FromString,
        )
        self.ListModelEvaluations = channel.unary_unary(
            "/google.cloud.automl.v1.AutoMl/ListModelEvaluations",
            request_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsRequest.SerializeToString,
            response_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsResponse.FromString,
        )
# NOTE(review): gRPC-generated servicer base class; override the methods in a
# subclass and register it with add_AutoMlServicer_to_server.
class AutoMlServicer(object):
    """AutoML Server API.

    The resource names are assigned by the server.
    The server never reuses names that it has created after the resources with
    those names are deleted.

    An ID of a resource is the last element of the item's resource name. For
    `projects/{project_id}/locations/{location_id}/datasets/{dataset_id}`, then
    the id for the item is `{dataset_id}`.

    Currently the only supported `location_id` is "us-central1".

    On any input that is documented to expect a string parameter in
    snake_case or kebab-case, either of those cases is accepted.
    """

    def CreateDataset(self, request, context):
        """Creates a dataset.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetDataset(self, request, context):
        """Gets a dataset.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListDatasets(self, request, context):
        """Lists datasets in a project.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateDataset(self, request, context):
        """Updates a dataset.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteDataset(self, request, context):
        """Deletes a dataset and all of its contents.
        Returns empty response in the
        [response][google.longrunning.Operation.response] field when it completes,
        and `delete_details` in the
        [metadata][google.longrunning.Operation.metadata] field.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ImportData(self, request, context):
        """Imports data into a dataset.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ExportData(self, request, context):
        """Exports dataset's data to the provided output location.
        Returns an empty response in the
        [response][google.longrunning.Operation.response] field when it completes.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetAnnotationSpec(self, request, context):
        """Gets an annotation spec.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def CreateModel(self, request, context):
        """Creates a model.
        Returns a Model in the [response][google.longrunning.Operation.response]
        field when it completes.
        When you create a model, several model evaluations are created for it:
        a global evaluation, and one evaluation for each annotation spec.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetModel(self, request, context):
        """Gets a model.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListModels(self, request, context):
        """Lists models.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeleteModel(self, request, context):
        """Deletes a model.
        Returns `google.protobuf.Empty` in the
        [response][google.longrunning.Operation.response] field when it completes,
        and `delete_details` in the
        [metadata][google.longrunning.Operation.metadata] field.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UpdateModel(self, request, context):
        """Updates a model.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def DeployModel(self, request, context):
        """Deploys a model. If a model is already deployed, deploying it with the
        same parameters has no effect. Deploying with different parameters
        (as e.g. changing
        [node_number][google.cloud.automl.v1.ImageObjectDetectionModelDeploymentMetadata.node_number])
        will reset the deployment state without pausing the model's availability.
        Only applicable for Text Classification, Image Object Detection; all other
        domains manage deployment automatically.
        Returns an empty response in the
        [response][google.longrunning.Operation.response] field when it completes.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def UndeployModel(self, request, context):
        """Undeploys a model. If the model is not deployed this method has no effect.
        Only applicable for Text Classification, Image Object Detection;
        all other domains manage deployment automatically.
        Returns an empty response in the
        [response][google.longrunning.Operation.response] field when it completes.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ExportModel(self, request, context):
        """Exports a trained, "export-able", model to a user specified Google Cloud
        Storage location. A model is considered export-able if and only if it has
        an export format defined for it in
        [ModelExportOutputConfig][google.cloud.automl.v1.ModelExportOutputConfig].
        Returns an empty response in the
        [response][google.longrunning.Operation.response] field when it completes.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def GetModelEvaluation(self, request, context):
        """Gets a model evaluation.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")

    def ListModelEvaluations(self, request, context):
        """Lists model evaluations.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details("Method not implemented!")
        raise NotImplementedError("Method not implemented!")
def add_AutoMlServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on a grpc *server*.

    Generated registration helper: builds one unary-unary handler per AutoMl
    RPC and attaches them under the "google.cloud.automl.v1.AutoMl" service
    name.
    """
    rpc_method_handlers = {
        "CreateDataset": grpc.unary_unary_rpc_method_handler(
            servicer.CreateDataset,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateDatasetRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "GetDataset": grpc.unary_unary_rpc_method_handler(
            servicer.GetDataset,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetDatasetRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString,
        ),
        "ListDatasets": grpc.unary_unary_rpc_method_handler(
            servicer.ListDatasets,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListDatasetsResponse.SerializeToString,
        ),
        "UpdateDataset": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateDataset,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateDatasetRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_dataset__pb2.Dataset.SerializeToString,
        ),
        "DeleteDataset": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteDataset,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteDatasetRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "ImportData": grpc.unary_unary_rpc_method_handler(
            servicer.ImportData,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ImportDataRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "ExportData": grpc.unary_unary_rpc_method_handler(
            servicer.ExportData,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportDataRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "GetAnnotationSpec": grpc.unary_unary_rpc_method_handler(
            servicer.GetAnnotationSpec,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetAnnotationSpecRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_annotation__spec__pb2.AnnotationSpec.SerializeToString,
        ),
        "CreateModel": grpc.unary_unary_rpc_method_handler(
            servicer.CreateModel,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.CreateModelRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "GetModel": grpc.unary_unary_rpc_method_handler(
            servicer.GetModel,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.SerializeToString,
        ),
        "ListModels": grpc.unary_unary_rpc_method_handler(
            servicer.ListModels,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelsResponse.SerializeToString,
        ),
        "DeleteModel": grpc.unary_unary_rpc_method_handler(
            servicer.DeleteModel,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeleteModelRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "UpdateModel": grpc.unary_unary_rpc_method_handler(
            servicer.UpdateModel,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UpdateModelRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__pb2.Model.SerializeToString,
        ),
        "DeployModel": grpc.unary_unary_rpc_method_handler(
            servicer.DeployModel,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.DeployModelRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "UndeployModel": grpc.unary_unary_rpc_method_handler(
            servicer.UndeployModel,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.UndeployModelRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "ExportModel": grpc.unary_unary_rpc_method_handler(
            servicer.ExportModel,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ExportModelRequest.FromString,
            response_serializer=google_dot_longrunning_dot_operations__pb2.Operation.SerializeToString,
        ),
        "GetModelEvaluation": grpc.unary_unary_rpc_method_handler(
            servicer.GetModelEvaluation,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.GetModelEvaluationRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_model__evaluation__pb2.ModelEvaluation.SerializeToString,
        ),
        "ListModelEvaluations": grpc.unary_unary_rpc_method_handler(
            servicer.ListModelEvaluations,
            request_deserializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsRequest.FromString,
            response_serializer=google_dot_cloud_dot_automl__v1_dot_proto_dot_service__pb2.ListModelEvaluationsResponse.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        "google.cloud.automl.v1.AutoMl", rpc_method_handlers
    )
    server.add_generic_rpc_handlers((generic_handler,))
| {
"content_hash": "9953e24c2fbd4094b2733769d4c6fd5f",
"timestamp": "",
"source": "github",
"line_count": 415,
"max_line_length": 138,
"avg_line_length": 52.46265060240964,
"alnum_prop": 0.7044828219731766,
"repo_name": "tswast/google-cloud-python",
"id": "0ad90914659506e5cfc7cdfc810949f61da02ec3",
"size": "21842",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "automl/google/cloud/automl_v1/proto/service_pb2_grpc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
import Chirp, getopt
import paho.mqtt.client as mqtt
import sys, time
import logging
class ChirpConnect:
    """Bridge between a Chirp I2C soil sensor and an MQTT broker.

    Periodically reads temperature, capacitive moisture and light from the
    sensor and publishes each reading to ``<topic>/temp``,
    ``<topic>/moisture`` and ``<topic>/light``.
    """

    def __init__(self, bus=1, addr=0x20, host="localhost", port=2337, ssl=True, topic="sensors/Chirp", interval=60):
        # NOTE(review): the ssl flag is stored but never used to configure
        # TLS on the MQTT client -- confirm whether --ssl is meant to work.
        self.bus_num = bus
        self.bus_addr = addr
        self.mqtt_host = host
        self.mqtt_port = port
        self.mqtt_ssl = ssl
        self.mqtt_topic = topic
        self.interval = interval  # seconds between publish cycles
        self.chirp = Chirp.Chirp(self.bus_num, self.bus_addr)
        self.client = mqtt.Client()
        self.client.on_connect = self.on_connect
        self.client.on_publish = self.on_publish
        logging.basicConfig(level=logging.DEBUG)

    def on_connect(self, client, userdata, flag, rc):
        """paho-mqtt callback: log the broker connection result code."""
        logging.info("Connected with result code" + str(rc))

    def on_publish(self, client, userdata, mid):
        """paho-mqtt callback: log that message *mid* left the client."""
        logging.debug("Message " + str(mid) + "published to topic")

    def publishTemp(self):
        """Read the sensor temperature and publish it to <topic>/temp."""
        temp = self.chirp.temp()
        logging.debug("Publishing " + self.mqtt_topic +"/temp with value: " +str(temp))
        rc = self.client.publish(self.mqtt_topic + "/temp", temp, 0, False)
        logging.debug(str(rc))

    def publishMoisture(self):
        """Read capacitive moisture and publish it to <topic>/moisture."""
        moisture = self.chirp.cap_sense()
        logging.debug("Publishing " + self.mqtt_topic +"/moisture with value: " +str(moisture))
        rc = self.client.publish(self.mqtt_topic + "/moisture", moisture, 0, False)
        logging.debug(str(rc))

    def publishLight(self):
        """Read the light level and publish it to <topic>/light."""
        light = self.chirp.light()
        logging.debug("Publishing " + self.mqtt_topic +"/light with value: "+str(light))
        rc = self.client.publish(self.mqtt_topic + "/light", light, 0, False)
        logging.debug(str(rc))

    def loop(self):
        """Connect to the broker and publish all readings every interval.

        Runs until KeyboardInterrupt, then disconnects cleanly. The sleep is
        anchored to the start time so cycles stay aligned to the interval
        regardless of how long the sensor reads take.
        """
        logging.info("Connecting to "+self.mqtt_host+" on port "+ str(self.mqtt_port))
        # Positional args: host, port, keepalive=60, bind_address="".
        self.client.connect(self.mqtt_host, self.mqtt_port, 60, "")
        self.client.loop_start()
        starttime = time.time()
        try:
            while(True):
                self.publishLight()
                self.publishMoisture()
                self.publishTemp()
                time.sleep(self.interval - ((time.time()-starttime) % self.interval))
        except KeyboardInterrupt:
            pass
        self.client.disconnect()
        logging.info("Exiting")
def printOpt():
    """Print command-line usage (defaults shown after '=')."""
    print "ChirpConnect -b <bus=1> -a <address=0x20> -h <host=localhost> -p <port=2337> --ssl -t <topic=sensors/Chirp> -i <interval=60>"
def main(argv):
    """Parse command-line options and run the Chirp-to-MQTT bridge.

    Options: -b bus, -a I2C address, -h broker host, -p broker port,
    --ssl, -t topic prefix, -i publish interval in seconds. On a parse
    error the usage is printed and the process exits with status 2.

    Args:
        argv: argument list without the program name (sys.argv[1:]).
    """
    bus = 1
    address = 0x20
    host = "localhost"
    port = 2337
    ssl = False
    topic = "sensors/Chirp"
    interval = 60
    try:
        opts, args = getopt.getopt(argv, "b:a:h:p:t:i:", ["ssl"])
    except getopt.GetoptError:
        printOpt()
        sys.exit(2)
    for opt, arg in opts:
        if opt == "-b":
            bus = int(arg)
        elif opt == "-a":
            # base 0 infers the base from the prefix, so both the documented
            # hex form ("0x20") and plain decimal ("32") are accepted;
            # int(arg) alone would reject "0x20".
            address = int(arg, 0)
        elif opt == "-h":
            host = arg
        elif opt == "-p":
            port = int(arg)
        elif opt == "--ssl":
            ssl=True
        elif opt == "-t":
            topic = arg
        elif opt == "-i":
            interval = int(arg)
    chirpConnect = ChirpConnect(bus, address, host, port, ssl, topic, interval)
    chirpConnect.loop()
if __name__ == "__main__":
main(sys.argv[1:])
| {
"content_hash": "c9f38241e56bfd3306aee528999c75c5",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 136,
"avg_line_length": 33.40594059405941,
"alnum_prop": 0.5610551274451689,
"repo_name": "mapero/ChirpConnect",
"id": "71fb6112935191ecb485ebccbc5e3fa6a349e2f0",
"size": "3374",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ChirpConnect.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5173"
}
],
"symlink_target": ""
} |
"""Sana mDS Django admin interface
:Authors: Sana dev team
:Version: 2.0
"""
from django.contrib import admin
from .models import *
def mark_voided(modeladmin,request,queryset):
    """Admin bulk action: flag every selected row as voided (soft delete)."""
    queryset.update(voided=True)
# Label shown in the admin "Actions" dropdown.
mark_voided.short_description = "Mark selected voided"
class DeviceAdmin(admin.ModelAdmin):
    """Admin config for Device: immutable uuid, void instead of delete."""
    readonly_fields = ['uuid']
    list_display = ['name', 'uuid']
    list_filter = ['name',]
    actions=[mark_voided,]
class ProcedureAdmin(admin.ModelAdmin):
    """Admin config for Procedure: immutable uuid, void instead of delete."""
    readonly_fields = ['uuid']
    list_display = ['title', 'author', 'uuid']
    actions=[mark_voided,]
class RestAdmin(admin.TabularInline):
    # NOTE(review): not registered or referenced anywhere in this module, and
    # 'app_label' is not a TabularInline option -- confirm whether this class
    # is still needed.
    app_label="REST Services"
    inlines = []
class RelationshipAdmin(admin.TabularInline):
    """Inline editor for Relationships attached to a Concept (used by ConceptAdmin)."""
    model = Relationship
    # The inline hangs off the 'to_concept' FK (Relationship has more than one
    # FK to Concept, so Django needs this disambiguation).
    fk_name = 'to_concept'
    list_display_links = []
class ConceptAdmin(admin.ModelAdmin):
    """Admin config for Concept, with its relationships edited inline."""
    inlines = [
        RelationshipAdmin,
    ]
    readonly_fields = ['uuid']
    list_display = ['name', 'uuid']
    list_filter = ['name',]
    actions=[mark_voided,]
class ObservationAdmin(admin.ModelAdmin):
    """Admin config for Observation; complex-data upload fields are read-only."""
    exclude = ('_complex_progress',)
    readonly_fields = ['_complex_size','uuid','value']
    list_display = ['question','voided','concept','value',
        'subject','device','created','modified', 'encounter', 'upload_progress']
    list_filter = ['node','concept', 'modified', 'encounter']
    actions=[mark_voided,]
class EncounterAdmin(admin.ModelAdmin):
    """Admin config for Encounter: void instead of delete."""
    exclude = ['concept',]
    list_display = ['subject','voided','procedure', 'created','uuid',"observer",]
    #actions = [mark_encounter_voided,]
    actions=[mark_voided,]
class EncounterInline(admin.StackedInline):
    """Stacked inline for Encounters (available for embedding in other admins)."""
    model = Encounter
class ObserverAdmin(admin.ModelAdmin):
    """Admin config for Observer: immutable uuid, void instead of delete."""
    readonly_fields = ['uuid',]
    list_display = ['user', 'uuid']
    actions=[mark_voided,]
class SubjectAdmin(admin.ModelAdmin):
    """Admin config for Subject (patient) records."""
    readonly_fields = ['uuid',]
    list_display = ['given_name', 'family_name', 'uuid', "image"]
class SubjectInline(admin.StackedInline):
    """Stacked inline for Subjects (available for embedding in other admins)."""
    model = Subject
class LocationAdmin(admin.ModelAdmin):
    """Admin for Location entries, listed and filtered by name."""
    model = Location
    list_display = ('name',)
    list_filter = ('name',)
# Register every model with its admin class; ``None`` means the stock
# ModelAdmin is used. Registration order matches the original file.
for _model, _admin in (
    (Concept, ConceptAdmin),
    (Relationship, None),
    (RelationshipCategory, None),
    (Device, DeviceAdmin),
    (Encounter, EncounterAdmin),
    (Observation, ObservationAdmin),
    (Location, LocationAdmin),
    (Notification, None),
    (Observer, ObserverAdmin),
    (Procedure, ProcedureAdmin),
    (Subject, SubjectAdmin),
    (Event, None),
):
    if _admin is None:
        admin.site.register(_model)
    else:
        admin.site.register(_model, _admin)
| {
"content_hash": "759da85e51b02ca80771fbe1b297d46e",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 81,
"avg_line_length": 29.370786516853933,
"alnum_prop": 0.6966335118592196,
"repo_name": "rryan/sana.mds",
"id": "e95a6a428db65c070da1286bc8b0c5619f2a0846",
"size": "2614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mds/core/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "10323"
},
{
"name": "Python",
"bytes": "312922"
}
],
"symlink_target": ""
} |
""":mod:`ShopWizard` -- Provides an interface for searching with the Shop Wizard
.. module:: ShopWizard
:synopsis: Provides an interface for searching with the Shop Wizard
.. moduleauthor:: Joshua Gilman <joshuagilman@gmail.com>
"""
from neolib.exceptions import invalidSearch
from neolib.exceptions import invalidUser
from neolib.exceptions import parseException
from neolib.exceptions import shopWizBanned
from neolib.exceptions import activeQuest
from neolib.exceptions import invalidMethod
from neolib.inventory.ShopWizardResult import ShopWizardResult
from neolib.item.Item import Item
import logging
import time
class ShopWizard:
    """Provides an interface for searching with the Shop Wizard

    Has functionality for searching and pricing using the Shop Wizard.
    Provides several options for pricing an item including returning
    information on the lowest item in a search result.

    Attributes
       waitTime (int) -- Time to wait in seconds between searches

    Example
       >>> res = ShopWizard.search("Mau Codestone")
       >>> for item in res:
       ...     print item.price
       4000
       4005
       ...
    """

    # Search areas accepted by search()
    SHOP = "shop"
    GALLERY = "gallery"

    # Search scopes accepted by search()
    CONTAINING = "containing"
    EXACT = "exact"

    # Pricing methods accepted by price()
    AVERAGE = "AVERAGE"
    LOWDEDUCT = "LOWDEDUCT"
    AVGDEDUCT = "AVGDEDUCT"
    LOW = "LOW"
    RETLOW = "RETLOW"

    # Whitelist used by price() to validate its ``method`` argument
    methods = ['AVERAGE', 'LOWDEDUCT', 'AVGDEDUCT', 'LOW', 'RETLOW']

    # Seconds slept between consecutive wizard searches in price()
    waitTime = 5

    @staticmethod
    def search(usr, item, area = "shop", scope = "exact", min = "0", max = "99999"):
        """ Searches the shop wizard for the given item, returns result

        Uses the given parameters to send a search request with the Shop Wizard.
        Automatically parses the search results into individual items and appends
        them to and returns a ShopWizardResult.

        Parameters:
           usr (User) -- User to search with
           item (str, Item) -- Item to search for
           area (str) -- Area to search in (ShopWizard.SHOP, ShopWizard.GALLERY)
           scope (str) -- Scope to search for (ShopWizard.CONTAINING, ShopWizard.EXACT)
           min (str) -- Minimum price
           max (str) -- Maximum price

        Returns
           ShopWizardResult - Search results, or False for an unbuyable item

        Raises
           activeQuest
           shopWizBanned
           parseException
           invalidSearch
        """
        # NOTE(review): ``min``/``max`` shadow the builtins inside this method.
        if not usr:
            raise invalidUser
        if not item:
            raise invalidSearch

        # Validate area/scope/price-range before hitting the network
        if area != ShopWizard.SHOP and area != ShopWizard.GALLERY:
            logging.getLogger("neolib.shop").info("Invalid area supplied for shop wizard search: " + area)
            raise invalidSearch

        if scope != ShopWizard.CONTAINING and scope != ShopWizard.EXACT:
            # NOTE(review): this message logs ``area`` although ``scope`` was
            # the invalid value -- looks like a copy/paste slip; confirm.
            logging.getLogger("neolib.shop").info("Invalid scope supplied for shop wizard search: " + area)
            raise invalidSearch

        if int(min) < 0:
            logging.getLogger("neolib.shop").info("Invalid min value supplied for shop wizard search: " + min)
            raise invalidSearch

        if int(max) > 99999:
            logging.getLogger("neolib.shop").info("Invalid max value supplied for shop wizard search: " + max)
            raise invalidSearch

        # Accept either a plain item name or an Item instance
        if isinstance(item, Item):
            item = item.name

        # Fetch the wizard page and submit the search form
        pg = usr.getPage("http://www.neopets.com/market.phtml?type=wizard")

        form = pg.form(action="market.phtml")
        form.update({'shopwizard': item, 'table': area, 'criteria': scope, 'min_price': str(min), 'max_price': str(max)})
        pg = form.submit()

        # Indicates shop wizard banned
        if "too many searches" in pg.content:
            # NOTE(review): local ``time`` shadows the imported time module
            # here, and the trailing ``.item`` attribute looks unusual for a
            # parsed page node -- verify against the page parser API.
            time = pg.find("b", text = "Whoa there, too many searches!").parent.p.b.item
            e = shopWizBanned()
            e.time = time
            raise e

        # Indicates a faerie quest
        if "You're working for a faerie" in pg.content:
            logging.getLogger("neolib.shop").info("Could not search for " + item + ". A Faerie quest is active")
            raise activeQuest

        if "did not find" in pg.content:
            if item in pg.content:
                return False # Indicates UB item
            elif "...</span>" in pg.content:
                # Probably invalid item
                raise invalidSearch

        return ShopWizardResult(pg, usr)

    @staticmethod
    def price(usr, item, searches = 2, method = "AVERAGE", deduct = 0):
        """ Searches the shop wizard for given item and determines price with given method

        Searches the shop wizard x times (x being number given in searches) for the
        given item and collects the lowest price from each result. Uses the given
        pricing method to determine and return the price of the item. Below is information
        on each pricing method available:

        ShopWizard.AVERAGE -- Average of the lowest prices
        ShopWizard.LOWDEDUCT -- Deducts x (x = deduct) from the lowest price
        ShopWizard.AVGDEDUCT -- Deducts x (x = deduct) from the average of the lowest prices
        ShopWizard.LOW -- Returns the lowest price
        ShopWizard.RETLOW -- Returns a (price, owner, shop id) tuple for the lowest price found

        Parameters:
           usr (User) -- User to search with
           item (str, Item) -- Item to search for
           searches (int) -- Number of times to search for the item
           method (str) -- Pricing method
           deduct (int) -- Amount to deduct from the price (if applicable)

        Returns
           int -- The item price (or False if the item appears unbuyable)
        """
        if not method in ShopWizard.methods: raise invalidMethod()

        if isinstance(item, Item):
            item = item.name

        prices = []
        dets = {}
        for x in range(0, searches):
            results = ShopWizard.search(usr, item)

            # Set to -1 if not found
            if not results:
                prices.append(-1)
                continue

            prices.append(int(results[0].price))
            # Remember the owner and shop id behind each observed low price
            dets[str(results[0].price)] = (results[0].owner, results[0].id)

            # Throttle between searches to avoid a wizard ban
            time.sleep(ShopWizard.waitTime)

        # Determines if item was UB (every search returned the -1 sentinel)
        if sum(prices) == len(prices) * -1:
            return False

        # Drop the "not found" sentinels before pricing
        prices = list(filter(lambda x: x != -1, prices))

        if method == ShopWizard.RETLOW:
            price = sorted(prices)[0]
            return (price, dets[str(price)][0], dets[str(price)][1])

        return ShopWizard.__determinePrice(prices, method, deduct)

    @staticmethod
    def __determinePrice(prices, method, deduct):
        """Apply ``method`` (and ``deduct`` where applicable) to ``prices``."""
        price = 1
        if method == ShopWizard.AVERAGE:
            price = int(sum(prices) / len(prices))
        elif method == ShopWizard.LOWDEDUCT:
            if deduct < 1 and deduct > 0:
                # A fractional deduct acts as a percentage discount
                price = int(sorted(prices)[0] * (1 - deduct))
            else:
                price = sorted(prices)[0] - deduct
            # Never price at or below zero
            if price <= 0:
                price = 1
        elif method == ShopWizard.AVGDEDUCT:
            if deduct < 1 and deduct > 0:
                price = int((sum(prices) / len(prices)) * (1 - deduct))
            else:
                price = int(sum(prices) / len(prices)) - deduct
            if price <= 0:
                price = 1
        elif method == ShopWizard.LOW:
            price = sorted(prices)[0]
        else:
            logging.getLogger("neolib.shop").exception("Invalid method given in ShopWizard.priceItem: " + method)
            raise invalidMethod

        return price
| {
"content_hash": "6a57ad1215adfb32c2e782bdd566fd08",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 121,
"avg_line_length": 36.883720930232556,
"alnum_prop": 0.5712484237074401,
"repo_name": "jmgilman/Neolib",
"id": "c3385970e06c99adc982f23670bfcd9f6b1c32e6",
"size": "7930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neolib/shop/ShopWizard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "459691"
}
],
"symlink_target": ""
} |
"""
USAGE: %(program)s TEXT_INPUT WORD2VEC_OUTPUT TEXT_OUTPUT
Example script for training a word2vec model. Parameters for word2vec should be
optimized per language. TEXT_OUTPUT, true or false; whether vectors should be output to a text file.
"""
import logging
import multiprocessing
import os
import sys
from gensim.models.word2vec import LineSentence
from gensim.models.word2vec import Word2Vec
if __name__ == '__main__':
    program = os.path.basename(sys.argv[0])
    logger = logging.getLogger(program)
    logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')
    logging.root.setLevel(level=logging.INFO)
    logger.info("Running %s", ' '.join(sys.argv))

    # Check and process input arguments.
    if len(sys.argv) < 4:
        print(globals()['__doc__'] % locals())
        sys.exit(1)
    inp, outp, veco = sys.argv[1:4]

    # Find the longest article (in words) so LineSentence never splits one.
    # BUG FIX: the old code measured len(line) in *characters* while logging
    # "words" and passing the value as max_sentence_length, which is measured
    # in words. Stream the file instead of readlines() to bound memory.
    max_length = 0
    with open(inp, 'r') as f:
        for line in f:
            max_length = max(max_length, len(line.split()))
    logger.info("Max article length: %s words.", max_length)

    params = {
        'size': 400,
        'window': 10,
        'min_count': 10,
        'workers': max(1, multiprocessing.cpu_count() - 1),
        'sample': 1E-5,
    }
    word2vec = Word2Vec(LineSentence(inp, max_sentence_length=max_length),
                        **params)
    word2vec.save(outp)

    # BUG FIX: TEXT_OUTPUT arrives as the *string* "true"/"false"; any
    # non-empty string (including "false") is truthy, so `if veco:` always
    # exported. Parse the flag explicitly.
    if veco.strip().lower() in ('true', '1', 'yes'):
        word2vec.wv.save_word2vec_format(outp + '.model.txt', binary=False)
| {
"content_hash": "0ebc62863b8c3b84c4e2b3f23e40dff9",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 98,
"avg_line_length": 29,
"alnum_prop": 0.6337931034482759,
"repo_name": "hgrif/wiki-word2vec",
"id": "2bd9c8ac9ca64ef37b9aaeb60594c7e21c806a59",
"size": "1497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "create_word2vec.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "604"
},
{
"name": "Python",
"bytes": "3108"
}
],
"symlink_target": ""
} |
"""
Implements UMAP visualizations of documents in 2D space.
"""
##########################################################################
## Imports
##########################################################################
import warnings
import numpy as np
from collections import defaultdict
from yellowbrick.draw import manual_legend
from yellowbrick.text.base import TextVisualizer
from yellowbrick.style.colors import resolve_colors
from yellowbrick.exceptions import YellowbrickValueError
from sklearn.pipeline import Pipeline
# UMAP is an optional dependency: fall back to ``UMAP = None`` so the quick
# method / visualizer can raise a helpful error at call time instead of at
# import time.
try:
    from umap import UMAP
except ImportError:
    UMAP = None
except (RuntimeError, AttributeError):
    # umap-learn can fail while importing (rather than be absent), e.g. on
    # 32-bit Windows under Python 2.7 -- surface that as a warning.
    UMAP = None
    warnings.warn(
        "Error Importing UMAP.  UMAP does not support python 2.7 on Windows 32 bit."
    )
##########################################################################
## Quick Methods
##########################################################################
def umap(
    X,
    y=None,
    ax=None,
    classes=None,
    colors=None,
    colormap=None,
    alpha=0.7,
    show=True,
    **kwargs
):
    """Quick method: project a vectorized corpus into 2D with UMAP.

    UMAP (Uniform Manifold Approximation and Projection) is a nonlinear
    dimensionality reduction method well suited to embedding documents in
    two or three dimensions for visualization as a scatter plot, showing
    clusters of points and their relative proximities. It is fast,
    scalable, and works directly on sparse matrices, so no ``TruncatedSVD``
    pre-processing step is needed.

    UMAP currently defaults to Euclidean distance; for text data
    ``metric='cosine'`` (passed through ``kwargs``) is usually a better
    choice. Hellinger distance, appropriate for ``CountVectorizer`` data,
    is expected in a forthcoming UMAP release.

    Parameters
    ----------
    X : ndarray or DataFrame of shape n x m
        Matrix of n instances with m features representing the corpus of
        vectorized documents to visualize.

    y : ndarray or Series of length n
        Optional target or class values (e.g. cluster labels) used to
        color the points; used for both classification and clustering.

    ax : matplotlib axes
        The axes to plot the figure on.

    classes : list of strings
        The names of the classes in the target, used to create a legend.

    colors : list or tuple of colors
        Specify the colors for each individual class.

    colormap : string or matplotlib cmap
        Sequential colormap for continuous target.

    alpha : float, default: 0.7
        Transparency: 1 is completely opaque, 0 completely transparent;
        makes densely clustered points more visible.

    show : bool, default: True
        If True, calls ``show()`` (which in turn calls ``plt.show()``);
        if False, simply calls ``finalize()``.

    kwargs : dict
        Additional keyword arguments passed to the UMAP transformer.

    Returns
    -------
    visualizer : UMAPVisualizer
        Returns the fitted, finalized visualizer.
    """
    # Build the visualizer; remaining kwargs flow to the UMAP transformer.
    viz = UMAPVisualizer(
        ax=ax, classes=classes, colors=colors, colormap=colormap, alpha=alpha, **kwargs
    )

    # fit() performs the projection and draws it (kwargs forwarded as well).
    viz.fit(X, y, **kwargs)

    if show:
        viz.show()
    else:
        viz.finalize()

    return viz
##########################################################################
## UMAPVisualizer
##########################################################################
class UMAPVisualizer(TextVisualizer):
    """
    Display a projection of a vectorized corpus in two dimensions using UMAP (Uniform
    Manifold Approximation and Projection), a nonlinear dimensionality reduction method
    that is particularly well suited to embedding in two or three dimensions for
    visualization as a scatter plot. UMAP is a relatively new technique but is often
    used to visualize clusters or groups of data points and their relative proximities.
    It typically is fast, scalable, and can be applied directly to sparse matrices
    eliminating the need to run a ``TruncatedSVD`` as a pre-processing step.

    The current default for UMAP is Euclidean distance. Hellinger distance would be a
    more appropriate distance function to use with CountVectorize data. That will be
    released in a forthcoming version of UMAP. In the meantime cosine distance is likely
    a better text default that Euclidean and can be set using the keyword argument
    ``metric='cosine'``.

    For more, see https://github.com/lmcinnes/umap

    Parameters
    ----------
    ax : matplotlib axes
        The axes to plot the figure on.

    labels : list of strings
        The names of the classes in the target, used to create a legend.
        Labels must match names of classes in sorted order.

    colors : list or tuple of colors
        Specify the colors for each individual class

    colormap : string or matplotlib cmap
        Sequential colormap for continuous target

    random_state : int, RandomState instance or None, optional, default: None
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by np.random. The random state is applied to the preliminary
        decomposition as well as UMAP.

    alpha : float, default: 0.7
        Specify a transparency where 1 is completely opaque and 0 is completely
        transparent. This property makes densely clustered points more visible.

    kwargs : dict
        Pass any additional keyword arguments to the UMAP transformer.

    Examples
    --------
    >>> model = MyVisualizer(metric='cosine')
    >>> model.fit(X)
    >>> model.show()
    """

    # Sentinel class label used when no y and no labels are supplied.
    # NOTE: cannot be np.nan
    NULL_CLASS = None

    def __init__(
        self,
        ax=None,
        labels=None,
        classes=None,
        colors=None,
        colormap=None,
        random_state=None,
        alpha=0.7,
        **kwargs
    ):
        # Fail fast if the optional umap-learn dependency is missing.
        if UMAP is None:
            raise YellowbrickValueError(
                (
                    "umap package doesn't seem to be installed."
                    "Please install UMAP via: pip install umap-learn"
                )
            )

        # Visual Parameters
        self.alpha = alpha
        self.labels = labels
        self.colors = colors
        self.colormap = colormap
        self.random_state = random_state
        # NOTE(review): the ``classes`` parameter is accepted here but never
        # stored or used -- confirm whether it should mirror ``labels``.

        # Fetch UMAP kwargs from kwargs by popping only keys belonging to UMAP params
        umap_kwargs = {
            key: kwargs.pop(key) for key in UMAP().get_params() if key in kwargs
        }

        # UMAP doesn't require any pre-processing before embedding and thus doesn't
        # require a pipeline.
        self.transformer_ = self.make_transformer(umap_kwargs)

        # Call super at the end so that size and title are set correctly
        super(UMAPVisualizer, self).__init__(ax=ax, **kwargs)

    def make_transformer(self, umap_kwargs={}):
        """
        Creates an internal transformer pipeline to project the data set into
        2D space using UMAP. This method will reset the transformer on the
        class.

        Parameters
        ----------
        umap_kwargs : dict
            Keyword arguments for the internal UMAP transformer
            (read-only here, so the mutable default is harmless).

        Returns
        -------
        transformer : Pipeline
            Pipelined transformer for UMAP projections
        """
        # Create the pipeline steps
        steps = []

        # Add the UMAP manifold
        steps.append(
            (
                "umap",
                UMAP(n_components=2, random_state=self.random_state, **umap_kwargs),
            )
        )

        # return the pipeline
        return Pipeline(steps)

    def fit(self, X, y=None, **kwargs):
        """
        The fit method is the primary drawing input for the UMAP projection
        since the visualization requires both X and an optional y value. The
        fit method expects an array of numeric vectors, so text documents must
        be vectorized before passing them to this method.

        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix of n instances with m features representing the corpus of
            vectorized documents to visualize with UMAP.

        y : ndarray or Series of length n
            An optional array or series of target or class values for
            instances. If this is specified, then the points will be colored
            according to their class. Often cluster labels are passed in to
            color the documents in cluster space, so this method is used both
            for classification and clustering methods.

        kwargs : dict
            Pass generic arguments to the drawing method

        Returns
        -------
        self : instance
            Returns the instance of the transformer/visualizer
        """
        # Store the classes we observed in y
        if y is not None:
            self.classes_ = np.unique(y)
        elif y is None and self.labels is not None:
            # No target: treat the corpus as a single class named by labels[0]
            self.classes_ = np.array([self.labels[0]])
        else:
            self.classes_ = np.array([self.NULL_CLASS])

        # Fit our internal transformer and transform the data.
        vecs = self.transformer_.fit_transform(X)
        self.n_instances_ = vecs.shape[0]

        # Draw the vectors
        self.draw(vecs, y, **kwargs)

        # Fit always returns self.
        return self

    def draw(self, points, target=None, **kwargs):
        """
        Called from the fit method, this method draws the UMAP scatter plot,
        from a set of decomposed points in 2 dimensions. This method also
        accepts a third dimension, target, which is used to specify the colors
        of each of the points. If the target is not specified, then the points
        are plotted as a single cloud to show similar documents.
        """
        # Resolve the labels with the classes
        labels = self.labels if self.labels is not None else self.classes_
        if len(labels) != len(self.classes_):
            raise YellowbrickValueError(
                (
                    "number of supplied labels ({}) does not "
                    "match the number of classes ({})"
                ).format(len(labels), len(self.classes_))
            )

        # Create the color mapping for the labels.
        self.color_values_ = resolve_colors(
            n_colors=len(labels), colormap=self.colormap, colors=self.colors
        )
        colors = dict(zip(labels, self.color_values_))

        # Transform labels into a map of class to label
        labels = dict(zip(self.classes_, labels))

        # Expand the points into vectors of x and y for scatter plotting,
        # assigning them to their label if the label has been passed in.
        # Additionally, filter classes not specified directly by the user.
        series = defaultdict(lambda: {"x": [], "y": []})

        if target is not None:
            for t, point in zip(target, points):
                label = labels[t]
                series[label]["x"].append(point[0])
                series[label]["y"].append(point[1])
        else:
            # No target: every point belongs to the single (possibly null) class
            label = self.classes_[0]
            for x, y in points:
                series[label]["x"].append(x)
                series[label]["y"].append(y)

        # Plot the points
        for label, points in series.items():
            self.ax.scatter(
                points["x"], points["y"], c=colors[label], alpha=self.alpha, label=label
            )

        return self.ax

    def finalize(self, **kwargs):
        """
        Finalize the drawing by adding a title and legend, and removing the
        axes objects that do not convey information about UMAP.
        """
        self.set_title("UMAP Projection of {} Documents".format(self.n_instances_))

        # Remove the ticks
        self.ax.set_yticks([])
        self.ax.set_xticks([])

        # Add the legend outside of the figure box (skip for the null class).
        if not all(self.classes_ == np.array([self.NULL_CLASS])):
            box = self.ax.get_position()
            self.ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
            manual_legend(
                self,
                self.classes_,
                self.color_values_,
                loc="center left",
                bbox_to_anchor=(1, 0.5),
            )
| {
"content_hash": "fda4a030c175e0b9baf3f270fa78d89b",
"timestamp": "",
"source": "github",
"line_count": 375,
"max_line_length": 88,
"avg_line_length": 35.138666666666666,
"alnum_prop": 0.611747742278212,
"repo_name": "DistrictDataLabs/yellowbrick",
"id": "276257b6227f931a8ea743ade974ffda46a90609",
"size": "13473",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "yellowbrick/text/umap_vis.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1069"
},
{
"name": "Python",
"bytes": "1612806"
},
{
"name": "TeX",
"bytes": "3743"
}
],
"symlink_target": ""
} |
from django.conf.urls import url
from .views import (EveLogin, EveCallback,
Dashboard, DisplayTopic,
LogOffSafely, Settings,
session_required,
DiscordCallback, DiscordLogin,
CreateTopic, CreateComment)
# Route table for the EVE SSO app. Views that require an authenticated
# session are wrapped in ``session_required``. Pattern order matters for
# Django's first-match resolution, so it is preserved as-is.
urlpatterns = [
    # OAuth callback hit by EVE after the user authorizes the app
    url(r'^eve_callback/$', EveCallback.as_view(), name='eve_callback'),
    url(r'^$', EveLogin.as_view(), name='login'),
    url(r'^dashboard/$', session_required(Dashboard.as_view()),
        name='dashboard'),
    url(r'^logout/$', LogOffSafely.as_view(), name='logout'),
    url(r'^settings/$', session_required(Settings.as_view()), name='settings'),
    # Topic detail pages live under the dashboard, keyed by alphanumeric slug
    url(r'^dashboard/(?P<topic_slug>[a-zA-Z0-9]+)/$', session_required(
        DisplayTopic.as_view()),
        name='topic'),
    # Discord account linking (login + OAuth callback)
    url(r'^discord_login/$', session_required(DiscordLogin.as_view()),
        name='discord_login'),
    url(r'^discord_callback/$', DiscordCallback.as_view(),
        name='discord_callback'),
    url(r'^create_topic/$', session_required(CreateTopic.as_view()),
        name='create_topic'),
    url(r'^create_comment/$', session_required(CreateComment.as_view()),
        name='create_comment')
]
| {
"content_hash": "d959bad68b52934cf1904ce378b1109b",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 79,
"avg_line_length": 38.935483870967744,
"alnum_prop": 0.6006628003314002,
"repo_name": "prusya/midas22",
"id": "868ba81037ffae103672c2c0c68cdd0ebc152a5c",
"size": "1207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eve_sso/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2977"
},
{
"name": "HTML",
"bytes": "19296"
},
{
"name": "Python",
"bytes": "47215"
}
],
"symlink_target": ""
} |
"""
The wrapper of the RHN Satellite/Spacewalk's mod_python handler
adding the coverage collecting functionality.
"""
from spacewalkcoverage.spacewalkcoverage import SpacewalkCoverage
# Import the decorated server
import spacewalk.server.apacheServer
# Single module-level coverage collector shared by all handler wrappers below.
cov = SpacewalkCoverage()
def HeaderParserHandler(req):
    """Delegate to the real HeaderParserHandler while collecting coverage.

    BUG FIX: ``cov.stop()`` is now in a ``finally`` block so an exception
    raised by the wrapped handler no longer leaves the collector running.
    """
    cov.start()
    try:
        return spacewalk.server.apacheServer.HeaderParserHandler(req)
    finally:
        cov.stop()
def Handler(req):
    """Delegate to the real Handler while collecting coverage.

    BUG FIX: ``cov.stop()`` is now in a ``finally`` block so an exception
    raised by the wrapped handler no longer leaves the collector running.
    """
    cov.start()
    try:
        return spacewalk.server.apacheServer.Handler(req)
    finally:
        cov.stop()
def CleanupHandler(req):
    """Delegate to the real CleanupHandler while collecting coverage.

    BUG FIX: ``cov.stop()`` is now in a ``finally`` block so an exception
    raised by the wrapped handler no longer leaves the collector running.
    """
    cov.start()
    try:
        return spacewalk.server.apacheServer.CleanupHandler(req)
    finally:
        cov.stop()
# Keep log handler intact: logging needs no coverage instrumentation, so the
# original handler is re-exported unchanged.
LogHandler = spacewalk.server.apacheServer.LogHandler
| {
"content_hash": "3ffc193c505dffdaca56dad8254a2d9f",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 69,
"avg_line_length": 23.08823529411765,
"alnum_prop": 0.7605095541401274,
"repo_name": "vlki/spacewalk-xmlrpc-tests",
"id": "47005ae9b66faf157080b04244bfee298399f273",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coverage/python_lib/spacewalkcoverage/server/apacheServer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26746"
},
{
"name": "Shell",
"bytes": "30730"
}
],
"symlink_target": ""
} |
import unittest
from unittest import TestCase
from contextlib import contextmanager
from posix import stat_result, statvfs_result
import array
from swift.common import ring, utils
from shutil import rmtree
import os
import mock
from swift import __version__ as swiftver
from swift.common.swob import Request
from swift.common.middleware import recon
def fake_check_mount(a, b):
    """check_mount stand-in that always fails with an I/O error."""
    raise OSError('Input/Output Error')
class FakeApp(object):
    """Minimal WSGI app stub: ignores its inputs, returns a marker string."""

    def __call__(self, env, start_response):
        return "FAKE APP"
def start_response(*args):
    """No-op WSGI ``start_response`` stub; accepts anything, does nothing."""
    return None
class FakeFromCache(object):
    """Replacement for ``_from_recon_cache`` that records every invocation
    and always returns the canned payload supplied at construction."""

    def __init__(self, out=None):
        self.fakeout = out
        self.fakeout_calls = []

    def fake_from_recon_cache(self, *args, **kwargs):
        # Capture the full call signature for later assertions.
        self.fakeout_calls.append((args, kwargs))
        return self.fakeout
class OpenAndReadTester(object):
    """File-object stub that replays ``output_iter`` and records calls.

    Doubles as an iterator (Python 2 ``next`` protocol) and as a
    context-managed ``open`` whose handle supports ``read``.
    """

    def __init__(self, output_iter):
        self.index = 0
        # NOTE(review): out_len is len-1, so iteration via next() stops one
        # element early -- presumably the final element is a sentinel such as
        # ''; confirm against callers.
        self.out_len = len(output_iter) - 1
        self.data = output_iter
        self.output_iter = iter(output_iter)
        self.read_calls = []
        self.open_calls = []

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol (no __next__ defined, so this class is
        # not iterable under Python 3's for-loop).
        if self.index == self.out_len:
            raise StopIteration
        else:
            line = self.data[self.index]
            self.index += 1
            return line

    def read(self, *args, **kwargs):
        # Record the call; return the next canned chunk, or '' at exhaustion
        # to mimic a real file hitting EOF. Uses the Py2 .next() method.
        self.read_calls.append((args, kwargs))
        try:
            return self.output_iter.next()
        except StopIteration:
            return ''

    @contextmanager
    def open(self, *args, **kwargs):
        # Record the open call and yield self as the "file handle".
        self.open_calls.append((args, kwargs))
        yield self
class MockOS(object):
    """Fake os/utils layer: returns canned values for listdir, ismount and
    statvfs while recording every call for later assertions."""

    def __init__(self, ls_out=None, im_out=False, statvfs_out=None):
        self.ls_output = ls_out
        self.ismount_output = im_out
        self.statvfs_output = statvfs_out
        self.listdir_calls = []
        self.statvfs_calls = []
        self.ismount_calls = []

    def fake_listdir(self, *args, **kwargs):
        """Record the call and return the canned directory listing."""
        self.listdir_calls.append((args, kwargs))
        return self.ls_output

    def fake_ismount(self, *args, **kwargs):
        """Record the call; raise the canned value if it is an exception."""
        self.ismount_calls.append((args, kwargs))
        outcome = self.ismount_output
        if isinstance(outcome, Exception):
            raise outcome
        return outcome

    def fake_statvfs(self, *args, **kwargs):
        """Record the call and wrap the canned tuple in a statvfs_result."""
        self.statvfs_calls.append((args, kwargs))
        return statvfs_result(self.statvfs_output)
class FakeRecon(object):
    """Canned implementations for the ReconMiddleware endpoints.

    Each ``fake_*`` method returns a distinctive payload so a test can
    verify the middleware routed a request to the right handler; the
    ``fake_*_rtype`` attributes capture which recon type was requested.
    The ``raise_*`` methods inject specific failures.
    """

    def __init__(self):
        # Last recon_type passed to each parameterized fake handler.
        self.fake_replication_rtype = None
        self.fake_updater_rtype = None
        self.fake_auditor_rtype = None
        self.fake_expirer_rtype = None

    def fake_mem(self):
        return {'memtest': "1"}

    def fake_load(self):
        return {'loadtest': "1"}

    def fake_async(self):
        return {'asynctest': "1"}

    def fake_get_device_info(self):
        return {"/srv/1/node": ["sdb1"]}

    def fake_replication(self, recon_type):
        self.fake_replication_rtype = recon_type
        return {'replicationtest': "1"}

    def fake_updater(self, recon_type):
        self.fake_updater_rtype = recon_type
        return {'updatertest': "1"}

    def fake_auditor(self, recon_type):
        self.fake_auditor_rtype = recon_type
        return {'auditortest': "1"}

    def fake_expirer(self, recon_type):
        self.fake_expirer_rtype = recon_type
        return {'expirertest': "1"}

    def fake_mounted(self):
        return {'mountedtest': "1"}

    def fake_unmounted(self):
        return {'unmountedtest': "1"}

    def fake_no_unmounted(self):
        return []

    def fake_diskusage(self):
        return {'diskusagetest': "1"}

    def fake_ringmd5(self):
        return {'ringmd5test': "1"}

    def fake_swiftconfmd5(self):
        return {'/etc/swift/swift.conf': "abcdef"}

    def fake_quarantined(self):
        return {'quarantinedtest': "1"}

    def fake_sockstat(self):
        return {'sockstattest': "1"}

    def nocontent(self):
        return None

    # Error-injection helpers used to exercise failure paths.
    def raise_IOError(self, *args, **kwargs):
        raise IOError

    def raise_ValueError(self, *args, **kwargs):
        raise ValueError

    def raise_Exception(self, *args, **kwargs):
        raise Exception
class TestReconSuccess(TestCase):
def setUp(self):
# can't use mkdtemp here as 2.6 gzip puts the filename in the header
# which will cause ring md5 checks to fail
self.tempdir = '/tmp/swift_recon_md5_test'
utils.mkdirs(self.tempdir)
self._create_rings()
self.app = recon.ReconMiddleware(FakeApp(),
{'swift_dir': self.tempdir})
self.mockos = MockOS()
self.fakecache = FakeFromCache()
self.real_listdir = os.listdir
self.real_ismount = utils.ismount
self.real_statvfs = os.statvfs
os.listdir = self.mockos.fake_listdir
utils.ismount = self.mockos.fake_ismount
os.statvfs = self.mockos.fake_statvfs
self.real_from_cache = self.app._from_recon_cache
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
self.frecon = FakeRecon()
def tearDown(self):
os.listdir = self.real_listdir
utils.ismount = self.real_ismount
os.statvfs = self.real_statvfs
del self.mockos
self.app._from_recon_cache = self.real_from_cache
del self.fakecache
rmtree(self.tempdir)
def _create_rings(self):
def fake_time():
return 0
def fake_base(fname):
# least common denominator with gzip versions is to
# not use the .gz extension in the gzip header
return fname[:-3]
accountgz = os.path.join(self.tempdir, 'account.ring.gz')
containergz = os.path.join(self.tempdir, 'container.ring.gz')
objectgz = os.path.join(self.tempdir, 'object.ring.gz')
objectgz_1 = os.path.join(self.tempdir, 'object-1.ring.gz')
objectgz_2 = os.path.join(self.tempdir, 'object-2.ring.gz')
# make the rings unique so they have different md5 sums
intended_replica2part2dev_id_a = [
array.array('H', [3, 1, 3, 1]),
array.array('H', [0, 3, 1, 4]),
array.array('H', [1, 4, 0, 3])]
intended_replica2part2dev_id_c = [
array.array('H', [4, 3, 0, 1]),
array.array('H', [0, 1, 3, 4]),
array.array('H', [3, 4, 0, 1])]
intended_replica2part2dev_id_o = [
array.array('H', [0, 1, 0, 1]),
array.array('H', [0, 1, 0, 1]),
array.array('H', [3, 4, 3, 4])]
intended_replica2part2dev_id_o_1 = [
array.array('H', [1, 0, 1, 0]),
array.array('H', [1, 0, 1, 0]),
array.array('H', [4, 3, 4, 3])]
intended_replica2part2dev_id_o_2 = [
array.array('H', [1, 1, 1, 0]),
array.array('H', [1, 0, 1, 3]),
array.array('H', [4, 2, 4, 3])]
intended_devs = [{'id': 0, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'device': 'sda1'},
{'id': 1, 'zone': 0, 'weight': 1.0,
'ip': '10.1.1.1', 'port': 6000,
'device': 'sdb1'},
None,
{'id': 3, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.1', 'port': 6000,
'device': 'sdc1'},
{'id': 4, 'zone': 2, 'weight': 1.0,
'ip': '10.1.2.2', 'port': 6000,
'device': 'sdd1'}]
# eliminate time from the equation as gzip 2.6 includes
# it in the header resulting in md5 file mismatch, also
# have to mock basename as one version uses it, one doesn't
with mock.patch("time.time", fake_time):
with mock.patch("os.path.basename", fake_base):
ring.RingData(intended_replica2part2dev_id_a,
intended_devs, 5).save(accountgz, mtime=None)
ring.RingData(intended_replica2part2dev_id_c,
intended_devs, 5).save(containergz, mtime=None)
ring.RingData(intended_replica2part2dev_id_o,
intended_devs, 5).save(objectgz, mtime=None)
ring.RingData(intended_replica2part2dev_id_o_1,
intended_devs, 5).save(objectgz_1, mtime=None)
ring.RingData(intended_replica2part2dev_id_o_2,
intended_devs, 5).save(objectgz_2, mtime=None)
def test_get_ring_md5(self):
def fake_open(self, f):
raise IOError
expt_out = {'%s/account.ring.gz' % self.tempdir:
'd288bdf39610e90d4f0b67fa00eeec4f',
'%s/container.ring.gz' % self.tempdir:
'9a5a05a8a4fbbc61123de792dbe4592d',
'%s/object-1.ring.gz' % self.tempdir:
'3f1899b27abf5f2efcc67d6fae1e1c64',
'%s/object-2.ring.gz' % self.tempdir:
'8f0e57079b3c245d9b3d5a428e9312ee',
'%s/object.ring.gz' % self.tempdir:
'da02bfbd0bf1e7d56faea15b6fe5ab1e'}
self.assertEquals(sorted(self.app.get_ring_md5().items()),
sorted(expt_out.items()))
# cover error path
self.app.get_ring_md5(openr=fake_open)
def test_from_recon_cache(self):
oart = OpenAndReadTester(['{"notneeded": 5, "testkey1": "canhazio"}'])
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('test.cache', 'r'), {})])
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': 'canhazio'})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_ioerror(self):
oart = self.frecon.raise_IOError
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_valueerror(self):
oart = self.frecon.raise_ValueError
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_from_recon_cache_exception(self):
oart = self.frecon.raise_Exception
self.app._from_recon_cache = self.real_from_cache
rv = self.app._from_recon_cache(['testkey1', 'notpresentkey'],
'test.cache', openr=oart)
self.assertEquals(rv, {'notpresentkey': None, 'testkey1': None})
self.app._from_recon_cache = self.fakecache.fake_from_recon_cache
def test_get_mounted(self):
mounts_content = [
'rootfs / rootfs rw 0 0',
'none /sys sysfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /proc proc rw,nosuid,nodev,noexec,relatime 0 0',
'none /dev devtmpfs rw,relatime,size=248404k,nr_inodes=62101,'
'mode=755 0 0',
'none /dev/pts devpts rw,nosuid,noexec,relatime,gid=5,mode=620,'
'ptmxmode=000 0 0',
'/dev/disk/by-uuid/e5b143bd-9f31-49a7-b018-5e037dc59252 / ext4'
' rw,relatime,errors=remount-ro,barrier=1,data=ordered 0 0',
'none /sys/fs/fuse/connections fusectl rw,relatime 0 0',
'none /sys/kernel/debug debugfs rw,relatime 0 0',
'none /sys/kernel/security securityfs rw,relatime 0 0',
'none /dev/shm tmpfs rw,nosuid,nodev,relatime 0 0',
'none /var/run tmpfs rw,nosuid,relatime,mode=755 0 0',
'none /var/lock tmpfs rw,nosuid,nodev,noexec,relatime 0 0',
'none /lib/init/rw tmpfs rw,nosuid,relatime,mode=755 0 0',
'/dev/loop0 /mnt/sdb1 xfs rw,noatime,nodiratime,attr2,nobarrier,'
'logbufs=8,noquota 0 0',
'rpc_pipefs /var/lib/nfs/rpc_pipefs rpc_pipefs rw,relatime 0 0',
'nfsd /proc/fs/nfsd nfsd rw,relatime 0 0',
'none /proc/fs/vmblock/mountPoint vmblock rw,relatime 0 0',
'']
mounted_resp = [
{'device': 'rootfs', 'path': '/'},
{'device': 'none', 'path': '/sys'},
{'device': 'none', 'path': '/proc'},
{'device': 'none', 'path': '/dev'},
{'device': 'none', 'path': '/dev/pts'},
{'device': '/dev/disk/by-uuid/'
'e5b143bd-9f31-49a7-b018-5e037dc59252', 'path': '/'},
{'device': 'none', 'path': '/sys/fs/fuse/connections'},
{'device': 'none', 'path': '/sys/kernel/debug'},
{'device': 'none', 'path': '/sys/kernel/security'},
{'device': 'none', 'path': '/dev/shm'},
{'device': 'none', 'path': '/var/run'},
{'device': 'none', 'path': '/var/lock'},
{'device': 'none', 'path': '/lib/init/rw'},
{'device': '/dev/loop0', 'path': '/mnt/sdb1'},
{'device': 'rpc_pipefs', 'path': '/var/lib/nfs/rpc_pipefs'},
{'device': 'nfsd', 'path': '/proc/fs/nfsd'},
{'device': 'none', 'path': '/proc/fs/vmblock/mountPoint'}]
oart = OpenAndReadTester(mounts_content)
rv = self.app.get_mounted(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/mounts', 'r'), {})])
self.assertEquals(rv, mounted_resp)
def test_get_load(self):
oart = OpenAndReadTester(['0.03 0.03 0.00 1/220 16306'])
rv = self.app.get_load(openr=oart.open)
self.assertEquals(oart.read_calls, [((), {})])
self.assertEquals(oart.open_calls, [(('/proc/loadavg', 'r'), {})])
self.assertEquals(rv, {'5m': 0.029999999999999999, '15m': 0.0,
'processes': 16306, 'tasks': '1/220',
'1m': 0.029999999999999999})
def test_get_mem(self):
meminfo_content = ['MemTotal: 505840 kB',
'MemFree: 26588 kB',
'Buffers: 44948 kB',
'Cached: 146376 kB',
'SwapCached: 14736 kB',
'Active: 194900 kB',
'Inactive: 193412 kB',
'Active(anon): 94208 kB',
'Inactive(anon): 102848 kB',
'Active(file): 100692 kB',
'Inactive(file): 90564 kB',
'Unevictable: 0 kB',
'Mlocked: 0 kB',
'SwapTotal: 407544 kB',
'SwapFree: 313436 kB',
'Dirty: 104 kB',
'Writeback: 0 kB',
'AnonPages: 185268 kB',
'Mapped: 9592 kB',
'Shmem: 68 kB',
'Slab: 61716 kB',
'SReclaimable: 46620 kB',
'SUnreclaim: 15096 kB',
'KernelStack: 1760 kB',
'PageTables: 8832 kB',
'NFS_Unstable: 0 kB',
'Bounce: 0 kB',
'WritebackTmp: 0 kB',
'CommitLimit: 660464 kB',
'Committed_AS: 565608 kB',
'VmallocTotal: 34359738367 kB',
'VmallocUsed: 266724 kB',
'VmallocChunk: 34359467156 kB',
'HardwareCorrupted: 0 kB',
'HugePages_Total: 0',
'HugePages_Free: 0',
'HugePages_Rsvd: 0',
'HugePages_Surp: 0',
'Hugepagesize: 2048 kB',
'DirectMap4k: 10240 kB',
'DirectMap2M: 514048 kB',
'']
meminfo_resp = {'WritebackTmp': '0 kB',
'SwapTotal': '407544 kB',
'Active(anon)': '94208 kB',
'SwapFree': '313436 kB',
'DirectMap4k': '10240 kB',
'KernelStack': '1760 kB',
'MemFree': '26588 kB',
'HugePages_Rsvd': '0',
'Committed_AS': '565608 kB',
'Active(file)': '100692 kB',
'NFS_Unstable': '0 kB',
'VmallocChunk': '34359467156 kB',
'Writeback': '0 kB',
'Inactive(file)': '90564 kB',
'MemTotal': '505840 kB',
'VmallocUsed': '266724 kB',
'HugePages_Free': '0',
'AnonPages': '185268 kB',
'Active': '194900 kB',
'Inactive(anon)': '102848 kB',
'CommitLimit': '660464 kB',
'Hugepagesize': '2048 kB',
'Cached': '146376 kB',
'SwapCached': '14736 kB',
'VmallocTotal': '34359738367 kB',
'Shmem': '68 kB',
'Mapped': '9592 kB',
'SUnreclaim': '15096 kB',
'Unevictable': '0 kB',
'SReclaimable': '46620 kB',
'Mlocked': '0 kB',
'DirectMap2M': '514048 kB',
'HugePages_Surp': '0',
'Bounce': '0 kB',
'Inactive': '193412 kB',
'PageTables': '8832 kB',
'HardwareCorrupted': '0 kB',
'HugePages_Total': '0',
'Slab': '61716 kB',
'Buffers': '44948 kB',
'Dirty': '104 kB'}
oart = OpenAndReadTester(meminfo_content)
rv = self.app.get_mem(openr=oart.open)
self.assertEquals(oart.open_calls, [(('/proc/meminfo', 'r'), {})])
self.assertEquals(rv, meminfo_resp)
def test_get_async_info(self):
from_cache_response = {'async_pending': 5}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_async_info()
self.assertEquals(rv, {'async_pending': 5})
def test_get_replication_info_account(self):
from_cache_response = {
"replication_stats": {
"attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 1333044050.855202,
"success": 2, "ts_repl": 0},
"replication_time": 0.2615511417388916,
"replication_last": 1357969645.25}
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('account')
self.assertEquals(self.fakecache.fakeout_calls,
[((['replication_time', 'replication_stats',
'replication_last'],
'/var/cache/swift/account.recon'), {})])
self.assertEquals(rv, {
"replication_stats": {
"attempted": 1, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 2, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 1333044050.855202,
"success": 2, "ts_repl": 0},
"replication_time": 0.2615511417388916,
"replication_last": 1357969645.25})
def test_get_replication_info_container(self):
from_cache_response = {
"replication_time": 200.0,
"replication_stats": {
"attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 5.5, "success": 358,
"ts_repl": 0},
"replication_last": 1357969645.25}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('container')
self.assertEquals(self.fakecache.fakeout_calls,
[((['replication_time', 'replication_stats',
'replication_last'],
'/var/cache/swift/container.recon'), {})])
self.assertEquals(rv, {
"replication_time": 200.0,
"replication_stats": {
"attempted": 179, "diff": 0,
"diff_capped": 0, "empty": 0,
"failure": 0, "hashmatch": 0,
"no_change": 358, "remote_merge": 0,
"remove": 0, "rsync": 0,
"start": 5.5, "success": 358,
"ts_repl": 0},
"replication_last": 1357969645.25})
def test_get_replication_object(self):
from_cache_response = {"object_replication_time": 200.0,
"object_replication_last": 1357962809.15}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_replication_info('object')
self.assertEquals(self.fakecache.fakeout_calls,
[((['object_replication_time',
'object_replication_last'],
'/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {'object_replication_time': 200.0,
'object_replication_last': 1357962809.15})
def test_get_updater_info_container(self):
from_cache_response = {"container_updater_sweep": 18.476239919662476}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('container')
self.assertEquals(self.fakecache.fakeout_calls,
[((['container_updater_sweep'],
'/var/cache/swift/container.recon'), {})])
self.assertEquals(rv, {"container_updater_sweep": 18.476239919662476})
def test_get_updater_info_object(self):
from_cache_response = {"object_updater_sweep": 0.79848217964172363}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_updater_info('object')
self.assertEquals(self.fakecache.fakeout_calls,
[((['object_updater_sweep'],
'/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {"object_updater_sweep": 0.79848217964172363})
def test_get_auditor_info_account(self):
from_cache_response = {"account_auditor_pass_completed": 0.24,
"account_audits_failed": 0,
"account_audits_passed": 6,
"account_audits_since": "1333145374.1373529"}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('account')
self.assertEquals(self.fakecache.fakeout_calls,
[((['account_audits_passed',
'account_auditor_pass_completed',
'account_audits_since',
'account_audits_failed'],
'/var/cache/swift/account.recon'), {})])
self.assertEquals(rv, {"account_auditor_pass_completed": 0.24,
"account_audits_failed": 0,
"account_audits_passed": 6,
"account_audits_since": "1333145374.1373529"})
def test_get_auditor_info_container(self):
from_cache_response = {"container_auditor_pass_completed": 0.24,
"container_audits_failed": 0,
"container_audits_passed": 6,
"container_audits_since": "1333145374.1373529"}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('container')
self.assertEquals(self.fakecache.fakeout_calls,
[((['container_audits_passed',
'container_auditor_pass_completed',
'container_audits_since',
'container_audits_failed'],
'/var/cache/swift/container.recon'), {})])
self.assertEquals(rv, {"container_auditor_pass_completed": 0.24,
"container_audits_failed": 0,
"container_audits_passed": 6,
"container_audits_since": "1333145374.1373529"})
def test_get_auditor_info_object(self):
from_cache_response = {
"object_auditor_stats_ALL": {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
"object_auditor_stats_ZBF": {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('object')
self.assertEquals(self.fakecache.fakeout_calls,
[((['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
'/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {
"object_auditor_stats_ALL": {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
"object_auditor_stats_ZBF": {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}})
def test_get_auditor_info_object_parallel_once(self):
from_cache_response = {
"object_auditor_stats_ALL": {
'disk1': {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
'disk2': {
"audit_time": 115,
"bytes_processed": 234660,
"completed": 115,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}},
"object_auditor_stats_ZBF": {'disk1disk2': {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}}}
self.fakecache.fakeout_calls = []
self.fakecache.fakeout = from_cache_response
rv = self.app.get_auditor_info('object')
self.assertEquals(self.fakecache.fakeout_calls,
[((['object_auditor_stats_ALL',
'object_auditor_stats_ZBF'],
'/var/cache/swift/object.recon'), {})])
self.assertEquals(rv, {
"object_auditor_stats_ALL": {
'disk1': {
"audit_time": 115.14418768882751,
"bytes_processed": 234660,
"completed": 115.4512460231781,
"errors": 0,
"files_processed": 2310,
"quarantined": 0},
'disk2': {
"audit_time": 115,
"bytes_processed": 234660,
"completed": 115,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}},
"object_auditor_stats_ZBF": {'disk1disk2': {
"audit_time": 45.877294063568115,
"bytes_processed": 0,
"completed": 46.181446075439453,
"errors": 0,
"files_processed": 2310,
"quarantined": 0}}})
def test_get_unmounted(self):
unmounted_resp = [{'device': 'fakeone', 'mounted': False},
{'device': 'faketwo', 'mounted': False}]
self.mockos.ls_output = ['fakeone', 'faketwo']
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEquals(rv, unmounted_resp)
def test_get_unmounted_everything_normal(self):
unmounted_resp = []
self.mockos.ls_output = ['fakeone', 'faketwo']
self.mockos.ismount_output = True
rv = self.app.get_unmounted()
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEquals(rv, unmounted_resp)
def test_get_unmounted_checkmount_fail(self):
unmounted_resp = [{'device': 'fakeone', 'mounted': 'brokendrive'}]
self.mockos.ls_output = ['fakeone']
self.mockos.ismount_output = OSError('brokendrive')
rv = self.app.get_unmounted()
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEquals(self.mockos.ismount_calls,
[(('/srv/node/fakeone',), {})])
self.assertEquals(rv, unmounted_resp)
def test_no_get_unmounted(self):
def fake_checkmount_true(*args):
return True
unmounted_resp = []
self.mockos.ls_output = []
self.mockos.ismount_output = False
rv = self.app.get_unmounted()
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEquals(rv, unmounted_resp)
def test_get_diskusage(self):
#posix.statvfs_result(f_bsize=4096, f_frsize=4096, f_blocks=1963185,
# f_bfree=1113075, f_bavail=1013351,
# f_files=498736,
# f_ffree=397839, f_favail=397839, f_flag=0,
# f_namemax=255)
statvfs_content = (4096, 4096, 1963185, 1113075, 1013351, 498736,
397839, 397839, 0, 255)
du_resp = [{'device': 'canhazdrive1', 'avail': 4150685696,
'mounted': True, 'used': 3890520064, 'size': 8041205760}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.statvfs_output = statvfs_content
self.mockos.ismount_output = True
rv = self.app.get_diskusage()
self.assertEquals(self.mockos.statvfs_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEquals(rv, du_resp)
def test_get_diskusage_checkmount_fail(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'brokendrive', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
self.mockos.ismount_output = OSError('brokendrive')
rv = self.app.get_diskusage()
self.assertEquals(self.mockos.listdir_calls, [(('/srv/node',), {})])
self.assertEquals(self.mockos.ismount_calls,
[(('/srv/node/canhazdrive1',), {})])
self.assertEquals(rv, du_resp)
@mock.patch("swift.common.middleware.recon.check_mount", fake_check_mount)
def test_get_diskusage_oserror(self):
du_resp = [{'device': 'canhazdrive1', 'avail': '',
'mounted': 'Input/Output Error', 'used': '', 'size': ''}]
self.mockos.ls_output = ['canhazdrive1']
rv = self.app.get_diskusage()
self.assertEquals(rv, du_resp)
def test_get_quarantine_count(self):
dirs = [['sda'], ['accounts', 'containers', 'objects', 'objects-1']]
self.mockos.ismount_output = True
def fake_lstat(*args, **kwargs):
#posix.lstat_result(st_mode=1, st_ino=2, st_dev=3, st_nlink=4,
# st_uid=5, st_gid=6, st_size=7, st_atime=8,
# st_mtime=9, st_ctime=10)
return stat_result((1, 2, 3, 4, 5, 6, 7, 8, 9, 10))
def fake_exists(*args, **kwargs):
return True
def fake_listdir(*args, **kwargs):
return dirs.pop(0)
with mock.patch("os.lstat", fake_lstat):
with mock.patch("os.path.exists", fake_exists):
with mock.patch("os.listdir", fake_listdir):
rv = self.app.get_quarantine_count()
self.assertEquals(rv, {'objects': 4, 'accounts': 2, 'policies':
{'1': {'objects': 2}, '0': {'objects': 2}},
'containers': 2})
def test_get_socket_info(self):
sockstat_content = ['sockets: used 271',
'TCP: inuse 30 orphan 0 tw 0 alloc 31 mem 0',
'UDP: inuse 16 mem 4', 'UDPLITE: inuse 0',
'RAW: inuse 0', 'FRAG: inuse 0 memory 0',
'']
oart = OpenAndReadTester(sockstat_content)
self.app.get_socket_info(openr=oart.open)
self.assertEquals(oart.open_calls, [
(('/proc/net/sockstat', 'r'), {}),
(('/proc/net/sockstat6', 'r'), {})])
class TestReconMiddleware(unittest.TestCase):
    """WSGI-level tests of ReconMiddleware request routing (/recon/<type>)."""
    def fake_list(self, path):
        return ['a', 'b']
    def setUp(self):
        self.frecon = FakeRecon()
        self.real_listdir = os.listdir
        # Stub os.listdir during construction so the middleware does not
        # touch the real filesystem, then restore it.
        os.listdir = self.fake_list
        self.app = recon.ReconMiddleware(FakeApp(), {'object_recon': "true"})
        os.listdir = self.real_listdir
        self.app.get_mem = self.frecon.fake_mem
        self.app.get_load = self.frecon.fake_load
        self.app.get_async_info = self.frecon.fake_async
        self.app.get_device_info = self.frecon.fake_get_device_info
        self.app.get_replication_info = self.frecon.fake_replication
        self.app.get_auditor_info = self.frecon.fake_auditor
        self.app.get_updater_info = self.frecon.fake_updater
        self.app.get_expirer_info = self.frecon.fake_expirer
        self.app.get_mounted = self.frecon.fake_mounted
        self.app.get_unmounted = self.frecon.fake_unmounted
        self.app.get_diskusage = self.frecon.fake_diskusage
        self.app.get_ring_md5 = self.frecon.fake_ringmd5
        self.app.get_swift_conf_md5 = self.frecon.fake_swiftconfmd5
        self.app.get_quarantine_count = self.frecon.fake_quarantined
        self.app.get_socket_info = self.frecon.fake_sockstat
    def test_recon_get_mem(self):
        get_mem_resp = ['{"memtest": "1"}']
        req = Request.blank('/recon/mem', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_mem_resp)
    def test_recon_get_version(self):
        req = Request.blank('/recon/version',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, [utils.json.dumps({'version': swiftver})])
    def test_recon_get_load(self):
        get_load_resp = ['{"loadtest": "1"}']
        req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_load_resp)
    def test_recon_get_async(self):
        get_async_resp = ['{"asynctest": "1"}']
        req = Request.blank('/recon/async', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_async_resp)
    def test_get_device_info(self):
        get_device_resp = ['{"/srv/1/node": ["sdb1"]}']
        req = Request.blank('/recon/devices',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_device_resp)
    def test_recon_get_replication_notype(self):
        # With no type in the path, replication defaults to 'object'.
        get_replication_resp = ['{"replicationtest": "1"}']
        req = Request.blank('/recon/replication',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_replication_resp)
        self.assertEqual(self.frecon.fake_replication_rtype, 'object')
        self.frecon.fake_replication_rtype = None
    def test_recon_get_replication_all(self):
        get_replication_resp = ['{"replicationtest": "1"}']
        # test account
        req = Request.blank('/recon/replication/account',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_replication_resp)
        self.assertEqual(self.frecon.fake_replication_rtype, 'account')
        self.frecon.fake_replication_rtype = None
        # test container
        req = Request.blank('/recon/replication/container',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_replication_resp)
        self.assertEqual(self.frecon.fake_replication_rtype, 'container')
        self.frecon.fake_replication_rtype = None
        # test object
        req = Request.blank('/recon/replication/object',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_replication_resp)
        self.assertEqual(self.frecon.fake_replication_rtype, 'object')
        self.frecon.fake_replication_rtype = None
    def test_recon_get_auditor_invalid(self):
        get_auditor_resp = ['Invalid path: /recon/auditor/invalid']
        req = Request.blank('/recon/auditor/invalid',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_auditor_resp)
    def test_recon_get_auditor_notype(self):
        get_auditor_resp = ['Invalid path: /recon/auditor']
        req = Request.blank('/recon/auditor',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_auditor_resp)
    def test_recon_get_auditor_all(self):
        get_auditor_resp = ['{"auditortest": "1"}']
        req = Request.blank('/recon/auditor/account',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_auditor_resp)
        self.assertEqual(self.frecon.fake_auditor_rtype, 'account')
        self.frecon.fake_auditor_rtype = None
        req = Request.blank('/recon/auditor/container',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_auditor_resp)
        self.assertEqual(self.frecon.fake_auditor_rtype, 'container')
        self.frecon.fake_auditor_rtype = None
        req = Request.blank('/recon/auditor/object',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_auditor_resp)
        self.assertEqual(self.frecon.fake_auditor_rtype, 'object')
        self.frecon.fake_auditor_rtype = None
    def test_recon_get_updater_invalid(self):
        get_updater_resp = ['Invalid path: /recon/updater/invalid']
        req = Request.blank('/recon/updater/invalid',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_updater_resp)
    def test_recon_get_updater_notype(self):
        get_updater_resp = ['Invalid path: /recon/updater']
        req = Request.blank('/recon/updater',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_updater_resp)
    def test_recon_get_updater(self):
        get_updater_resp = ['{"updatertest": "1"}']
        req = Request.blank('/recon/updater/container',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(self.frecon.fake_updater_rtype, 'container')
        self.frecon.fake_updater_rtype = None
        self.assertEqual(resp, get_updater_resp)
        req = Request.blank('/recon/updater/object',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_updater_resp)
        self.assertEqual(self.frecon.fake_updater_rtype, 'object')
        self.frecon.fake_updater_rtype = None
    def test_recon_get_expirer_invalid(self):
        get_updater_resp = ['Invalid path: /recon/expirer/invalid']
        req = Request.blank('/recon/expirer/invalid',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_updater_resp)
    def test_recon_get_expirer_notype(self):
        get_updater_resp = ['Invalid path: /recon/expirer']
        req = Request.blank('/recon/expirer',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_updater_resp)
    def test_recon_get_expirer_object(self):
        get_expirer_resp = ['{"expirertest": "1"}']
        req = Request.blank('/recon/expirer/object',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_expirer_resp)
        self.assertEqual(self.frecon.fake_expirer_rtype, 'object')
        # Bug fix: this previously reset fake_updater_rtype (copy-paste from
        # the updater test) instead of the expirer attribute checked above.
        self.frecon.fake_expirer_rtype = None
    def test_recon_get_mounted(self):
        get_mounted_resp = ['{"mountedtest": "1"}']
        req = Request.blank('/recon/mounted',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_mounted_resp)
    def test_recon_get_unmounted(self):
        get_unmounted_resp = ['{"unmountedtest": "1"}']
        self.app.get_unmounted = self.frecon.fake_unmounted
        req = Request.blank('/recon/unmounted',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_unmounted_resp)
    def test_recon_no_get_unmounted(self):
        get_unmounted_resp = '[]'
        self.app.get_unmounted = self.frecon.fake_no_unmounted
        req = Request.blank('/recon/unmounted',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = ''.join(self.app(req.environ, start_response))
        self.assertEqual(resp, get_unmounted_resp)
    def test_recon_get_diskusage(self):
        get_diskusage_resp = ['{"diskusagetest": "1"}']
        req = Request.blank('/recon/diskusage',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_diskusage_resp)
    def test_recon_get_ringmd5(self):
        get_ringmd5_resp = ['{"ringmd5test": "1"}']
        req = Request.blank('/recon/ringmd5',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_ringmd5_resp)
    def test_recon_get_swiftconfmd5(self):
        get_swiftconfmd5_resp = ['{"/etc/swift/swift.conf": "abcdef"}']
        req = Request.blank('/recon/swiftconfmd5',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_swiftconfmd5_resp)
    def test_recon_get_quarantined(self):
        get_quarantined_resp = ['{"quarantinedtest": "1"}']
        req = Request.blank('/recon/quarantined',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_quarantined_resp)
    def test_recon_get_sockstat(self):
        get_sockstat_resp = ['{"sockstattest": "1"}']
        req = Request.blank('/recon/sockstat',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, get_sockstat_resp)
    def test_recon_invalid_path(self):
        req = Request.blank('/recon/invalid',
                            environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, ['Invalid path: /recon/invalid'])
    def test_no_content(self):
        # A handler returning no content maps to a 500 response body.
        self.app.get_load = self.frecon.nocontent
        req = Request.blank('/recon/load', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, ['Internal server error.'])
    def test_recon_pass(self):
        # Non-/recon paths fall through to the wrapped app.
        req = Request.blank('/', environ={'REQUEST_METHOD': 'GET'})
        resp = self.app(req.environ, start_response)
        self.assertEqual(resp, 'FAKE APP')
# Allow running this test module directly with ``python test_recon.py``.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "d69b049f64f467d5208614a78da74fcf",
"timestamp": "",
"source": "github",
"line_count": 1073,
"max_line_length": 79,
"avg_line_length": 44.071761416589005,
"alnum_prop": 0.5246040305356425,
"repo_name": "dpgoetz/swift",
"id": "66e97c3088f18fcfe469fd8e51391f8187b2bba3",
"size": "47884",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/unit/common/middleware/test_recon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "3611"
},
{
"name": "Gettext Catalog",
"bytes": "67525"
},
{
"name": "Modelica",
"bytes": "17"
},
{
"name": "Python",
"bytes": "5402632"
},
{
"name": "Shell",
"bytes": "950"
}
],
"symlink_target": ""
} |
import logging
from collections import defaultdict

import networkx

from .. import SIM_PROCEDURES
from .. import options as o
from ..errors import AngrError, AngrCFGError, SimValueError, SimSolverModeError
from ..knowledge_base import KnowledgeBase
from ..manager import SimulationManager
from ..misc.graph import shallow_reverse
from . import Analysis, register_analysis
# Module-level logger for the veritesting analysis.
l = logging.getLogger("angr.analyses.veritesting")
class VeritestingError(Exception):
    """Raised when the veritesting analysis encounters an unrecoverable error."""
    pass
class CallTracingFilter(object):
    """
    Filter to apply during CFG creation on a given state and jumpkind to determine if it should be skipped at a certain
    depth
    """
    # SimProcedures whose calls may be traced into rather than skipped.
    whitelist = {
        SIM_PROCEDURES['cgc']['receive'],
        SIM_PROCEDURES['cgc']['transmit'],
        SIM_PROCEDURES['posix']['read'],
    }
    # Class-level cache mapping (addr, jumpkind) -> (CFG, CallTracingFilter),
    # shared across all instances of this filter.
    cfg_cache = { }
    def __init__(self, project, depth, blacklist=None):
        # project: the angr Project under analysis.
        # depth: current call-tracing recursion depth; targets are rejected
        #        once depth reaches 5 (see filter()).
        # blacklist: call-target addresses that are always rejected.
        self.project = project
        self.blacklist = [ ] if blacklist is None else blacklist
        self._skipped_targets = set()
        self.depth = depth
    def filter(self, call_target_state, jumpkind):
        """
        The call will be skipped if it returns True.

        :param call_target_state:   The new state of the call target.
        :param jumpkind:            The Jumpkind of this call.
        :returns:                   True if we want to skip this call, False otherwise.
        """
        ACCEPT = False
        REJECT = True
        l.debug('Filtering calling target %s', call_target_state.ip)
        # Currently we always skip the call, unless the target function satisfies one of the following conditions:
        # 1) It's a SimProcedure that are in the whitelist
        # 2) It's a function that has no loops, and no calls/syscalls,
        # 3) It's a function that has no loops, and only has calls to another function that will not be filtered out by
        #    this filter
        # Generate a CFG
        ip = call_target_state.ip
        # Reject once the tracing recursion gets too deep.
        if self.depth >= 5:
            l.debug('Rejecting target %s - too deep, depth is %d', ip, self.depth)
            return REJECT
        try:
            addr = call_target_state.se.eval_one(ip)
        except (SimValueError, SimSolverModeError):
            # Symbolic / multi-valued target: cannot concretize, so skip it.
            self._skipped_targets.add(-1)
            l.debug('Rejecting target %s - cannot be concretized', ip)
            return REJECT
        # Is it in our blacklist?
        if addr in self.blacklist:
            self._skipped_targets.add(addr)
            l.debug('Rejecting target 0x%x - blacklisted', addr)
            return REJECT
        # If the target is a SimProcedure, is it on our whitelist?
        if self.project.is_hooked(addr) and type(self.project._sim_procedures[addr][0]) in CallTracingFilter.whitelist:
            # accept!
            l.debug('Accepting target 0x%x, jumpkind %s', addr, jumpkind)
            return ACCEPT
        # If it's a syscall, let's see if the real syscall is inside our whitelist
        if jumpkind.startswith('Ijk_Sys'):
            call_target_state.history.jumpkind = jumpkind
            successors_ = self.project.factory.successors(call_target_state)
            try:
                next_run = successors_.artifacts['procedure']
            except KeyError:
                l.warning('CallTracingFilter.filter(): except artifacts[\'procedure\'] in %s. Reject.', successors_)
                return REJECT
            if type(next_run) in CallTracingFilter.whitelist:
                # accept!
                l.debug('Accepting target 0x%x, jumpkind %s', addr, jumpkind)
                return ACCEPT
            else:
                # reject
                l.debug('Rejecting target 0x%x - syscall %s not in whitelist', addr, type(next_run))
                return REJECT
        cfg_key = (addr, jumpkind)
        if cfg_key not in self.cfg_cache:
            # Build a one-call-deep CFG of the target with a child filter at
            # depth + 1, and remember it in the class-level cache.
            new_blacklist = self.blacklist[ :: ]
            new_blacklist.append(addr)
            tracing_filter = CallTracingFilter(self.project, depth=self.depth + 1, blacklist=new_blacklist)
            cfg = self.project.analyses.CFGAccurate(starts=((addr, jumpkind),),
                                                    initial_state=call_target_state,
                                                    context_sensitivity_level=0,
                                                    call_depth=1,
                                                    call_tracing_filter=tracing_filter.filter,
                                                    normalize=True,
                                                    kb=KnowledgeBase(self.project, self.project.loader.main_object)
                                                    )
            self.cfg_cache[cfg_key] = (cfg, tracing_filter)
            try:
                cfg.force_unroll_loops(1)
            except AngrCFGError:
                # Exceptions occurred during loop unrolling
                # reject
                l.debug('Rejecting target %#x - loop unrolling failed', addr)
                return REJECT
        else:
            l.debug('Loading CFG from CFG cache')
            cfg, tracing_filter = self.cfg_cache[cfg_key]
        if cfg._loop_back_edges:
            # It has loops!
            self._skipped_targets.add(addr)
            l.debug('Rejecting target 0x%x - it has loops', addr)
            return REJECT
        sim_procedures = [ n for n in cfg.graph.nodes() if n.simprocedure_name is not None ]
        for sp_node in sim_procedures:
            if not self.project.is_hooked(sp_node.addr):
                # This is probably a PathTerminator
                # Just skip it for now
                continue
            # NOTE(review): this accesses _sim_procedures[addr].procedure here
            # but _sim_procedures[addr][0] earlier in this method — verify both
            # match this angr version's hook storage format.
            if self.project._sim_procedures[sp_node.addr].procedure not in CallTracingFilter.whitelist:
                self._skipped_targets.add(addr)
                l.debug('Rejecting target 0x%x - contains SimProcedures outside whitelist', addr)
                return REJECT
        if len(tracing_filter._skipped_targets):
            # Bummer
            self._skipped_targets.add(addr)
            l.debug('Rejecting target 0x%x - should be skipped', addr)
            return REJECT
        # accept!
        l.debug('Accepting target 0x%x, jumpkind %s', addr, jumpkind)
        return ACCEPT
class Veritesting(Analysis):
    """
    An exploration technique made for condensing chunks of code to single (nested) if-then-else constraints via CFG
    accurate to conduct Static Symbolic Execution SSE (conversion to single constraint)
    """

    # A cache for CFGs we generated before, shared across instances and keyed
    # by (address, jumpkind).
    cfg_cache = { }
    # Names of all stashes we will return from Veritesting
    all_stashes = ('successful', 'errored', 'deadended', 'deviated', 'unconstrained')

    def __init__(
        self, input_state, boundaries=None, loop_unrolling_limit=10, enable_function_inlining=False,
        terminator=None, deviation_filter=None
    ):
        """
        SSE stands for Static Symbolic Execution, and we also implemented an extended version of Veritesting (Avgerinos,
        Thanassis, et al, ICSE 2014).

        :param input_state:              The initial state to begin the execution with.
        :param boundaries:               Addresses where execution should stop.
        :param loop_unrolling_limit:     The maximum times that Veritesting should unroll a loop for.
        :param enable_function_inlining: Whether we should enable function inlining and syscall inlining.
        :param terminator:               A callback function that takes a state as parameter. Veritesting will terminate
                                         if this function returns True.
        :param deviation_filter:         A callback function that takes a state as parameter. Veritesting will put the
                                         state into "deviated" stash if this function returns True.
        """
        block = self.project.factory.block(input_state.addr)
        branches = block.vex.constant_jump_targets_and_jumpkinds

        # If we are not at a two-way conditional jump, just do a normal step.
        # BUGFIX: dict.values() returns a view on Python 3 that never compares
        # equal to a list, so the old `branches.values() == [...]` check was
        # always False; materialize the view before comparing.
        if list(branches.values()) != ['Ijk_Boring', 'Ijk_Boring']:
            self.result, self.final_manager = False, None
            return

        # otherwise do a veritesting step
        self._input_state = input_state.copy()
        self._boundaries = boundaries if boundaries is not None else [ ]
        self._loop_unrolling_limit = loop_unrolling_limit
        self._enable_function_inlining = enable_function_inlining
        self._terminator = terminator
        self._deviation_filter = deviation_filter

        # set up the cfg stuff
        self._cfg, self._loop_graph = self._make_cfg()
        self._loop_backedges = self._cfg._loop_back_edges
        self._loop_heads = set([ dst.addr for _, dst in self._loop_backedges ])

        l.info("Static symbolic execution starts at %#x", self._input_state.addr)
        l.debug(
            "The execution will terminate at the following addresses: [ %s ]",
            ", ".join([ hex(i) for i in self._boundaries ])
        )

        l.debug("A loop will be unrolled by a maximum of %d times.", self._loop_unrolling_limit)
        if self._enable_function_inlining:
            l.debug("Function inlining is enabled.")
        else:
            l.debug("Function inlining is disabled.")

        self.result, self.final_manager = self._veritesting()

    def _veritesting(self):
        """
        Perform static symbolic execution starting from the given point.

        :returns (bool, SimulationManager): tuple of the success/failure of veritesting and the subsequent
                                            SimulationManager after execution
        """
        s = self._input_state.copy()

        try:
            new_manager = self._execute_and_merge(s)
        except (ClaripyError, SimError, AngrError):
            # Only swallow solver/engine errors when the user opted in.
            if BYPASS_VERITESTING_EXCEPTIONS not in s.options:
                raise
            else:
                l.warning("Veritesting caught an exception.", exc_info=True)
            return False, SimulationManager(self.project, stashes={'deviated': [s]})
        except VeritestingError as ex:
            l.warning("Exception occurred: %s", str(ex))
            return False, SimulationManager(self.project, stashes={'deviated': [s]})

        l.info(
            'Returning new paths: (successful: %s, deadended: %s, errored: %s, deviated: %s)',
            len(new_manager.successful), len(new_manager.deadended),
            len(new_manager.errored), len(new_manager.deviated)
        )

        return True, new_manager

    def _execute_and_merge(self, state):
        """
        Symbolically execute the program in a static manner. The basic idea is that we look ahead by creating a CFG,
        then perform a _controlled symbolic exploration_ based on the CFG, one path at a time. The controlled symbolic
        exploration stops when it sees a branch whose both directions are all feasible, or it shall wait for a merge
        from another path.

        A basic block will not be executed for more than *loop_unrolling_limit* times. If that is the case, a new state
        will be returned.

        :param SimState state: The initial state to start the execution.
        :returns:              A list of new states.
        """
        # Find all merge points
        merge_points = self._get_all_merge_points(self._cfg, self._loop_graph)
        l.debug('Merge points: %s', [ hex(i[0]) for i in merge_points ])

        #
        # Controlled symbolic exploration
        #

        # Initialize the beginning state
        initial_state = state
        initial_state.globals['loop_ctrs'] = defaultdict(int)

        manager = SimulationManager(
            self.project,
            active_states=[ initial_state ],
            immutable=False,
            resilience=o.BYPASS_VERITESTING_EXCEPTIONS in initial_state.options
        )

        # Initialize all stashes
        for stash in self.all_stashes:
            manager.stashes[stash] = [ ]
        # immediate_dominators = cfg.immediate_dominators(cfg.get_any_node(ip_int))

        while manager.active:
            # Step one step forward
            l.debug('Steps %s with %d active states: [ %s ]',
                    manager,
                    len(manager.active),
                    manager.active)

            # Apply self.deviation_func on every single active state, and move them to deviated stash if needed
            if self._deviation_filter is not None:
                manager.stash(filter_func=self._deviation_filter, from_stash='active', to_stash='deviated')

            # Mark all those paths that are out of boundaries as successful
            manager.stash(
                filter_func=self.is_overbound,
                from_stash='active', to_stash='successful'
            )

            manager.step(successor_func=self._get_successors)

            if self._terminator is not None and self._terminator(manager):
                # NOTE(review): nothing in this analysis ever populates an
                # 'unfuck' stash -- confirm this branch is exercised upstream.
                for p in manager.unfuck:
                    self._unfuck(p)
                break

            # Stash all paths that we do not see in our CFG
            manager.stash(
                filter_func=self.is_not_in_cfg,
                to_stash="deviated"
            )

            # Stash all paths that we do not care about
            manager.stash(
                filter_func= lambda state: (
                    state.history.jumpkind not in
                    ('Ijk_Boring', 'Ijk_Call', 'Ijk_Ret', 'Ijk_NoHook')
                    and not state.history.jumpkind.startswith('Ijk_Sys')
                ),
                to_stash="deadended"
            )

            if manager.deadended:
                l.debug('Now we have some deadended paths: %s', manager.deadended)

            # Stash all possible states that we should merge later
            for merge_point_addr, merge_point_looping_times in merge_points:
                manager.stash_addr(
                    merge_point_addr,
                    to_stash="_merge_%x_%d" % (merge_point_addr, merge_point_looping_times)
                )

            # Try to merge a set of previously stashed paths, and then unstash them
            if not manager.active:
                manager = self._join_merge_points(manager, merge_points)

        if any(len(manager.stashes[stash_name]) for stash_name in self.all_stashes):
            # Remove all stashes other than errored or deadended
            manager.stashes = {
                name: stash for name, stash in manager.stashes.items()
                if name in self.all_stashes
            }

            for stash in manager.stashes:
                manager.apply(self._unfuck, stash=stash)

        return manager

    def _join_merge_points(self, manager, merge_points):
        """
        Merges together the appropriate execution points and unstashes them from the intermediate merge_x_y stashes to
        pruned (dropped), deadend or active stashes.

        :param SimulationManager manager: current simulation context being stepped through
        :param [(int, int)] merge_points: list of address and loop counters of execution points to merge
        :returns SimulationManager:       new manager with edited stashes
        """
        merged_anything = False

        for merge_point_addr, merge_point_looping_times in merge_points:
            if merged_anything:
                break

            stash_name = "_merge_%x_%d" % (merge_point_addr, merge_point_looping_times)
            if stash_name not in manager.stashes:
                continue

            stash_size = len(manager.stashes[stash_name])
            if stash_size == 0:
                continue
            if stash_size == 1:
                # BUGFIX: this used to pass stash_size to the %s placeholder,
                # logging "stash 1" instead of the actual stash name.
                l.info("Skipping merge of 1 state in stash %s.", stash_name)
                manager.move(stash_name, 'active')
                continue

            # let everyone know of the impending disaster
            l.info("Merging %d states in stash %s", stash_size, stash_name)

            # Try to prune the stash, so unsatisfiable states will be thrown away
            manager.prune(from_stash=stash_name, to_stash='pruned')
            if 'pruned' in manager.stashes and len(manager.pruned):
                l.debug('... pruned %d paths from stash %s', len(manager.pruned), stash_name)
            # Remove the pruned stash to save memory
            manager.drop(stash='pruned')

            # merge things callstack by callstack
            while len(manager.stashes[stash_name]):
                r = manager.stashes[stash_name][0]
                manager.move(
                    stash_name, 'merge_tmp',
                    lambda p: p.callstack == r.callstack #pylint:disable=cell-var-from-loop
                )

                old_count = len(manager.merge_tmp)
                l.debug("... trying to merge %d states.", old_count)

                # Merge the loop counters. (dict.iteritems()/itervalues() were
                # removed in Python 3; items()/values() work on both 2 and 3.)
                new_loop_ctrs = defaultdict(int)
                for m in manager.merge_tmp:
                    for head_addr, looping_times in m.globals['loop_ctrs'].items():
                        new_loop_ctrs[head_addr] = max(
                            looping_times,
                            m.globals['loop_ctrs'][head_addr]
                        )

                manager.merge(stash='merge_tmp')
                for m in manager.merge_tmp:
                    m.globals['loop_ctrs'] = new_loop_ctrs

                new_count = len(manager.stashes['merge_tmp'])
                l.debug("... after merge: %d states.", new_count)

                merged_anything |= new_count != old_count

                if len(manager.merge_tmp) > 1:
                    l.warning("More than 1 state after Veritesting merge.")
                    manager.move('merge_tmp', 'active')
                elif any(
                    loop_ctr >= self._loop_unrolling_limit + 1 for loop_ctr in
                    manager.one_merge_tmp.globals['loop_ctrs'].values()
                ):
                    l.debug("... merged state is overlooping")
                    manager.move('merge_tmp', 'deadended')
                else:
                    l.debug('... merged state going to active stash')
                    manager.move('merge_tmp', 'active')

        return manager

    #
    # Path management
    #

    def is_not_in_cfg(self, s):
        """
        Returns if s.addr is not a proper node in our CFG.

        :param SimState s: The SimState instance to test.
        :returns bool:     False if our CFG contains s.addr, True otherwise.
        """
        n = self._cfg.get_any_node(s.addr, is_syscall=s.history.jumpkind.startswith('Ijk_Sys'))
        if n is None:
            return True

        if n.simprocedure_name == 'PathTerminator':
            return True

        return False

    def _get_successors(self, state):
        """
        Gets the successors to the current state by step, saves copy of state and finally stashes new unconstrained
        states to manager.

        :param SimState state:  Current state to step on from
        :returns SimSuccessors: The SimSuccessors object
        """
        # Step exactly one CFG node's worth of instructions.
        size_of_next_irsb = self._cfg.get_any_node(state.addr).size
        return self.project.factory.successors(state, size=size_of_next_irsb)

    def is_overbound(self, state):
        """
        Filter out all states that run out of boundaries or loop too many times.

        :param SimState state: SimState instance to check
        :returns bool:         True if outside of mem/loop_ctr boundary
        """
        ip = state.addr

        if ip in self._boundaries:
            l.debug("... terminating Veritesting due to overbound")
            return True

        try:
            # If the address is not in the list (which could mean it is
            # not at the top of a block), check directly in the blocks
            # (Blocks are repeatedly created for every check, but with
            # the IRSB cache in angr lifter it should be OK.)
            if set(self._boundaries).intersection(set(self.project.factory.block(ip).instruction_addrs)):
                return True
        except (AngrError, SimError):
            pass

        if (
            ip in self._loop_heads # This is the beginning of the loop
            or state.history.jumpkind == 'Ijk_Call' # We also wanna catch recursive function calls
        ):
            state.globals['loop_ctrs'][ip] += 1
            if state.globals['loop_ctrs'][ip] >= self._loop_unrolling_limit + 1:
                l.debug('... terminating Veritesting due to overlooping')
                return True

        l.debug('... accepted')
        return False

    @staticmethod
    def _unfuck(s):
        """
        Deletes the loop counter from state's information dictionary

        :param SimState s: SimState instance to update
        :returns SimState: same SimState with deleted loop counter
        """
        del s.globals['loop_ctrs']
        return s

    #
    # Merge point determination
    #

    def _make_cfg(self):
        """
        Builds a CFG from the current function. Saved in cfg_cache.

        :returns (CFGAccurate, networkx.DiGraph): Tuple of the CFG and networkx representation of it
        """
        state = self._input_state
        ip_int = state.addr

        cfg_key = (ip_int, state.history.jumpkind)
        if cfg_key in self.cfg_cache:
            cfg, cfg_graph_with_loops = self.cfg_cache[cfg_key]
        else:
            if self._enable_function_inlining:
                call_tracing_filter = CallTracingFilter(self.project, depth=0)
                filter = call_tracing_filter.filter #pylint:disable=redefined-builtin
            else:
                filter = None

            # To better handle syscalls, we make a copy of all registers if they are not symbolic
            cfg_initial_state = self.project.factory.blank_state(mode='fastpath')

            # FIXME: This is very hackish
            # FIXME: And now only Linux-like syscalls are supported
            if self.project.arch.name == 'X86':
                if not state.se.symbolic(state.regs.eax):
                    cfg_initial_state.regs.eax = state.regs.eax
            elif self.project.arch.name == 'AMD64':
                if not state.se.symbolic(state.regs.rax):
                    cfg_initial_state.regs.rax = state.regs.rax

            cfg = self.project.analyses.CFGAccurate(
                starts=((ip_int, state.history.jumpkind),),
                context_sensitivity_level=0,
                call_depth=1,
                call_tracing_filter=filter,
                initial_state=cfg_initial_state,
                normalize=True,
                kb=KnowledgeBase(self.project, self.project.loader.main_object)
            )
            # Keep an un-unrolled copy of the graph before force_unroll_loops
            # mutates the CFG.
            cfg_graph_with_loops = networkx.DiGraph(cfg.graph)
            cfg.force_unroll_loops(self._loop_unrolling_limit)
            self.cfg_cache[cfg_key] = (cfg, cfg_graph_with_loops)

        return cfg, cfg_graph_with_loops

    @staticmethod
    def _post_dominate(reversed_graph, n1, n2):
        """
        Checks whether `n1` post-dominates `n2` in the *original* (not reversed) graph.

        :param networkx.DiGraph reversed_graph: The reversed networkx.DiGraph instance.
        :param networkx.Node n1:                Node 1.
        :param networkx.Node n2:                Node 2.
        :returns bool:                          True/False.
        """
        ds = networkx.dominating_set(reversed_graph, n1)
        return n2 in ds

    def _get_all_merge_points(self, cfg, graph_with_loops):
        """
        Return all possible merge points in this CFG.

        :param CFGAccurate cfg: The control flow graph, which must be acyclic.
        :returns [(int, int)]:  A list of merge points (address and number of times looped).
        """
        # sorted()'s cmp= parameter was removed in Python 3; cmp_to_key adapts
        # the old-style comparator. Imported locally to keep the fix self-contained.
        from functools import cmp_to_key

        graph = networkx.DiGraph(cfg.graph)
        reversed_cyclic_graph = shallow_reverse(graph_with_loops)

        # Remove all "FakeRet" edges
        fakeret_edges = [
            (src, dst) for src, dst, data in graph.edges(data=True)
            if data['jumpkind'] in ('Ijk_FakeRet', 'Ijk_Exit')
        ]
        graph.remove_edges_from(fakeret_edges)

        # Remove all "FakeRet" edges from cyclic_graph as well
        fakeret_edges = [
            (src, dst) for src, dst, data in reversed_cyclic_graph.edges(data=True)
            if data['jumpkind'] in ('Ijk_FakeRet', 'Ijk_Exit')
        ]
        reversed_cyclic_graph.remove_edges_from(fakeret_edges)

        # Perform a topological sort
        sorted_nodes = networkx.topological_sort(graph)

        nodes = [ n for n in sorted_nodes if graph.in_degree(n) > 1 and n.looping_times == 0 ]

        # Reorder nodes based on post-dominance relations
        nodes = sorted(nodes, key=cmp_to_key(lambda n1, n2: (
            1 if self._post_dominate(reversed_cyclic_graph, n1, n2)
            else (-1 if self._post_dominate(reversed_cyclic_graph, n2, n1) else 0)
        )))

        return [ (n.addr, n.looping_times) for n in nodes ]
register_analysis(Veritesting, 'Veritesting')
from ..errors import SimValueError, SimSolverModeError, SimError
from ..sim_options import BYPASS_VERITESTING_EXCEPTIONS
from claripy import ClaripyError
| {
"content_hash": "7c69ceebd6749153c1ebbcd2542ab25c",
"timestamp": "",
"source": "github",
"line_count": 624,
"max_line_length": 129,
"avg_line_length": 40.44070512820513,
"alnum_prop": 0.5793936992272637,
"repo_name": "f-prettyland/angr",
"id": "b4359276d05401ca166813b39485e29f6c5565f5",
"size": "25235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/analyses/veritesting.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6375"
},
{
"name": "C++",
"bytes": "39375"
},
{
"name": "Makefile",
"bytes": "557"
},
{
"name": "Python",
"bytes": "2934645"
}
],
"symlink_target": ""
} |
import re
import warnings
from datetime import datetime, timedelta
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import haystack
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, log_query
from haystack.constants import (
DEFAULT_OPERATOR,
DJANGO_CT,
DJANGO_ID,
FUZZY_MAX_EXPANSIONS,
FUZZY_MIN_SIM,
ID,
)
from haystack.exceptions import MissingDependency, MoreLikeThisError, SkipDocument
from haystack.inputs import Clean, Exact, PythonData, Raw
from haystack.models import SearchResult
from haystack.utils import get_identifier, get_model_ct
from haystack.utils import log as logging
from haystack.utils.app_loading import haystack_get_model
# Backport support
from .constants import ALL_FIELD
try:
import elasticsearch
if (1, 0, 0) <= elasticsearch.__version__ < (2, 0, 0):
warnings.warn(
"ElasticSearch 1.x support deprecated, will be removed in 4.0",
DeprecationWarning,
)
try:
# let's try this, for elasticsearch > 1.7.0
from elasticsearch.helpers import bulk
except ImportError:
# let's try this, for elasticsearch <= 1.7.0
from elasticsearch.helpers import bulk_index as bulk
from elasticsearch.exceptions import NotFoundError
except ImportError:
raise MissingDependency(
"The 'elasticsearch' backend requires the installation of 'elasticsearch'. Please refer to the documentation."
)
# ISO-8601 datetime matcher, e.g. "2014-02-25T12:30:05.123". Fractional
# seconds are matched but deliberately not captured. Presumably consumed by
# the backend's _to_python conversion, which lives outside this chunk.
DATETIME_REGEX = re.compile(
    r"^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T"
    r"(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d+)?$"
)
class ElasticsearchSearchBackend(BaseSearchBackend):
# Words reserved by the Elasticsearch query syntax for special use.
RESERVED_WORDS = ("AND", "NOT", "OR", "TO")

# Characters reserved by Elasticsearch for special use.
# The '\\' must come first, so as not to overwrite the other slash replacements.
RESERVED_CHARACTERS = (
    "\\",
    "+",
    "-",
    "&&",
    "||",
    "!",
    "(",
    ")",
    "{",
    "}",
    "[",
    "]",
    "^",
    '"',
    "~",
    "*",
    "?",
    ":",
    "/",
)

# Settings to add an n-gram & edge n-gram analyzer. Passed as the request body
# when the index is created (see setup()).
DEFAULT_SETTINGS = {
    "settings": {
        "analysis": {
            "analyzer": {
                "ngram_analyzer": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": ["haystack_ngram", "lowercase"],
                },
                "edgengram_analyzer": {
                    "type": "custom",
                    "tokenizer": "standard",
                    "filter": ["haystack_edgengram", "lowercase"],
                },
            },
            "tokenizer": {
                "haystack_ngram_tokenizer": {
                    "type": "nGram",
                    "min_gram": 3,
                    "max_gram": 15,
                },
                "haystack_edgengram_tokenizer": {
                    "type": "edgeNGram",
                    "min_gram": 2,
                    "max_gram": 15,
                    "side": "front",
                },
            },
            "filter": {
                # n-grams of 3-15 chars; edge n-grams start at 2 chars.
                "haystack_ngram": {"type": "nGram", "min_gram": 3, "max_gram": 15},
                "haystack_edgengram": {
                    "type": "edgeNGram",
                    "min_gram": 2,
                    "max_gram": 15,
                },
            },
        }
    }
}
def __init__(self, connection_alias, **connection_options):
    """
    Set up the Elasticsearch connection for ``connection_alias``.

    Requires 'URL' and 'INDEX_NAME' in the connection options; raises
    ImproperlyConfigured when either is missing.
    """
    super(ElasticsearchSearchBackend, self).__init__(
        connection_alias, **connection_options)

    # Both settings are mandatory; complain about the first one missing.
    for required in ("URL", "INDEX_NAME"):
        if required not in connection_options:
            raise ImproperlyConfigured(
                "You must specify a '%s' in your settings for connection '%s'."
                % (required, connection_alias)
            )

    self.conn = elasticsearch.Elasticsearch(
        connection_options["URL"],
        timeout=self.timeout,
        **connection_options.get("KWARGS", {})
    )
    self.index_name = connection_options["INDEX_NAME"]
    self.log = logging.getLogger("haystack")
    self.setup_complete = False
    self.existing_mapping = {}
def _get_doc_type_option(self):
return {
"doc_type": "modelresult",
}
def _get_current_mapping(self, field_mapping):
return {"modelresult": {"properties": field_mapping}}
def setup(self):
    """
    Defers loading until needed.
    """
    # Fetch & cache whatever mapping is currently live in the cluster; it is
    # compared below (and during ``update``) so a new mapping is only pushed
    # when something actually changed.
    try:
        self.existing_mapping = self.conn.indices.get_mapping(index=self.index_name)
    except NotFoundError:
        pass
    except Exception:
        if not self.silently_fail:
            raise

    unified = haystack.connections[self.connection_alias].get_unified_index()
    self.content_field_name, schema_fields = self.build_schema(
        unified.all_searchfields()
    )
    desired_mapping = self._get_current_mapping(schema_fields)

    if desired_mapping != self.existing_mapping:
        try:
            # Make sure the index exists first (ignore 400 = already there),
            # then push the fresh mapping.
            self.conn.indices.create(
                index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400
            )
            self.conn.indices.put_mapping(
                index=self.index_name,
                body=desired_mapping,
                **self._get_doc_type_option()
            )
            self.existing_mapping = desired_mapping
        except Exception:
            if not self.silently_fail:
                raise

    self.setup_complete = True
def _prepare_object(self, index, obj):
return index.full_prepare(obj)
def update(self, index, iterable, commit=True):
    """
    Bulk-index every object in ``iterable``, using ``index`` to prepare the
    documents. Optionally refresh the index afterwards (``commit``).
    """
    if not self.setup_complete:
        try:
            self.setup()
        except elasticsearch.TransportError as e:
            if not self.silently_fail:
                raise
            self.log.error(
                "Failed to add documents to Elasticsearch: %s", e, exc_info=True
            )
            return

    actions = []
    for obj in iterable:
        try:
            prepped = self._prepare_object(index, obj)
            # Convert the data to make sure it's happy.
            doc = {key: self._from_python(value) for key, value in prepped.items()}
            doc["_id"] = doc[ID]
            actions.append(doc)
        except SkipDocument:
            self.log.debug("Indexing for object `%s` skipped", obj)
        except elasticsearch.TransportError as e:
            if not self.silently_fail:
                raise

            # We'll log the object identifier but won't include the actual object
            # to avoid the possibility of that generating encoding errors while
            # processing the log message:
            self.log.error(
                "%s while preparing object for update" % e.__class__.__name__,
                exc_info=True,
                extra={"data": {"index": index, "object": get_identifier(obj)}},
            )

    bulk(
        self.conn,
        actions,
        index=self.index_name,
        **self._get_doc_type_option()
    )

    if commit:
        self.conn.indices.refresh(index=self.index_name)
def remove(self, obj_or_string, commit=True):
    """Delete one document, identified by a model instance or identifier string."""
    identifier = get_identifier(obj_or_string)

    if not self.setup_complete:
        try:
            self.setup()
        except elasticsearch.TransportError as e:
            if not self.silently_fail:
                raise
            self.log.error(
                "Failed to remove document '%s' from Elasticsearch: %s",
                identifier,
                e,
                exc_info=True,
            )
            return

    try:
        # ignore=404: deleting an already-missing document is a no-op.
        self.conn.delete(
            index=self.index_name,
            id=identifier,
            ignore=404,
            **self._get_doc_type_option()
        )

        if commit:
            self.conn.indices.refresh(index=self.index_name)
    except elasticsearch.TransportError as e:
        if not self.silently_fail:
            raise
        self.log.error(
            "Failed to remove document '%s' from Elasticsearch: %s",
            identifier,
            e,
            exc_info=True,
        )
def clear(self, models=None, commit=True):
    """Drop the whole index, or only the documents belonging to ``models``."""
    # Deliberately NOT calling setup() here, as mappings could be very
    # different between connections.
    if models is not None:
        assert isinstance(models, (list, tuple))

    try:
        if models is None:
            # ignore=404: the index may never have been created.
            self.conn.indices.delete(index=self.index_name, ignore=404)
            self.setup_complete = False
            self.existing_mapping = {}
        else:
            models_to_delete = [
                "%s:%s" % (DJANGO_CT, get_model_ct(model)) for model in models
            ]

            # Delete by query in Elasticsearch assumes you're dealing with
            # a ``query`` root object. :/
            query = {
                "query": {"query_string": {"query": " OR ".join(models_to_delete)}}
            }
            self.conn.delete_by_query(
                index=self.index_name,
                body=query,
                **self._get_doc_type_option()
            )
    except elasticsearch.TransportError as e:
        if not self.silently_fail:
            raise

        if models is None:
            self.log.error(
                "Failed to clear Elasticsearch index: %s", e, exc_info=True
            )
        else:
            self.log.error(
                "Failed to clear Elasticsearch index of models '%s': %s",
                ",".join(models_to_delete),
                e,
                exc_info=True,
            )
def build_search_kwargs(
    self,
    query_string,
    sort_by=None,
    start_offset=0,
    end_offset=None,
    fields="",
    highlight=False,
    facets=None,
    date_facets=None,
    query_facets=None,
    narrow_queries=None,
    spelling_query=None,
    within=None,
    dwithin=None,
    distance_point=None,
    models=None,
    limit_to_registered_models=None,
    result_class=None,
    **extra_kwargs
):
    """
    Translate Haystack search options into an Elasticsearch request body.

    Returns the ``kwargs`` dict to be sent as the search body. Sorting,
    highlighting, spelling suggestions, (date/query) facets, geo filters and
    model restrictions are each folded in when the corresponding option is
    given; any ``extra_kwargs`` are merged in last and may override
    everything else.
    """
    index = haystack.connections[self.connection_alias].get_unified_index()
    content_field = index.document_field

    # "*:*" is Haystack's match-everything sentinel.
    if query_string == "*:*":
        kwargs = {"query": {"match_all": {}}}
    else:
        kwargs = {
            "query": {
                "query_string": {
                    "default_field": content_field,
                    "default_operator": DEFAULT_OPERATOR,
                    "query": query_string,
                    "analyze_wildcard": True,
                    "auto_generate_phrase_queries": True,
                    "fuzzy_min_sim": FUZZY_MIN_SIM,
                    "fuzzy_max_expansions": FUZZY_MAX_EXPANSIONS,
                }
            }
        }

    # so far, no filters
    filters = []

    if fields:
        if isinstance(fields, (list, set)):
            fields = " ".join(fields)

        kwargs["fields"] = fields

    if sort_by is not None:
        order_list = []
        for field, direction in sort_by:
            if field == "distance" and distance_point:
                # Do the geo-enabled sort.
                lng, lat = distance_point["point"].coords
                sort_kwargs = {
                    "_geo_distance": {
                        distance_point["field"]: [lng, lat],
                        "order": direction,
                        "unit": "km",
                    }
                }
            else:
                if field == "distance":
                    warnings.warn(
                        "In order to sort by distance, you must call the '.distance(...)' method."
                    )

                # Regular sorting.
                sort_kwargs = {field: {"order": direction}}

            order_list.append(sort_kwargs)

        kwargs["sort"] = order_list

    # From/size offsets don't seem to work right in Elasticsearch's DSL. :/
    # if start_offset is not None:
    #     kwargs['from'] = start_offset

    # if end_offset is not None:
    #     kwargs['size'] = end_offset - start_offset

    if highlight:
        # `highlight` can either be True or a dictionary containing custom parameters
        # which will be passed to the backend and may override our default settings:
        kwargs["highlight"] = {"fields": {content_field: {"store": "yes"}}}

        if isinstance(highlight, dict):
            kwargs["highlight"].update(highlight)

    if self.include_spelling:
        kwargs["suggest"] = {
            "suggest": {
                "text": spelling_query or query_string,
                "term": {
                    # Using content_field here will result in suggestions of stemmed words.
                    "field": ALL_FIELD,
                },
            }
        }

    if narrow_queries is None:
        narrow_queries = set()

    if facets is not None:
        kwargs.setdefault("facets", {})

        for facet_fieldname, extra_options in facets.items():
            facet_options = {"terms": {"field": facet_fieldname, "size": 100}}

            # Special cases for options applied at the facet level (not the terms level).
            if extra_options.pop("global_scope", False):
                # Renamed "global_scope" since "global" is a python keyword.
                facet_options["global"] = True

            if "facet_filter" in extra_options:
                facet_options["facet_filter"] = extra_options.pop("facet_filter")

            facet_options["terms"].update(extra_options)
            kwargs["facets"][facet_fieldname] = facet_options

    if date_facets is not None:
        kwargs.setdefault("facets", {})

        for facet_fieldname, value in date_facets.items():
            # Need to detect on gap_by & only add amount if it's more than one.
            interval = value.get("gap_by").lower()

            # Need to detect on amount (can't be applied on months or years).
            if value.get("gap_amount", 1) != 1 and interval not in (
                "month",
                "year",
            ):
                # Just the first character is valid for use.
                interval = "%s%s" % (value["gap_amount"], interval[:1])

            kwargs["facets"][facet_fieldname] = {
                "date_histogram": {"field": facet_fieldname, "interval": interval},
                "facet_filter": {
                    "range": {
                        facet_fieldname: {
                            "from": self._from_python(value.get("start_date")),
                            "to": self._from_python(value.get("end_date")),
                        }
                    }
                },
            }

    if query_facets is not None:
        kwargs.setdefault("facets", {})

        for facet_fieldname, value in query_facets:
            kwargs["facets"][facet_fieldname] = {
                "query": {"query_string": {"query": value}}
            }

    if limit_to_registered_models is None:
        limit_to_registered_models = getattr(
            settings, "HAYSTACK_LIMIT_TO_REGISTERED_MODELS", True
        )

    if models and len(models):
        model_choices = sorted(get_model_ct(model) for model in models)
    elif limit_to_registered_models:
        # Using narrow queries, limit the results to only models handled
        # with the current routers.
        model_choices = self.build_models_list()
    else:
        model_choices = []

    if len(model_choices) > 0:
        filters.append({"terms": {DJANGO_CT: model_choices}})

    for q in narrow_queries:
        filters.append(
            {"fquery": {"query": {"query_string": {"query": q}}, "_cache": True}}
        )

    if within is not None:
        from haystack.utils.geo import generate_bounding_box

        ((south, west), (north, east)) = generate_bounding_box(
            within["point_1"], within["point_2"]
        )

        within_filter = {
            "geo_bounding_box": {
                within["field"]: {
                    "top_left": {"lat": north, "lon": west},
                    "bottom_right": {"lat": south, "lon": east},
                }
            }
        }
        filters.append(within_filter)

    if dwithin is not None:
        lng, lat = dwithin["point"].coords

        # NB: the 1.0.0 release of elasticsearch introduced an
        # incompatible change in the distance filter formatting
        if elasticsearch.VERSION >= (1, 0, 0):
            distance = "%(dist).6f%(unit)s" % {
                "dist": dwithin["distance"].km,
                "unit": "km",
            }
        else:
            distance = dwithin["distance"].km

        dwithin_filter = {
            "geo_distance": {
                "distance": distance,
                dwithin["field"]: {"lat": lat, "lon": lng},
            }
        }
        filters.append(dwithin_filter)

    # if we want to filter, change the query type to filtered
    if filters:
        kwargs["query"] = {"filtered": {"query": kwargs.pop("query")}}

        if len(filters) == 1:
            kwargs["query"]["filtered"]["filter"] = filters[0]
        else:
            kwargs["query"]["filtered"]["filter"] = {"bool": {"must": filters}}

    if extra_kwargs:
        kwargs.update(extra_kwargs)

    return kwargs
@log_query
def search(self, query_string, **kwargs):
    """Execute ``query_string`` against the index and return processed results."""
    if not query_string:
        return {"results": [], "hits": 0}

    if not self.setup_complete:
        self.setup()

    search_kwargs = self.build_search_kwargs(query_string, **kwargs)
    search_kwargs["from"] = kwargs.get("start_offset", 0)

    # Detect geo-distance ordering so distances can later be read off the
    # per-hit sort values.
    order_fields = {
        key for order in search_kwargs.get("sort", []) for key in order.keys()
    }
    geo_sort = "_geo_distance" in order_fields

    start_offset = kwargs.get("start_offset", 0)
    end_offset = kwargs.get("end_offset")
    if end_offset is not None and end_offset > start_offset:
        search_kwargs["size"] = end_offset - start_offset

    try:
        raw_results = self.conn.search(
            body=search_kwargs,
            index=self.index_name,
            _source=True,
            **self._get_doc_type_option()
        )
    except elasticsearch.TransportError as e:
        if not self.silently_fail:
            raise
        self.log.error(
            "Failed to query Elasticsearch using '%s': %s",
            query_string,
            e,
            exc_info=True,
        )
        raw_results = {}

    return self._process_results(
        raw_results,
        highlight=kwargs.get("highlight"),
        result_class=kwargs.get("result_class", SearchResult),
        distance_point=kwargs.get("distance_point"),
        geo_sort=geo_sort,
    )
def more_like_this(
    self,
    model_instance,
    additional_query_string=None,
    start_offset=0,
    end_offset=None,
    models=None,
    limit_to_registered_models=None,
    result_class=None,
    **kwargs
):
    """Find documents similar to ``model_instance`` via the ES MLT API."""
    from haystack import connections

    if not self.setup_complete:
        self.setup()

    # Deferred models will have a different class ("RealClass_Deferred_fieldname")
    # which won't be in our registry:
    model_klass = model_instance._meta.concrete_model

    unified = connections[self.connection_alias].get_unified_index()
    index = unified.get_index(model_klass)
    field_name = index.get_content_field()

    params = {}
    if start_offset is not None:
        params["search_from"] = start_offset
    if end_offset is not None:
        params["search_size"] = end_offset - start_offset

    doc_id = get_identifier(model_instance)

    try:
        mlt_kwargs = dict(self._get_doc_type_option(), **params)
        raw_results = self.conn.mlt(
            index=self.index_name,
            id=doc_id,
            mlt_fields=[field_name],
            **mlt_kwargs
        )
    except elasticsearch.TransportError as e:
        if not self.silently_fail:
            raise
        self.log.error(
            "Failed to fetch More Like This from Elasticsearch for document '%s': %s",
            doc_id,
            e,
            exc_info=True,
        )
        raw_results = {}

    return self._process_results(raw_results, result_class=result_class)
def _process_hits(self, raw_results):
return raw_results.get("hits", {}).get("total", 0)
    def _process_results(
        self,
        raw_results,
        highlight=False,
        result_class=None,
        distance_point=None,
        geo_sort=False,
    ):
        """Translate a raw Elasticsearch response into haystack's result dict.

        Returns a dict with keys ``results`` (list of ``result_class``
        instances), ``hits`` (total count, decremented for hits whose model is
        no longer registered), ``facets`` and ``spelling_suggestion``.

        NOTE(review): ``highlight`` is accepted but unused here — highlighting
        is detected from each raw hit's payload instead.
        """
        from haystack import connections

        results = []
        hits = self._process_hits(raw_results)
        facets = {}
        spelling_suggestion = None

        if result_class is None:
            result_class = SearchResult

        if self.include_spelling and "suggest" in raw_results:
            raw_suggest = raw_results["suggest"].get("suggest")
            if raw_suggest:
                # Keep the original word when ES offers no alternative.
                spelling_suggestion = " ".join(
                    [
                        word["text"]
                        if len(word["options"]) == 0
                        else word["options"][0]["text"]
                        for word in raw_suggest
                    ]
                )

        if "facets" in raw_results:
            facets = {"fields": {}, "dates": {}, "queries": {}}

            # ES can return negative timestamps for pre-1970 data. Handle it.
            def from_timestamp(tm):
                if tm >= 0:
                    return datetime.utcfromtimestamp(tm)
                else:
                    return datetime(1970, 1, 1) + timedelta(seconds=tm)

            for facet_fieldname, facet_info in raw_results["facets"].items():
                if facet_info.get("_type", "terms") == "terms":
                    facets["fields"][facet_fieldname] = [
                        (individual["term"], individual["count"])
                        for individual in facet_info["terms"]
                    ]
                elif facet_info.get("_type", "terms") == "date_histogram":
                    # Elasticsearch provides UTC timestamps with an extra three
                    # decimals of precision, which datetime barfs on.
                    facets["dates"][facet_fieldname] = [
                        (from_timestamp(individual["time"] / 1000), individual["count"])
                        for individual in facet_info["entries"]
                    ]
                elif facet_info.get("_type", "terms") == "query":
                    facets["queries"][facet_fieldname] = facet_info["count"]

        unified_index = connections[self.connection_alias].get_unified_index()
        indexed_models = unified_index.get_indexed_models()
        content_field = unified_index.document_field

        for raw_result in raw_results.get("hits", {}).get("hits", []):
            source = raw_result["_source"]
            app_label, model_name = source[DJANGO_CT].split(".")
            additional_fields = {}
            model = haystack_get_model(app_label, model_name)

            if model and model in indexed_models:
                # "source and" short-circuits on an empty _source payload;
                # index stays falsy in that case.
                index = source and unified_index.get_index(model)

                for key, value in source.items():
                    string_key = str(key)

                    if string_key in index.fields and hasattr(
                        index.fields[string_key], "convert"
                    ):
                        additional_fields[string_key] = index.fields[
                            string_key
                        ].convert(value)
                    else:
                        additional_fields[string_key] = self._to_python(value)

                del additional_fields[DJANGO_CT]
                del additional_fields[DJANGO_ID]

                if "highlight" in raw_result:
                    additional_fields["highlighted"] = raw_result["highlight"].get(
                        content_field, ""
                    )

                if distance_point:
                    additional_fields["_point_of_origin"] = distance_point

                    if geo_sort and raw_result.get("sort"):
                        from django.contrib.gis.measure import Distance

                        additional_fields["_distance"] = Distance(
                            km=float(raw_result["sort"][0])
                        )
                    else:
                        additional_fields["_distance"] = None

                result = result_class(
                    app_label,
                    model_name,
                    source[DJANGO_ID],
                    raw_result["_score"],
                    **additional_fields
                )
                results.append(result)
            else:
                # Model unregistered since indexing: drop the hit from the count.
                hits -= 1

        return {
            "results": results,
            "hits": hits,
            "facets": facets,
            "spelling_suggestion": spelling_suggestion,
        }
def _get_common_mapping(self):
return {
DJANGO_CT: {
"type": "string",
"index": "not_analyzed",
"include_in_all": False,
},
DJANGO_ID: {
"type": "string",
"index": "not_analyzed",
"include_in_all": False,
},
}
def build_schema(self, fields):
content_field_name = ""
mapping = self._get_common_mapping()
for _, field_class in fields.items():
field_mapping = FIELD_MAPPINGS.get(
field_class.field_type, DEFAULT_FIELD_MAPPING
).copy()
if field_class.boost != 1.0:
field_mapping["boost"] = field_class.boost
if field_class.document is True:
content_field_name = field_class.index_fieldname
# Do this last to override `text` fields.
if field_mapping["type"] == "string":
if field_class.indexed is False or hasattr(field_class, "facet_for"):
field_mapping["index"] = "not_analyzed"
del field_mapping["analyzer"]
mapping[field_class.index_fieldname] = field_mapping
return (content_field_name, mapping)
def _iso_datetime(self, value):
"""
If value appears to be something datetime-like, return it in ISO format.
Otherwise, return None.
"""
if hasattr(value, "strftime"):
if hasattr(value, "hour"):
return value.isoformat()
else:
return "%sT00:00:00" % value.isoformat()
def _from_python(self, value):
"""Convert more Python data types to ES-understandable JSON."""
iso = self._iso_datetime(value)
if iso:
return iso
elif isinstance(value, bytes):
# TODO: Be stricter.
return str(value, errors="replace")
elif isinstance(value, set):
return list(value)
return value
def _to_python(self, value):
"""Convert values from ElasticSearch to native Python values."""
if isinstance(value, (int, float, complex, list, tuple, bool)):
return value
if isinstance(value, str):
possible_datetime = DATETIME_REGEX.search(value)
if possible_datetime:
date_values = possible_datetime.groupdict()
for dk, dv in date_values.items():
date_values[dk] = int(dv)
return datetime(
date_values["year"],
date_values["month"],
date_values["day"],
date_values["hour"],
date_values["minute"],
date_values["second"],
)
try:
# This is slightly gross but it's hard to tell otherwise what the
# string's original type might have been. Be careful who you trust.
converted_value = eval(value)
# Try to handle most built-in types.
if isinstance(
converted_value, (int, list, tuple, set, dict, float, complex)
):
return converted_value
except Exception:
# If it fails (SyntaxError or its ilk) or we don't trust it,
# continue on.
pass
return value
# DRL_FIXME: Perhaps move to something where, if none of these
# match, call a custom method on the form that returns, per-backend,
# the right type of storage?
# Fallback ES mapping used when a haystack field type has no explicit
# entry in FIELD_MAPPINGS below.
DEFAULT_FIELD_MAPPING = {"type": "string", "analyzer": "snowball"}
# Haystack field type -> legacy (pre-ES5) Elasticsearch mapping definition.
# Note "integer" is deliberately widened to the ES "long" type.
FIELD_MAPPINGS = {
    "edge_ngram": {"type": "string", "analyzer": "edgengram_analyzer"},
    "ngram": {"type": "string", "analyzer": "ngram_analyzer"},
    "date": {"type": "date"},
    "datetime": {"type": "date"},
    "location": {"type": "geo_point"},
    "boolean": {"type": "boolean"},
    "float": {"type": "float"},
    "long": {"type": "long"},
    "integer": {"type": "long"},
}
# Sucks that this is almost an exact copy of what's in the Solr backend,
# but we can't import due to dependencies.
class ElasticsearchSearchQuery(BaseSearchQuery):
    """Builds Lucene-syntax query fragments and search kwargs for the ES backend."""

    def matching_all_fragment(self):
        """Return the fragment that matches every document."""
        return "*:*"

    def build_query_fragment(self, field, filter_type, value):
        """Return one ``fieldname:expression`` fragment of the final query.

        *value* is normalised to a haystack ``InputType`` first; the
        expression template is then chosen by *filter_type* (contains,
        startswith, range, in, exact, ...).
        """
        from haystack import connections

        query_frag = ""

        if not hasattr(value, "input_type_name"):
            # Handle when we've got a ``ValuesListQuerySet``...
            if hasattr(value, "values_list"):
                value = list(value)

            if isinstance(value, str):
                # It's not an ``InputType``. Assume ``Clean``.
                value = Clean(value)
            else:
                value = PythonData(value)

        # Prepare the query using the InputType.
        prepared_value = value.prepare(self)

        if not isinstance(prepared_value, (set, list, tuple)):
            # Then convert whatever we get back to what pysolr wants if needed.
            prepared_value = self.backend._from_python(prepared_value)

        # 'content' is a special reserved word, much like 'pk' in
        # Django's ORM layer. It indicates 'no special field'.
        if field == "content":
            index_fieldname = ""
        else:
            index_fieldname = "%s:" % connections[
                self._using
            ].get_unified_index().get_index_fieldname(field)

        # Lucene expression template per filter type ("in"/"range" are
        # handled separately below).
        filter_types = {
            "content": "%s",
            "contains": "*%s*",
            "endswith": "*%s",
            "startswith": "%s*",
            "exact": "%s",
            "gt": "{%s TO *}",
            "gte": "[%s TO *]",
            "lt": "{* TO %s}",
            "lte": "[* TO %s]",
            "fuzzy": "%s~",
        }

        if value.post_process is False:
            query_frag = prepared_value
        else:
            if filter_type in [
                "content",
                "contains",
                "startswith",
                "endswith",
                "fuzzy",
            ]:
                if value.input_type_name == "exact":
                    query_frag = prepared_value
                else:
                    # Iterate over terms & incorportate the converted form of each into the query.
                    terms = []

                    if isinstance(prepared_value, str):
                        for possible_value in prepared_value.split(" "):
                            terms.append(
                                filter_types[filter_type]
                                % self.backend._from_python(possible_value)
                            )
                    else:
                        terms.append(
                            filter_types[filter_type]
                            % self.backend._from_python(prepared_value)
                        )

                    if len(terms) == 1:
                        query_frag = terms[0]
                    else:
                        query_frag = "(%s)" % " AND ".join(terms)
            elif filter_type == "in":
                in_options = []

                if not prepared_value:
                    # An empty "in" clause can match nothing.
                    query_frag = "(!*:*)"
                else:
                    for possible_value in prepared_value:
                        in_options.append(
                            '"%s"' % self.backend._from_python(possible_value)
                        )
                    query_frag = "(%s)" % " OR ".join(in_options)
            elif filter_type == "range":
                start = self.backend._from_python(prepared_value[0])
                end = self.backend._from_python(prepared_value[1])
                query_frag = '["%s" TO "%s"]' % (start, end)
            elif filter_type == "exact":
                if value.input_type_name == "exact":
                    query_frag = prepared_value
                else:
                    prepared_value = Exact(prepared_value).prepare(self)
                    query_frag = filter_types[filter_type] % prepared_value
            else:
                if value.input_type_name != "exact":
                    prepared_value = Exact(prepared_value).prepare(self)

                query_frag = filter_types[filter_type] % prepared_value

        if len(query_frag) and not isinstance(value, Raw):
            if not query_frag.startswith("(") and not query_frag.endswith(")"):
                query_frag = "(%s)" % query_frag

        return "%s%s" % (index_fieldname, query_frag)

    def build_alt_parser_query(self, parser_name, query_string="", **kwargs):
        """Build a Solr-style local-params query: ``{!parser key=value ...}``."""
        if query_string:
            kwargs["v"] = query_string

        kwarg_bits = []

        # Sorted for a deterministic parameter order.
        for key in sorted(kwargs.keys()):
            if isinstance(kwargs[key], str) and " " in kwargs[key]:
                kwarg_bits.append("%s='%s'" % (key, kwargs[key]))
            else:
                kwarg_bits.append("%s=%s" % (key, kwargs[key]))

        return "{!%s %s}" % (parser_name, " ".join(kwarg_bits))

    def build_params(self, spelling_query=None, **kwargs):
        """Collect the kwargs for ``backend.search`` from this query's state.

        Only options that are actually set on the query are included.
        """
        search_kwargs = {
            "start_offset": self.start_offset,
            "result_class": self.result_class,
        }
        order_by_list = None

        if self.order_by:
            if order_by_list is None:
                order_by_list = []

            # A leading "-" means descending order.
            for field in self.order_by:
                direction = "asc"
                if field.startswith("-"):
                    direction = "desc"
                    field = field[1:]
                order_by_list.append((field, direction))

            search_kwargs["sort_by"] = order_by_list

        if self.date_facets:
            search_kwargs["date_facets"] = self.date_facets

        if self.distance_point:
            search_kwargs["distance_point"] = self.distance_point

        if self.dwithin:
            search_kwargs["dwithin"] = self.dwithin

        if self.end_offset is not None:
            search_kwargs["end_offset"] = self.end_offset

        if self.facets:
            search_kwargs["facets"] = self.facets

        if self.fields:
            search_kwargs["fields"] = self.fields

        if self.highlight:
            search_kwargs["highlight"] = self.highlight

        if self.models:
            search_kwargs["models"] = self.models

        if self.narrow_queries:
            search_kwargs["narrow_queries"] = self.narrow_queries

        if self.query_facets:
            search_kwargs["query_facets"] = self.query_facets

        if self.within:
            search_kwargs["within"] = self.within

        if spelling_query:
            search_kwargs["spelling_query"] = spelling_query
        elif self.spelling_query:
            search_kwargs["spelling_query"] = self.spelling_query

        return search_kwargs

    def run(self, spelling_query=None, **kwargs):
        """Builds and executes the query. Returns a list of search results."""
        final_query = self.build_query()
        search_kwargs = self.build_params(spelling_query, **kwargs)

        if kwargs:
            search_kwargs.update(kwargs)

        results = self.backend.search(final_query, **search_kwargs)
        self._results = results.get("results", [])
        self._hit_count = results.get("hits", 0)
        self._facet_counts = self.post_process_facets(results)
        self._spelling_suggestion = results.get("spelling_suggestion", None)

    def run_mlt(self, **kwargs):
        """Builds and executes the query. Returns a list of search results."""
        if self._more_like_this is False or self._mlt_instance is None:
            raise MoreLikeThisError(
                "No instance was provided to determine 'More Like This' results."
            )

        additional_query_string = self.build_query()
        search_kwargs = {
            "start_offset": self.start_offset,
            "result_class": self.result_class,
            "models": self.models,
        }

        if self.end_offset is not None:
            search_kwargs["end_offset"] = self.end_offset - self.start_offset

        results = self.backend.more_like_this(
            self._mlt_instance, additional_query_string, **search_kwargs
        )
        self._results = results.get("results", [])
        self._hit_count = results.get("hits", 0)
class ElasticsearchSearchEngine(BaseEngine):
    """Wires the backported ES backend and query class into haystack."""

    backend = ElasticsearchSearchBackend
    query = ElasticsearchSearchQuery
| {
"content_hash": "3fc690fa4708a65979432fc00006d006",
"timestamp": "",
"source": "github",
"line_count": 1146,
"max_line_length": 118,
"avg_line_length": 34.75130890052356,
"alnum_prop": 0.4937853107344633,
"repo_name": "reviewboard/reviewboard",
"id": "8101b3b8a3605603bcb5e4fd3b0889d3f1666714",
"size": "39825",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reviewboard/search/search_backends/haystack_backports/elasticsearch_backend.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10167"
},
{
"name": "Dockerfile",
"bytes": "7721"
},
{
"name": "HTML",
"bytes": "226489"
},
{
"name": "JavaScript",
"bytes": "3991608"
},
{
"name": "Less",
"bytes": "438017"
},
{
"name": "Python",
"bytes": "9186415"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
} |
""" Test cases for .boxplot method """
import itertools
import string
import numpy as np
import pytest
import pandas.util._test_decorators as td
from pandas import (
DataFrame,
MultiIndex,
Series,
date_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.tests.plotting.common import (
TestPlotBase,
_check_plot_works,
)
import pandas.plotting as plotting
pytestmark = pytest.mark.slow
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
    """Tests for ``DataFrame.boxplot`` / ``DataFrame.plot.box`` (no groupby)."""

    def test_boxplot_legacy1(self):
        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        df["indic"] = ["foo", "bar"] * 3
        df["indic2"] = ["foo", "bar", "foo"] * 2

        _check_plot_works(df.boxplot, return_type="dict")
        _check_plot_works(df.boxplot, column=["one", "two"], return_type="dict")
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, column=["one", "two"], by="indic")
        _check_plot_works(df.boxplot, column="one", by=["indic", "indic2"])
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by=["indic", "indic2"])
        _check_plot_works(plotting._core.boxplot, data=df["one"], return_type="dict")
        _check_plot_works(df.boxplot, notch=1, return_type="dict")
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="indic", notch=1)

    def test_boxplot_legacy2(self):
        df = DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
        df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
        df["Y"] = Series(["A"] * 10)
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.boxplot, by="X")

        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.boxplot("Col1", by="X", ax=ax)
        ax_axes = ax.axes
        assert ax_axes is axes

        fig, ax = self.plt.subplots()
        axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
        ax_axes = ax.axes
        assert ax_axes is axes["A"]

        # Multiple columns with an ax argument should use same figure
        fig, ax = self.plt.subplots()
        with tm.assert_produces_warning(UserWarning):
            axes = df.boxplot(
                column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
            )
        assert axes["Col1"].get_figure() is fig

        # When by is None, check that all relevant lines are present in the
        # dict
        fig, ax = self.plt.subplots()
        d = df.boxplot(ax=ax, return_type="dict")
        lines = list(itertools.chain.from_iterable(d.values()))
        assert len(ax.get_lines()) == len(lines)

    def test_boxplot_return_type_none(self):
        # GH 12216; return_type=None & by=None -> axes
        result = self.hist_df.boxplot()
        assert isinstance(result, self.plt.Axes)

    def test_boxplot_return_type_legacy(self):
        # API change in https://github.com/pandas-dev/pandas/pull/7096
        import matplotlib as mpl  # noqa

        df = DataFrame(
            np.random.randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        msg = "return_type must be {'axes', 'dict', 'both'}"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(return_type="NOT_A_TYPE")

        result = df.boxplot()
        self._check_box_return_type(result, "axes")

        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="dict")
        self._check_box_return_type(result, "dict")

        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="axes")
        self._check_box_return_type(result, "axes")

        with tm.assert_produces_warning(False):
            result = df.boxplot(return_type="both")
        self._check_box_return_type(result, "both")

    def test_boxplot_axis_limits(self):
        # Each subplot's y-limits must cover its column's full value range.
        def _check_ax_limits(col, ax):
            y_min, y_max = ax.get_ylim()
            assert y_min <= col.min()
            assert y_max >= col.max()

        df = self.hist_df.copy()
        df["age"] = np.random.randint(1, 20, df.shape[0])
        # One full row
        height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        assert weight_ax._sharey == height_ax

        # Two rows, one partial
        p = df.boxplot(["height", "weight", "age"], by="category")
        height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
        dummy_ax = p[1, 1]

        _check_ax_limits(df["height"], height_ax)
        _check_ax_limits(df["weight"], weight_ax)
        _check_ax_limits(df["age"], age_ax)
        assert weight_ax._sharey == height_ax
        assert age_ax._sharey == height_ax
        assert dummy_ax._sharey is None

    def test_boxplot_empty_column(self):
        df = DataFrame(np.random.randn(20, 4))
        df.loc[:, 0] = np.nan
        _check_plot_works(df.boxplot, return_type="axes")

    def test_figsize(self):
        df = DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])
        result = df.boxplot(return_type="axes", figsize=(12, 8))
        assert result.figure.bbox_inches.width == 12
        assert result.figure.bbox_inches.height == 8

    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
        self._check_ticks_props(
            df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16
        )

    def test_boxplot_numeric_data(self):
        # GH 22799
        df = DataFrame(
            {
                "a": date_range("2012-01-01", periods=100),
                "b": np.random.randn(100),
                "c": np.random.randn(100) + 2,
                "d": date_range("2012-01-01", periods=100).astype(str),
                "e": date_range("2012-01-01", periods=100, tz="UTC"),
                "f": timedelta_range("1 days", periods=100),
            }
        )
        ax = df.plot(kind="box")
        assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]

    @pytest.mark.parametrize(
        "colors_kwd, expected",
        [
            (
                {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
                {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
            ),
            ({"boxes": "r"}, {"boxes": "r"}),
            ("r", {"boxes": "r", "whiskers": "r", "medians": "r", "caps": "r"}),
        ],
    )
    def test_color_kwd(self, colors_kwd, expected):
        # GH: 26214
        df = DataFrame(np.random.rand(10, 2))
        result = df.boxplot(color=colors_kwd, return_type="dict")
        for k, v in expected.items():
            assert result[k][0].get_color() == v

    @pytest.mark.parametrize(
        "scheme,expected",
        [
            (
                "dark_background",
                {
                    "boxes": "#8dd3c7",
                    "whiskers": "#8dd3c7",
                    "medians": "#bfbbd9",
                    "caps": "#8dd3c7",
                },
            ),
            (
                "default",
                {
                    "boxes": "#1f77b4",
                    "whiskers": "#1f77b4",
                    "medians": "#2ca02c",
                    "caps": "#1f77b4",
                },
            ),
        ],
    )
    def test_colors_in_theme(self, scheme, expected):
        # GH: 40769
        df = DataFrame(np.random.rand(10, 2))
        import matplotlib.pyplot as plt

        plt.style.use(scheme)
        result = df.plot.box(return_type="dict")
        for k, v in expected.items():
            assert result[k][0].get_color() == v

    @pytest.mark.parametrize(
        "dict_colors, msg",
        [({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")],
    )
    def test_color_kwd_errors(self, dict_colors, msg):
        # GH: 26214
        df = DataFrame(np.random.rand(10, 2))
        with pytest.raises(ValueError, match=msg):
            df.boxplot(color=dict_colors, return_type="dict")

    @pytest.mark.parametrize(
        "props, expected",
        [
            ("boxprops", "boxes"),
            ("whiskerprops", "whiskers"),
            ("capprops", "caps"),
            ("medianprops", "medians"),
        ],
    )
    def test_specified_props_kwd(self, props, expected):
        # GH 30346
        df = DataFrame({k: np.random.random(100) for k in "ABC"})
        kwd = {props: {"color": "C1"}}
        result = df.boxplot(return_type="dict", **kwd)

        assert result[expected][0].get_color() == "C1"
@td.skip_if_no_mpl
class TestDataFrameGroupByPlots(TestPlotBase):
    """Tests for grouped boxplots: ``GroupBy.boxplot`` and ``boxplot(by=...)``."""

    def test_boxplot_legacy1(self):
        grouped = self.hist_df.groupby(by="gender")
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    def test_boxplot_legacy2(self):
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.groupby(level=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))

        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    def test_boxplot_legacy3(self):
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        grouped = df.unstack(level=1).groupby(level=0, axis=1)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(grouped.boxplot, return_type="axes")
        self._check_axes_shape(list(axes.values), axes_num=3, layout=(2, 2))
        axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

    def test_grouped_plot_fignums(self):
        n = 10
        weight = Series(np.random.normal(166, 20, size=n))
        height = Series(np.random.normal(60, 10, size=n))
        with tm.RNGContext(42):
            gender = np.random.choice(["male", "female"], size=n)
        df = DataFrame({"height": height, "weight": weight, "gender": gender})
        gb = df.groupby("gender")

        res = gb.plot()
        assert len(self.plt.get_fignums()) == 2
        assert len(res) == 2
        tm.close()

        res = gb.boxplot(return_type="axes")
        assert len(self.plt.get_fignums()) == 1
        assert len(res) == 2
        tm.close()

        # now works with GH 5610 as gender is excluded
        res = df.groupby("gender").hist()
        tm.close()

    def test_grouped_box_return_type(self):
        df = self.hist_df

        # old style: return_type=None
        result = df.boxplot(by="gender")
        assert isinstance(result, np.ndarray)
        self._check_box_return_type(
            result, None, expected_keys=["height", "weight", "category"]
        )

        # now for groupby
        result = df.groupby("gender").boxplot(return_type="dict")
        self._check_box_return_type(result, "dict", expected_keys=["Male", "Female"])

        columns2 = "X B C D A G Y N Q O".split()
        df2 = DataFrame(np.random.randn(50, 10), columns=columns2)
        categories2 = "A B C D E F G H I J".split()
        df2["category"] = categories2 * 5

        for t in ["dict", "axes", "both"]:
            returned = df.groupby("classroom").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=["A", "B", "C"])

            returned = df.boxplot(by="classroom", return_type=t)
            self._check_box_return_type(
                returned, t, expected_keys=["height", "weight", "category"]
            )

            returned = df2.groupby("category").boxplot(return_type=t)
            self._check_box_return_type(returned, t, expected_keys=categories2)

            returned = df2.boxplot(by="category", return_type=t)
            self._check_box_return_type(returned, t, expected_keys=columns2)

    def test_grouped_box_layout(self):
        df = self.hist_df

        msg = "Layout of 1x1 must be larger than required size 2"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))

        msg = "The 'layout' keyword is not supported when 'by' is None"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(
                column=["height", "weight", "category"],
                layout=(2, 1),
                return_type="dict",
            )

        msg = "At least one dimension of layout must be positive"
        with pytest.raises(ValueError, match=msg):
            df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))

        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("gender").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=2, layout=(1, 2))

        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))

        # GH 6769
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("classroom").boxplot, column="height", return_type="dict"
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))

        # GH 5897
        axes = df.boxplot(
            column=["height", "weight", "category"], by="gender", return_type="axes"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))
        for ax in [axes["height"]]:
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes["weight"], axes["category"]]:
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])

        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(2, 2))

        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, 2),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))
        with tm.assert_produces_warning(UserWarning):
            box = _check_plot_works(
                df.groupby("category").boxplot,
                column="height",
                layout=(3, -1),
                return_type="dict",
            )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(3, 2))

        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(4, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(4, 1))

        box = df.boxplot(
            column=["height", "weight", "category"], by="gender", layout=(-1, 1)
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(3, 1))

        box = df.groupby("classroom").boxplot(
            column=["height", "weight", "category"], layout=(1, 4), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 4))

        box = df.groupby("classroom").boxplot(  # noqa
            column=["height", "weight", "category"], layout=(1, -1), return_type="dict"
        )
        self._check_axes_shape(self.plt.gcf().axes, axes_num=3, layout=(1, 3))

    def test_grouped_box_multiple_axes(self):
        # GH 6970, GH 7069
        df = self.hist_df

        # check warning to ignore sharex / sharey
        # this check should be done in the first function which
        # passes multiple axes to plot, hist or boxplot
        # location should be changed if other test is added
        # which has earlier alphabetical order
        with tm.assert_produces_warning(UserWarning):
            fig, axes = self.plt.subplots(2, 2)
            df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
            self._check_axes_shape(self.plt.gcf().axes, axes_num=4, layout=(2, 2))

        fig, axes = self.plt.subplots(2, 3)
        with tm.assert_produces_warning(UserWarning):
            returned = df.boxplot(
                column=["height", "weight", "category"],
                by="gender",
                return_type="axes",
                ax=axes[0],
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[0])
        assert returned[0].figure is fig

        # draw on second row
        with tm.assert_produces_warning(UserWarning):
            returned = df.groupby("classroom").boxplot(
                column=["height", "weight", "category"], return_type="axes", ax=axes[1]
            )
        returned = np.array(list(returned.values))
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        tm.assert_numpy_array_equal(returned, axes[1])
        assert returned[0].figure is fig

        msg = "The number of passed axes must be 3, the same as the output plot"
        with pytest.raises(ValueError, match=msg):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            with tm.assert_produces_warning(UserWarning):
                axes = df.groupby("classroom").boxplot(ax=axes)

    def test_fontsize(self):
        df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
        self._check_ticks_props(
            df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
        )

    @pytest.mark.parametrize(
        "col, expected_xticklabel",
        [
            ("v", ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
            (["v"], ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
            ("v1", ["(a, v1)", "(b, v1)", "(c, v1)", "(d, v1)", "(e, v1)"]),
            (
                ["v", "v1"],
                [
                    "(a, v)",
                    "(a, v1)",
                    "(b, v)",
                    "(b, v1)",
                    "(c, v)",
                    "(c, v1)",
                    "(d, v)",
                    "(d, v1)",
                    "(e, v)",
                    "(e, v1)",
                ],
            ),
            (
                None,
                [
                    "(a, v)",
                    "(a, v1)",
                    "(b, v)",
                    "(b, v1)",
                    "(c, v)",
                    "(c, v1)",
                    "(d, v)",
                    "(d, v1)",
                    "(e, v)",
                    "(e, v1)",
                ],
            ),
        ],
    )
    def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel):
        # GH 16748
        df = DataFrame(
            {
                "cat": np.random.choice(list("abcde"), 100),
                "v": np.random.rand(100),
                "v1": np.random.rand(100),
            }
        )
        grouped = df.groupby("cat")

        axes = _check_plot_works(
            grouped.boxplot, subplots=False, column=col, return_type="axes"
        )

        result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
        assert expected_xticklabel == result_xticklabel

    def test_boxplot_multiindex_column(self):
        # GH 16748
        arrays = [
            ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
            ["one", "two", "one", "two", "one", "two", "one", "two"],
        ]
        tuples = list(zip(*arrays))
        index = MultiIndex.from_tuples(tuples, names=["first", "second"])
        df = DataFrame(np.random.randn(3, 8), index=["A", "B", "C"], columns=index)

        col = [("bar", "one"), ("bar", "two")]
        axes = _check_plot_works(df.boxplot, column=col, return_type="axes")

        expected_xticklabel = ["(bar, one)", "(bar, two)"]
        result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
        assert expected_xticklabel == result_xticklabel
| {
"content_hash": "1c53844282bd341df8d4c6393db26f76",
"timestamp": "",
"source": "github",
"line_count": 561,
"max_line_length": 88,
"avg_line_length": 38.288770053475936,
"alnum_prop": 0.5299348230912476,
"repo_name": "rs2/pandas",
"id": "dbceeae44a493316eb58bddfc9c74d4a5c2f4a3b",
"size": "21480",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pandas/tests/plotting/test_boxplot_method.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "127"
},
{
"name": "C",
"bytes": "360253"
},
{
"name": "CSS",
"bytes": "1438"
},
{
"name": "Cython",
"bytes": "1081551"
},
{
"name": "Dockerfile",
"bytes": "1690"
},
{
"name": "HTML",
"bytes": "456275"
},
{
"name": "Makefile",
"bytes": "507"
},
{
"name": "Python",
"bytes": "17393243"
},
{
"name": "Shell",
"bytes": "10872"
},
{
"name": "Smarty",
"bytes": "7820"
},
{
"name": "XSLT",
"bytes": "1196"
}
],
"symlink_target": ""
} |
import pytest
from sovrin_client.test import waits
from stp_core.loop.eventually import eventually
from sovrin_client.test.cli.helper import checkConnectedToEnv, prompt_is
@pytest.fixture(scope="module")
def alice(aliceCLI):
    # Alias fixture: expose the shared Alice CLI under a shorter name.
    return aliceCLI
def test_disconnect_when_not_connected(alice, be, do):
    """'disconnect' on an unconnected CLI reports it and keeps the base prompt."""
    be(alice)
    do(None, expect=prompt_is("sovrin"))
    do('disconnect', within=1, expect=['Not connected to any environment.'])
    do(None, expect=prompt_is("sovrin"))
@pytest.fixture(scope="module")
def alice_connected(alice, be, do, poolNodesCreated):
    """Connect Alice's CLI to the 'test' environment (prompt gains '@test')."""
    be(alice)
    do(None, expect=prompt_is("sovrin"))
    do('connect test', within=5, expect=["Connected to test"])
    do(None, expect=prompt_is("sovrin@test"))
def test_connect_to_test(alice_connected):
    # All assertions happen inside the fixture; reaching here means success.
    pass
@pytest.fixture(scope="module")
def alice_disconnected(alice, be, do, alice_connected):
    """Disconnect a previously connected CLI and verify the prompt reverts."""
    be(alice)
    do(None, expect=prompt_is("sovrin@test"))
    do('disconnect', within=1, expect=[
        'Disconnecting from test ...',
        'Disconnected from test'
    ])
    do(None, expect=prompt_is("sovrin"))
def test_disconnect_when_connected(do, be, alice_disconnected):
    # All assertions happen inside the fixture; reaching here means success.
    pass
def testConnectEnv(poolNodesCreated, looper, notConnectedStatus):
    """End-to-end 'connect' flow on the pool CLI.

    Checks the not-connected status output, rejection of an unknown
    environment name, and a successful connect to 'test'.
    """
    poolCLI = poolNodesCreated
    notConnectedMsgs = notConnectedStatus
    # Done to initialise a wallet.
    poolCLI.enterCmd("new key")
    poolCLI.enterCmd("status")
    for msg in notConnectedMsgs:
        assert msg in poolCLI.lastCmdOutput
    poolCLI.enterCmd("connect dummy")
    assert "Unknown environment dummy" in poolCLI.lastCmdOutput
    poolCLI.enterCmd("connect test")
    assert "Connecting to test" in poolCLI.lastCmdOutput
    # The connection completes asynchronously; poll until established.
    timeout = waits.expectedAgentConnected()
    looper.run(eventually(checkConnectedToEnv, poolCLI, retryWait=1,
                          timeout=timeout))
    poolCLI.enterCmd("status")
    assert "Connected to test Sovrin network" == poolCLI.lastCmdOutput
def testCreateMultiPoolNodes(multiPoolNodesCreated):
assert len(multiPoolNodesCreated) == 2
@pytest.fixture(scope="module")
def pool1(multiPoolNodesCreated):
    # First of the two pools created by the multi-pool fixture.
    return multiPoolNodesCreated[0]
@pytest.fixture(scope="module")
def pool2(multiPoolNodesCreated):
    # Second of the two pools created by the multi-pool fixture.
    return multiPoolNodesCreated[1]
def test_connect_to_different_pools(do, be, cliForMultiNodePools):
    """The CLI can hop between pools; the prompt tracks the active one."""
    be(cliForMultiNodePools)
    do(None, expect=prompt_is("sovrin"))
    do('connect pool1', within=5, expect=["Connected to pool1"])
    do(None, expect=prompt_is("sovrin@pool1"))
    do('connect pool2', within=5, expect=["Connected to pool2"])
    do(None, expect=prompt_is("sovrin@pool2"))
    # Switching back to an already-visited pool also works.
    do('connect pool1', within=5, expect=["Connected to pool1"])
    do(None, expect=prompt_is("sovrin@pool1"))
| {
"content_hash": "33f52fe3fcff1d8af182830a4721054a",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 76,
"avg_line_length": 29.630434782608695,
"alnum_prop": 0.7109317681584739,
"repo_name": "keenondrums/sovrin-node",
"id": "257271ec3a5cacb8242693247b72e630b6df8b37",
"size": "2726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sovrin_client/test/cli/test_connect_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3195"
},
{
"name": "Python",
"bytes": "1088655"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "15720"
}
],
"symlink_target": ""
} |
import unittest
from entities import *
class TestField(unittest.TestCase):
    """Behaviour of the base Field class and generic field helpers."""

    def test_make_default(self):
        # Defaults may be literal values or zero-argument callables.
        field = Field(default=None, null=True)
        self.assertEqual(field.make_default(), None)
        field = Field(default=lambda: 1, null=True)
        self.assertEqual(field.make_default(), 1)
        field = Field(default=2, null=True)
        self.assertEqual(field.make_default(), 2)
        # A None default on a non-nullable field is rejected.
        field = Field(default=None, null=False)
        self.assertRaises(ValueError, field.make_default)

    def test_full_name(self):
        class Foo(Entity):
            field = ListField(DynamicField())
        self.assertEqual(Foo.field.full_name(), 'field')
        # Item fields are addressed with a '<item>' suffix.
        self.assertEqual(Foo.field.item_field.full_name(), 'field.<item>')

    def test_validate(self):
        field = Field(null=True)
        self.assertEqual(field.validate(None), None)
        field = DynamicField(IntegerField, null=True)
        self.assertEqual(field.validate(None), None)
        # Non-nullable dynamic field rejects None and wrong item types.
        field = DynamicField(IntegerField, null=False)
        self.assertRaises(ValidationError, lambda: field.validate(None))
        self.assertRaises(ValidationError, lambda: field.validate(1.0))

    def test_keyify(self):
        # keyify() maps values to hashable keys; scalars pass through.
        field = Field()
        self.assertEqual(field.keyify(None), None)
        self.assertEqual(field.keyify(1), 1)
        self.assertEqual(field.keyify('foo'), 'foo')

    def test_get_set(self):
        class Foo(Entity):
            field = IntegerField(0)
        # Class access yields the descriptor, instance access the value.
        self.assertIsInstance(Foo.field, Field)
        entity = Foo()
        self.assertIsInstance(entity.field, int)
        entity.field = 1
        self.assertEqual(entity.field, 1)

    def test_repr(self):
        class Foo(Entity):
            field = Field()
        self.assertEqual(repr(Foo.field), "Field(name='field')")
class TestDynamicField(unittest.TestCase):
    """make_empty() of DynamicField yields a bare object instance."""

    def test_make_empty(self):
        value = DynamicField().make_empty()
        self.assertIsInstance(value, object)
class TestBooleanField(unittest.TestCase):
    """make_empty() of BooleanField yields the falsy bool default."""

    def test_make_empty(self):
        value = BooleanField().make_empty()
        self.assertIsInstance(value, bool)
        self.assertEqual(value, False)
class TestIntegerField(unittest.TestCase):
    """make_empty() of IntegerField yields the int zero default."""

    def test_make_empty(self):
        value = IntegerField().make_empty()
        self.assertIsInstance(value, int)
        self.assertEqual(value, 0)
class TestFloatField(unittest.TestCase):
    """make_empty() of FloatField yields the float zero default."""

    def test_make_empty(self):
        value = FloatField().make_empty()
        self.assertIsInstance(value, float)
        self.assertEqual(value, 0.0)
class TestStringField(unittest.TestCase):
    """make_empty() of StringField yields an empty unicode string."""

    def test_make_empty(self):
        value = StringField().make_empty()
        # basestring: this suite targets Python 2.
        self.assertIsInstance(value, basestring)
        self.assertEqual(value, u'')
class TestDateField(unittest.TestCase):
    """make_empty() of DateField yields a datetime.date instance."""

    def test_make_empty(self):
        value = DateField().make_empty()
        self.assertIsInstance(value, datetime.date)
class TestTimeField(unittest.TestCase):
    """make_empty() of TimeField yields a datetime.datetime instance
    (per the original assertion -- a full datetime, not a time)."""

    def test_make_empty(self):
        value = TimeField().make_empty()
        self.assertIsInstance(value, datetime.datetime)
class TestCollectionField(unittest.TestCase):
    """Validation and keyification shared by collection fields."""

    def test_make_empty(self):
        empty = ListField().make_empty()
        self.assertIsInstance(empty, list)
        self.assertEqual(len(empty), 0)

    def test_validate(self):
        # An untyped ListField accepts heterogeneous items.
        field = ListField()
        self.assertEqual(field.validate([1, 2.0, '3']), None)
        field = ListField(IntegerField())
        self.assertEqual(field.validate([1, 2, 3]), None)
        self.assertRaises(ValidationError,
                          lambda: field.validate([1, 2, '3']))
        # Several bad items are reported together.
        self.assertRaises(MultipleErrors,
                          lambda: field.validate([1, 2.0, '3']))
        # recursive=True validates nested lists as well.
        field = ListField(IntegerField(), recursive=True)
        self.assertEqual(field.validate([1, [2, 3]]), None)
        self.assertRaises(ValidationError,
                          lambda: field.validate([1, [2, '3']]))
        self.assertRaises(MultipleErrors,
                          lambda: field.validate([1, [2.0, '3']]))

    def test_keyify(self):
        # Lists keyify to hashable tuples, recursively.
        field = ListField()
        self.assertEqual(field.keyify(None), None)
        self.assertEqual(field.keyify([1, 2, 3]), (1, 2, 3))
        field = ListField(ListField())
        self.assertEqual(field.keyify(None), None)
        self.assertEqual(field.keyify([[1], [2, 3]]), ((1,), (2, 3)))
class TestListField(unittest.TestCase):
    """make_empty() of ListField yields an empty list."""

    def test_make_empty(self):
        value = ListField().make_empty()
        self.assertIsInstance(value, list)
        self.assertEqual(len(value), 0)
class TestSetField(unittest.TestCase):
    """make_empty() of SetField yields an empty set."""

    def test_make_empty(self):
        value = SetField().make_empty()
        self.assertIsInstance(value, set)
        self.assertEqual(len(value), 0)
class TestDictField(unittest.TestCase):
    """DictField construction, validation and keyification."""

    def test_make_empty(self):
        empty = DictField().make_empty()
        self.assertIsInstance(empty, dict)
        self.assertEqual(len(empty), 0)

    def test_validate(self):
        field = DictField(IntegerField())
        self.assertEqual(field.validate({'1': 1}), None)

    def test_keyify(self):
        # Dicts keyify to tuples of (key, keyified value) pairs.
        field = DictField()
        self.assertEqual(field.keyify(None), None)
        self.assertEqual(field.keyify({'1': 1, '2': 2}), (('1', 1), ('2', 2)))
        field = DictField(DictField())
        self.assertEqual(field.keyify(None), None)
        self.assertEqual(
            field.keyify({'1': {'1.1': 11}, '2': {'2.1': 21}}),
            (('1', (('1.1', 11),)), ('2', (('2.1', 21),)))
        )
if __name__ == '__main__':
    # Allow running this module directly as a test script.
    unittest.main()
| {
"content_hash": "a88c2bb505086cf053150c3fa6e68484",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 78,
"avg_line_length": 29.74074074074074,
"alnum_prop": 0.6141256004269703,
"repo_name": "eseraygun/python-entities",
"id": "94151876369f57fae8a1c19021cd725b707cb11b",
"size": "5621",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/field.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "20752"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
import codecs
import datetime
import os
import re
from stat import *
import sys
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from jinja2.utils import open_if_exists
try:
import markdown
except ImportError:
markdown = None
try:
from textile import textile
except ImportError:
textile = None
import PyRSS2Gen
import conf
# could be better
# Matches blog file names of the form YYYY[sep]MM[sep]DD-title; groups
# are (year, month, day, title).
re_date = re.compile('^(\d{4})\D?(0[1-9]|1[0-2])\D?([12]\d|0[1-9]|3[01])-(.*)$')
# Shared Jinja environment loading templates from the configured path.
template_env = Environment(loader=FileSystemLoader(conf.TEMPLATES_PATH, encoding="utf-8"))
template_env.charset = 'utf-8'
def render_template(template_name, _stream=False, **kwargs):
    """Render a Jinja template with *kwargs* as the template context.

    When *_stream* is true, return a lazily-evaluated template stream
    instead of the fully rendered string.
    """
    template = template_env.get_template(template_name)
    if _stream:
        return template.stream(kwargs)
    return template.render(kwargs)
def relative_url(value):
    """Jinja filter: return *value* with the site URL prefix removed."""
    base = conf.SITE_URL
    # Normalise a single trailing slash off the configured site URL.
    if base.endswith('/'):
        base = base[:-1]
    # Whatever follows the site URL is the site-relative path.
    return value.split(base)[1]
# Expose the filter to templates as "rel_url".
template_env.filters['rel_url'] = relative_url
def source_newer(source, target):
    """Return True when *target* must be (re)generated from *source*.

    That is the case when "force" was given on the command line, when
    the target does not exist yet, or when the source was modified more
    recently than the target.
    """
    # "force" on the command line rebuilds everything unconditionally.
    force = len(sys.argv) > 1 and sys.argv[1] == "force"
    if force or not os.path.exists(target):
        return True
    return os.stat(source)[ST_MTIME] > os.stat(target)[ST_MTIME]
def convert_markdown(value):
    """Convert Markdown source *value* to an HTML fragment."""
    md = markdown.Markdown(output_format="html")
    # NOTE(review): the output format is already set via the
    # constructor; this call looks redundant -- confirm before removing.
    md.set_output_format('html')
    return md.convert(value)
def convert_textile(value):
    """Convert Textile source *value* to HTML, returned as unicode."""
    # textile() returns a UTF-8 byte string here (Python 2), so decode
    # it back into a unicode object for the templates.
    return textile(value, head_offset=False, encoding='utf-8',
                   output='utf-8').decode('utf-8')
def rfc3339_date(date):
    """Format *date* as an RFC 3339 timestamp string.

    Timezone-aware datetimes keep their UTC offset; naive datetimes are
    labelled 'Z'.  RFC 3339 (and the W3C datetime profile used by
    sitemaps) requires a colon inside the numeric offset, but
    strftime('%z') produces '+HHMM' -- so the colon is inserted here.
    """
    # iso8601
    if date.tzinfo:
        value = date.strftime('%Y-%m-%dT%H:%M:%S%z')
        # '+0000' -> '+00:00' to satisfy RFC 3339 / W3C datetime.
        return value[:-2] + ':' + value[-2:]
    # Naive datetimes are tagged 'Z' -- assumes callers supply UTC-based
    # naive values (TODO: confirm; Page.parse uses local fromtimestamp).
    return date.strftime('%Y-%m-%dT%H:%M:%SZ')
class Site(object):
    """Static-site builder: walks INPUT_PATH, renders pages and blogs
    into OUTPUT_PATH, then emits an RSS feed and an XML sitemap."""

    def __init__(self):
        self.sitemap = []   # pages rendered so far, for sitemaps.xml
        self.feed = []      # blog entries collected for feed.xml
        # Normalise the configured site URL: drop one trailing slash.
        site_url = conf.SITE_URL
        if site_url.endswith('/'):
            site_url = site_url[:-1]
        self.site_url = site_url

    def process_directory(self, current_dir, files, target_path):
        """Render every recognised source file of one directory.

        Blog pages are collected into a Blog and rendered in a second
        pass; regular pages are written immediately.
        """
        files = [f for f in files if os.path.splitext(f)[1] in conf.EXTENSIONS]
        blog = None
        for f in files:
            print "process %s" % f
            page = Page(self, f, current_dir, target_path)
            # NOTE(review): precedence makes this
            # (is_blog() and f == "index.txt") or f == "archives.txt",
            # i.e. archives.txt is skipped even outside blogs -- confirm
            # whether that is intended.
            if page.is_blog() and f == "index.txt" or f == "archives.txt":
                continue
            elif page.is_blog():
                if blog is None:
                    blog = Blog(self, current_dir, target_path)
                blog.append(page)
                continue
            # Skip unchanged pages; index.txt is always rebuilt.
            if not source_newer(page.finput, page.foutput) and f != "index.txt":
                continue
            print "write %s" % page.foutput
            try:
                f = codecs.open(page.foutput, 'w', 'utf-8')
                try:
                    f.write(page.render())
                finally:
                    f.close()
            except (IOError, OSError), err:
                raise
            self.sitemap.append(page)
        if blog is not None:
            blog.render()

    def generate_rss(self):
        """Write the 16 most recent feed entries to OUTPUT_PATH/feed.xml."""
        rss = PyRSS2Gen.RSS2(
            title = conf.SITE_NAME,
            link = conf.SITE_URL,
            description = conf.SITE_DESCRIPTION,
            lastBuildDate = datetime.datetime.utcnow(),
            items = [])
        for i, e in enumerate(self.feed):
            item = PyRSS2Gen.RSSItem(
                title = e['title'],
                link = e['link'],
                description = e['description'],
                guid = PyRSS2Gen.Guid(e['link']),
                pubDate = datetime.datetime.fromtimestamp(e['pubDate']))
            rss.items.append(item)
            # Cap the feed at 16 entries (indices 0..15).
            if i == 15: break
        rss.write_xml(open(os.path.join(conf.OUTPUT_PATH, "feed.xml"), "w"))

    def generate_sitemap(self):
        """Write sitemaps.xml listing every rendered page."""
        xml = u'<?xml version="1.0" encoding="UTF-8"?>'
        xml += u'<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9">'
        for page in self.sitemap:
            xml += u'<url>'
            xml += u'<loc>%s</loc>' % page.url
            xml += u'<lastmod>%s</lastmod>' % rfc3339_date(page.headers['published'])
            xml += u'<changefreq>daily</changefreq>'
            xml += u'<priority>0.5</priority>'
            xml += u'</url>'
        xml += u'</urlset>'
        with codecs.open(os.path.join(conf.OUTPUT_PATH, "sitemaps.xml"), "w", "utf-8") as f:
            f.write(xml)

    def render(self):
        """Walk the input tree, render everything, then emit feed/sitemap."""
        for root, dirs, files in os.walk(conf.INPUT_PATH):
            # Mirror the input directory layout under OUTPUT_PATH.
            target_path = root.replace(conf.INPUT_PATH, conf.OUTPUT_PATH)
            if not os.path.isdir(target_path):
                os.makedirs(target_path)
            self.process_directory(root, files, target_path)
        if self.feed:
            # Newest entries first (Python 2 cmp-style sort).
            self.feed.sort(lambda a, b: a['pubDate'] - b['pubDate'], reverse=True)
            self.generate_rss()
        if self.sitemap:
            self.generate_sitemap()
class Blog(object):
    """Collects the blog pages of one directory and renders them, plus
    the index (and optional archives) page, with an ``entries`` context."""

    def __init__(self, site, current_dir, target_path):
        self.site = site
        self.current_dir = current_dir
        self.target_path = target_path
        self.pages = []

    def append(self, page):
        """Register a blog page, deriving its description and date."""
        # Use the first two non-empty paragraphs as the description.
        paras = [p for p in page.body.split("\n\n") if p]
        if paras:
            description = "\n\n".join(paras[0:2])
        content_type = page.headers.get('content_type', conf.CONTENT_TYPE)
        if content_type == "markdown":
            description = convert_markdown(description)
        elif content_type == "textile":
            description = convert_textile(description)
        # File names may encode the date as YYYY?MM?DD-title.
        m = re_date.match(os.path.splitext(page.filename)[0])
        if m:
            date = "%s-%s-%s" % (m.group(1), m.group(2), m.group(3))
        else:
            date = ""
        page.headers['date'] = date
        page.headers['description'] = description
        self.pages.append(page)

    def render(self):
        """Render the index, optional archives, and all blog pages."""
        index_page = Page(self.site, "index.txt", self.current_dir,
                          self.target_path)
        try:
            archives_page = Page(self.site, "archives.txt", self.current_dir,
                                 self.target_path)
        except IOError:
            archives_page = None
        if not os.path.isfile(index_page.finput):
            raise IOError, "index.txt isn't found in %s" % self.current_dir
        # Newest first (Python 2 cmp-style sort on ctime timestamps).
        self.pages.sort(lambda a, b: a.headers['pubDate'] - b.headers['pubDate'], reverse=True)
        entries = []
        # first pass
        for page in self.pages:
            entry = {
                "title": page.headers.get('title', page.filename),
                "description": page.headers['description'],
                "link": page.url,
                "pubDate": page.headers['pubDate'],
                "date": page.headers['date']
            }
            self.site.feed.append(entry)
            entries.append(entry)
        self.pages.append(index_page)
        if archives_page is not None:
            self.pages.append(archives_page)
        # second pass : render pages
        for page in self.pages:
            # Every page (including index/archives) sees all entries.
            page.headers['entries'] = entries
            try:
                f = codecs.open(page.foutput, 'w', 'utf-8')
                try:
                    f.write(page.render())
                finally:
                    f.close()
            except (IOError, OSError), err:
                raise
            self.site.sitemap.append(page)
class Page(object):
    """One source page: parses header/body and renders via a template."""

    # Map content_type header values to MIME types.
    content_types = {
        'html': 'text/html',
        'markdown': 'text/html',
        'textile': 'text/html',
        'text': 'text/plain'
    }
    # Map content_type header values to output file extensions.
    files_ext = {
        'html': 'html',
        'markdown': 'html',
        'textile': 'html',
        'text': 'txt'
    }

    def __init__(self, site, filename, current_dir, target_path):
        self.filename = filename
        self.current_dir = current_dir
        self.target_path = target_path
        self.finput = os.path.join(current_dir, filename)
        self.parsed = False
        self.foutput = ''
        self.site = site
        self.headers = {}
        self.body = ""
        self.parse()

    def get_url(self):
        """Absolute URL of the rendered page under the site URL."""
        rel_path = self.foutput.split(conf.OUTPUT_PATH)[1]
        if rel_path.startswith('/'):
            rel_path = rel_path[1:]
        return "/".join([self.site.site_url, rel_path])

    def parse(self):
        """Split the source file into 'Name: value' headers and a body
        (separated by the first blank line), and derive output paths."""
        with open(self.finput, 'r') as f:
            headers = {}
            raw = f.read()
            try:
                (header_lines,body) = raw.split("\n\n", 1)
                for header in header_lines.split("\n"):
                    (name, value) = header.split(": ", 1)
                    headers[name.lower()] = unicode(value.strip())
                self.headers = headers
                # NOTE(review): pubDate is the file's ctime, not mtime --
                # confirm that is the intended publication date.
                self.headers['pubDate'] = os.stat(self.finput)[ST_CTIME]
                self.headers['published'] = datetime.datetime.fromtimestamp(self.headers['pubDate'])
                self.body = body
                content_type = self.headers.get('content_type', conf.CONTENT_TYPE)
                if content_type in self.content_types.keys():
                    self.foutput = os.path.join(self.target_path,
                        "%s.%s" % (os.path.splitext(self.filename)[0], self.files_ext[content_type]))
                    self.url = self.get_url()
                else:
                    raise TypeError, "Unknown content_type"
            except:
                # Any parse failure is surfaced as a single error type.
                raise TypeError, "Invalid page file format for %s" % self.finput
        self.parsed = True

    def is_blog(self):
        """True when the page declares ``page_type: blog``."""
        if not 'page_type' in self.headers:
            return False
        return (self.headers['page_type'] == "blog")

    def render(self):
        """Render the page body with its template, by content type."""
        if not self.parsed:
            self.parse()
        template = self.headers.get('template', conf.DEFAULT_TEMPLATE)
        content_type = self.headers.get('content_type', conf.CONTENT_TYPE)
        if content_type in self.content_types.keys():
            # Dispatch to render_html / render_markdown / ... below.
            fun = getattr(self, "render_%s" % content_type)
            return fun(template)
        else:
            raise TypeError, "Unknown content_type"

    def _render_html(self, template, body):
        """Render *body* through the Jinja template with page context."""
        kwargs = {
            "body": body,
            "sitename": conf.SITE_NAME,
            "siteurl": conf.SITE_URL,
            "url": self.url
        }
        # Page headers override/extend the base context.
        kwargs.update(self.headers)
        return render_template(template, **kwargs)

    def render_html(self, template):
        return self._render_html(template, self.body)

    def render_markdown(self, template):
        if markdown is None:
            raise TypeError, "markdown isn't suported"
        body = convert_markdown(self.body)
        return self._render_html(template, body)

    def render_textile(self, template):
        if textile is None:
            raise TypeError, "textile isn't suported"
        body = convert_textile(self.body)
        return self._render_html(template, body)

    def render_text(self, template):
        # Plain-text pages bypass templating entirely.
        return self.body
def main():
    """Build the whole site."""
    site = Site()
    site.render()
if __name__ == "__main__":
    main()
| {
"content_hash": "f5ee6edb17f380b695fe28758d5595cc",
"timestamp": "",
"source": "github",
"line_count": 345,
"max_line_length": 105,
"avg_line_length": 33.2695652173913,
"alnum_prop": 0.525265725736191,
"repo_name": "arnaudsj/couchdbkit",
"id": "ea538bfaa3e277362cf1787cffda8f352ee7a797",
"size": "12310",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "doc/couchdbkit.org/buildweb.py",
"mode": "33261",
"license": "mit",
"language": [],
"symlink_target": ""
} |
'''
Given integers n and k, find the lexicographically k-th smallest integer in the range from 1 to n.
Note: 1 <= k <= n <= 10^9.
Example:
Input:
n: 13 k: 2
Output:
10
Explanation:
The lexicographical order is [1, 10, 11, 12, 13, 2, 3, 4, 5, 6, 7, 8, 9], so the second smallest number is 10.
'''
'''
A solution explaination:
https://discuss.leetcode.com/topic/64539/java-7ms-denary-trie-tree-solution-with-detailed-explanation
'''
class Solution(object):
    """Find the k-th smallest number of [1, n] in lexicographic order.

    Walks a conceptual denary (base-10) trie: at every step either skip
    the whole subtree rooted at the current prefix (move to the next
    sibling) or descend into its first child.
    """

    def findKthNumber(self, n, k):
        """
        :type n: int
        :type k: int
        :rtype: int
        """
        current = 1
        remaining = k - 1  # steps still to take beyond `current`
        while remaining > 0:
            # Count how many numbers <= n live in the subtree rooted
            # at `current` (prefix range [lo, hi) widened by 10 each
            # level).
            subtree = 0
            lo, hi = current, current + 1
            while lo <= n:
                subtree += min(n + 1, hi) - lo
                lo, hi = lo * 10, hi * 10
            if remaining >= subtree:
                # The whole subtree precedes the target: next sibling.
                current += 1
                remaining -= subtree
            else:
                # The target is inside: descend to the first child.
                current *= 10
                remaining -= 1
        return current
# Ad-hoc smoke checks (Python 2 print statements).  Known from the
# problem statement: findKthNumber(13, 2) == 10.
solution = Solution()
print solution.findKthNumber(4289384,1922239)
print solution.findKthNumber(9885387,8786251)
print solution.findKthNumber(13,2)
print solution.findKthNumber(10,3)
| {
"content_hash": "f963815517818907630f624371418e0c",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 110,
"avg_line_length": 24.943396226415093,
"alnum_prop": 0.5597579425113465,
"repo_name": "shub0/algorithm-data-structure",
"id": "2bd661e73d5ec60b0313f2409b19ead6d939eaab",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/k_element_lexicographical_order.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Java",
"bytes": "166293"
},
{
"name": "Python",
"bytes": "487573"
}
],
"symlink_target": ""
} |
from slacker import Slacker
import yaml
# Load the bot configuration.  A context manager guarantees the handle
# is closed, and safe_load prevents arbitrary YAML tags from executing
# code (yaml.load without a Loader is unsafe and deprecated).  The
# original used the Python-2-only file() builtin and never closed it.
with open('rtmbot.conf', 'r') as config_file:
    config = yaml.safe_load(config_file)
api_token = config["SLACK_TOKEN"]
slack = Slacker(api_token)
# Send a message to #general channel
slack.chat.post_message('#bot_test', 'Hello fellow slackers!', as_user=True)
# Upload a file
slack.files.upload('carrot.png', channels="#bot_test")
| {
"content_hash": "2501b12acf6d742b27a03a45f1ad3d9f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 76,
"avg_line_length": 27.833333333333332,
"alnum_prop": 0.7245508982035929,
"repo_name": "martinpeck/peckbot",
"id": "6b91518d21bcb11e82cd0d47e0249dfffb5b2579",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_file_upload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9398"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Create the aldryn_news Tag, TagTranslation and TaggedItem tables."""
        # Adding model 'TagTranslation'
        db.create_table('aldryn_news_tag_translation', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
            ('language_code', self.gf('django.db.models.fields.CharField')(max_length=15, db_index=True)),
            ('master', self.gf('django.db.models.fields.related.ForeignKey')(related_name='translations', null=True, to=orm['aldryn_news.Tag'])),
        ))
        db.send_create_signal('aldryn_news', ['TagTranslation'])
        # Adding unique constraint on 'TagTranslation', fields ['language_code', 'master']
        db.create_unique('aldryn_news_tag_translation', ['language_code', 'master_id'])
        # Adding model 'Tag'
        db.create_table('aldryn_news_tag', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=100)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=100)),
        ))
        db.send_create_signal('aldryn_news', ['Tag'])
        # Adding model 'TaggedItem'
        db.create_table('aldryn_news_taggeditem', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('object_id', self.gf('django.db.models.fields.IntegerField')(db_index=True)),
            ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name=u'aldryn_news_taggeditem_tagged_items', to=orm['contenttypes.ContentType'])),
            ('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='aldryn_news_taggeditem_items', to=orm['aldryn_news.Tag'])),
        ))
        db.send_create_signal('aldryn_news', ['TaggedItem'])
    def backwards(self, orm):
        """Drop the tables created by forwards(); the unique constraint
        must be removed before its table."""
        # Removing unique constraint on 'TagTranslation', fields ['language_code', 'master']
        db.delete_unique('aldryn_news_tag_translation', ['language_code', 'master_id'])
        # Deleting model 'TagTranslation'
        db.delete_table('aldryn_news_tag_translation')
        # Deleting model 'Tag'
        db.delete_table('aldryn_news_tag')
        # Deleting model 'TaggedItem'
        db.delete_table('aldryn_news_taggeditem')
models = {
'aldryn_news.category': {
'Meta': {'ordering': "['ordering']", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'aldryn_news.categorytranslation': {
'Meta': {'unique_together': "[['slug', 'language_code'], ('language_code', 'master')]", 'object_name': 'CategoryTranslation', 'db_table': "'aldryn_news_category_translation'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': "orm['aldryn_news.Category']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'})
},
'aldryn_news.latestnewsplugin': {
'Meta': {'object_name': 'LatestNewsPlugin', 'db_table': "'cmsplugin_latestnewsplugin'", '_ormbases': ['cms.CMSPlugin']},
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'latest_entries': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['taggit.Tag']", 'symmetrical': 'False', 'blank': 'True'})
},
'aldryn_news.news': {
'Meta': {'ordering': "['-publication_start']", 'object_name': 'News'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['aldryn_news.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key_visual': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['filer.Image']", 'null': 'True', 'blank': 'True'}),
'publication_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publication_start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'})
},
'aldryn_news.newstranslation': {
'Meta': {'unique_together': "[['slug', 'language_code'], ('language_code', 'master')]", 'object_name': 'NewsTranslation', 'db_table': "'aldryn_news_news_translation'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'lead_in': ('djangocms_text_ckeditor.fields.HTMLField', [], {}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': "orm['aldryn_news.News']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'aldryn_news.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'aldryn_news.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'aldryn_news_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'aldryn_news_taggeditem_items'", 'to': "orm['aldryn_news.Tag']"})
},
'aldryn_news.tagtranslation': {
'Meta': {'unique_together': "[('language_code', 'master')]", 'object_name': 'TagTranslation', 'db_table': "'aldryn_news_tag_translation'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': "orm['aldryn_news.Tag']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'filer.file': {
'Meta': {'object_name': 'File'},
'_file_size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'folder': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'all_files'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'has_all_mandatory_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'original_filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'owned_files'", 'null': 'True', 'to': "orm['auth.User']"}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_filer.file_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'sha1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.folder': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('parent', 'name'),)", 'object_name': 'Folder'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'modified_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'filer_owned_folders'", 'null': 'True', 'to': "orm['auth.User']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['filer.Folder']"}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'uploaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'})
},
'filer.image': {
'Meta': {'object_name': 'Image', '_ormbases': ['filer.File']},
'_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'default_alt_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'default_caption': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'file_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['filer.File']", 'unique': 'True', 'primary_key': 'True'}),
'must_always_publish_author_credit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'must_always_publish_copyright': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subject_location': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '64', 'null': 'True', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
}
}
complete_apps = ['aldryn_news'] | {
"content_hash": "b1b8cd8af2eab84f703be2742befbe58",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 191,
"avg_line_length": 78.8609865470852,
"alnum_prop": 0.5620948481746844,
"repo_name": "aldryn/aldryn-news",
"id": "0047c5dda061065f1291b609c298893938bcf933",
"size": "17610",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aldryn_news/migrations/0005_auto__add_tagtranslation__add_unique_tagtranslation_language_code_mast.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Gettext Catalog",
"bytes": "14192"
},
{
"name": "HTML",
"bytes": "4680"
},
{
"name": "Python",
"bytes": "175914"
}
],
"symlink_target": ""
} |
"""
Create and delete FILES_PER_THREAD temp files (via tempfile.TemporaryFile)
in each of NUM_THREADS threads, recording the number of successes and
failures. A failure is a bug in tempfile, and may be due to:
+ Trying to create more than one tempfile with the same name.
+ Trying to delete a tempfile that doesn't still exist.
+ Something we've never seen before.
By default, NUM_THREADS == 20 and FILES_PER_THREAD == 50. This is enough to
create about 150 failures per run under Win98SE in 2.0, and runs pretty
quickly. Guido reports needing to boost FILES_PER_THREAD to 500 before
provoking a 2.0 failure under Linux.
"""
NUM_THREADS = 20        # number of concurrent worker threads
FILES_PER_THREAD = 50   # temp files created and deleted per thread
import tempfile
from test.test_support import start_threads, run_unittest, import_module
threading = import_module('threading')
import unittest
import StringIO
from traceback import print_exc
# All workers block on this event and are released at once, maximizing
# contention on tempfile's name-generation machinery.
startEvent = threading.Event()
class TempFileGreedy(threading.Thread):
    """Thread that repeatedly creates and closes temporary files.

    Successes are tallied in ok_count, failures in error_count, and the
    traceback of each failure is accumulated in self.errors.
    """
    # Class-level counters; the first `+=` rebinds them as instance
    # attributes (ints are immutable), so counts are per-thread.
    error_count = 0
    ok_count = 0

    def run(self):
        self.errors = StringIO.StringIO()
        # Wait for the main thread to fire startEvent so that every
        # worker hits tempfile at the same moment.
        startEvent.wait()
        for i in range(FILES_PER_THREAD):
            try:
                f = tempfile.TemporaryFile("w+b")
                f.close()
            except:
                # Deliberately broad: any exception here is, by this
                # test's definition, a tempfile bug worth recording.
                self.error_count += 1
                print_exc(file=self.errors)
            else:
                self.ok_count += 1
class ThreadedTempFileTest(unittest.TestCase):
    """Run NUM_THREADS TempFileGreedy workers and require zero failures."""

    def test_main(self):
        threads = [TempFileGreedy() for i in range(NUM_THREADS)]
        # start_threads starts every worker; startEvent.set releases them
        # simultaneously, and the context manager joins them on exit.
        with start_threads(threads, startEvent.set):
            pass
        ok = sum(t.ok_count for t in threads)
        # Collect per-thread failure tracebacks for the assertion message.
        errors = [str(t.getName()) + str(t.errors.getvalue())
                  for t in threads if t.error_count]
        msg = "Errors: errors %d ok %d\n%s" % (len(errors), ok,
                                               '\n'.join(errors))
        self.assertEqual(errors, [], msg)
        self.assertEqual(ok, NUM_THREADS * FILES_PER_THREAD)
def test_main():
    """Entry point used by the regrtest test driver."""
    run_unittest(ThreadedTempFileTest)

if __name__ == "__main__":
    test_main()
| {
"content_hash": "4e5eddd20d9d77a936fd4400bc863fe0",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 76,
"avg_line_length": 32.09230769230769,
"alnum_prop": 0.6255992329817833,
"repo_name": "Jeff-Tian/mybnb",
"id": "245fdd1c91b1187d27b26af3f8445949eca96079",
"size": "2086",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Python27/Lib/test/test_threadedtempfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "455330"
},
{
"name": "Batchfile",
"bytes": "6263"
},
{
"name": "C",
"bytes": "2304983"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "31815"
},
{
"name": "CSS",
"bytes": "30628"
},
{
"name": "Cucumber",
"bytes": "248616"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "31983"
},
{
"name": "HTML",
"bytes": "376863"
},
{
"name": "JavaScript",
"bytes": "20239"
},
{
"name": "M4",
"bytes": "67848"
},
{
"name": "Makefile",
"bytes": "142926"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "19913027"
},
{
"name": "REXX",
"bytes": "3862"
},
{
"name": "Ruby",
"bytes": "14954382"
},
{
"name": "Shell",
"bytes": "366205"
},
{
"name": "Tcl",
"bytes": "2150972"
},
{
"name": "TeX",
"bytes": "230259"
},
{
"name": "Visual Basic",
"bytes": "494"
},
{
"name": "XSLT",
"bytes": "3736"
},
{
"name": "Yacc",
"bytes": "14342"
}
],
"symlink_target": ""
} |
from anodos.models import Log
from catalog.models import *
class Runner:
    """Recalculates retail prices and quantities for every Product.

    Iterates over all products, calls ``recalculate()`` on each, and
    writes a summary entry to the Log when done. Set ``runner.test =
    True`` before calling ``run()`` to get per-product progress output.
    """

    # NOTE: the value of `name` is persisted via Updater.objects.take()
    # and must not be translated/changed.
    name = 'Перерасчет розничных цен и количества'
    alias = 'recalculate'

    def __init__(self):
        self.updater = Updater.objects.take(alias = self.alias, name = self.name)
        # BUG FIX: run() reads self.test, but no code path ever set it,
        # so run() always raised AttributeError. Default to quiet mode.
        self.test = False

    def run(self):
        """Recalculate every product and log a summary."""
        products = Product.objects.values('id')
        # Evaluate the queryset length once instead of on every iteration.
        total = len(products)
        for n, product in enumerate(products):
            # Re-fetch the full model instance; values('id') only gave ids.
            product = Product.objects.get(id = product['id'])
            product.recalculate()
            if self.test:
                print('{} / {}: {}'.format(n + 1, total, product))
        Log.objects.add(subject = "catalog.updater.{}".format(self.updater.alias),
                        channel = "info",
                        title = "Updated",
                        description = "Products: {}.".format('{:,}'.format(total).replace(',', ' ')))
| {
"content_hash": "9233c61f9b2221b8298e22d268d80bdc",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 109,
"avg_line_length": 30.5,
"alnum_prop": 0.550351288056206,
"repo_name": "anodos-ru/catalog",
"id": "67077fb74333a8c66bc26df7631763d262d52bb1",
"size": "887",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "updaters/recalculate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "46988"
},
{
"name": "JavaScript",
"bytes": "76750"
},
{
"name": "Python",
"bytes": "305629"
}
],
"symlink_target": ""
} |
import json
import urllib, urllib2
from keys import BING_API_KEY
def run_query(search_terms):
    """Query the Bing Search API for *search_terms*.

    Returns a list of dicts with 'title', 'link' and 'summary' keys, one
    per result, or an empty list if the HTTP request fails.
    NOTE(review): Python 2 code (urllib2, print statements) targeting the
    legacy Azure Datamarket endpoint, which has since been retired.
    """
    # Specify the base
    root_url = 'https://api.datamarket.azure.com/Bing/Search/'
    source = 'Web'
    # Specify how many results we wish to be returned per page.
    # Offset specifies where in the results list to start from.
    # With results_per_page = 10 and offset = 11, this would start from page 2.
    results_per_page = 10
    offset = 0
    # Wrap quotes around our query terms as required by the Bing API.
    # The query we will then use is stored within variable query.
    query = "'{0}'".format(search_terms)
    query = urllib.quote(query)
    # Construct the latter part of our request's URL.
    # Sets the format of the response to JSON and sets other properties.
    search_url = "{0}{1}?$format=json&$top={2}&$skip={3}&Query={4}".format(
        root_url,
        source,
        results_per_page,
        offset,
        query)
    # Setup authentication with the Bing servers.
    # The username MUST be a blank string, and put in your API key!
    username = ''
    # Create a 'password manager' which handles authentication for us.
    password_mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    password_mgr.add_password(None, search_url, username, BING_API_KEY)
    # Create our results list which we'll populate.
    results = []
    try:
        # Prepare for connecting to Bing's servers.
        handler = urllib2.HTTPBasicAuthHandler(password_mgr)
        opener = urllib2.build_opener(handler)
        urllib2.install_opener(opener)
        # Connect to the server and read the response generated.
        response = urllib2.urlopen(search_url).read()
        # Convert the string response to a Python dictionary object.
        json_response = json.loads(response)
        # Loop through each page returned, populating out results list.
        for result in json_response['d']['results']:
            results.append({
                'title': result['Title'],
                'link': result['Url'],
                'summary': result['Description']})
    # Catch a URLError exception - something went wrong when connecting!
    except urllib2.URLError, e:
        print "Error when querying the Bing API: ", e
    # Return the list of results to the calling function.
    return results
def main():
    """Interactive smoke test: prompt for a query, print the top 10 hits."""
    search_terms = raw_input('Search for:')
    results = run_query(search_terms)
    rank = 1
    for result in results[:10]:
        print "Rank: ", rank
        rank = rank + 1
        print "Title: ", result['title']
        print "Link: ", result['link']
        print ""

if __name__ == '__main__':
    main()
"content_hash": "cc215da37da859a89b9d9f74ff065ae0",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 79,
"avg_line_length": 33.721518987341774,
"alnum_prop": 0.6358858858858859,
"repo_name": "leifos/tango_with_tests",
"id": "b531757136faf106fd1fca970422c1032c8f386c",
"size": "2664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tango_with_django_project/rango/bing_search.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21513"
},
{
"name": "JavaScript",
"bytes": "992"
},
{
"name": "Python",
"bytes": "173866"
}
],
"symlink_target": ""
} |
"""
GPLv3 license (ASTRA toolbox)
Note that the TomoPhantom package is released under Apache License, Version 2.0
* Script to generate 3D analytical phantoms and their projection data using TomoPhantom
* Projection data is also generated numerically and reconstructed using
* tomobar/ ASTRA TOOLBOX
>>>>> Dependencies (reconstruction): <<<<<
1. ASTRA toolbox: conda install -c astra-toolbox astra-toolbox
2. tomobar: conda install -c dkazanc tomobar
or install from https://github.com/dkazanc/ToMoBAR
@author: Daniil Kazantsev
"""
import timeit
import os
import matplotlib.pyplot as plt
import numpy as np
import tomophantom
from tomophantom import TomoP3D
from tomophantom.supp.qualitymetrics import QualityTools
print ("Building 3D phantom using TomoPhantom software")
tic=timeit.default_timer()
model = 13 # select a model number from the library
N_size = 128 # Define phantom dimensions using a scalar value (cubic phantom)
path = os.path.dirname(tomophantom.__file__)
path_library3D = os.path.join(path, "Phantom3DLibrary.dat")
#This will generate a N_size x N_size x N_size phantom (3D)
phantom_tm = TomoP3D.Model(model, N_size, path_library3D)
toc=timeit.default_timer()
Run_time = toc - tic
print("Phantom has been built in {} seconds".format(Run_time))
sliceSel = int(0.5*N_size)
#plt.gray()
plt.figure()
plt.subplot(131)
plt.imshow(phantom_tm[sliceSel,:,:],vmin=0, vmax=1)
plt.title('3D Phantom, axial view')
plt.subplot(132)
plt.imshow(phantom_tm[:,sliceSel,:],vmin=0, vmax=1)
plt.title('3D Phantom, coronal view')
plt.subplot(133)
plt.imshow(phantom_tm[:,:,sliceSel],vmin=0, vmax=1)
plt.title('3D Phantom, sagittal view')
plt.show()
# Projection geometry related parameters:
Horiz_det = int(2*N_size) # detector column count (horizontal)
Vert_det = N_size # detector row count (vertical) (no reason for it to be > N)
angles_num = int(0.5*np.pi*N_size); # angles number
angles = np.linspace(0.0,179.9,angles_num,dtype='float32') # in degrees
angles_rad = angles*(np.pi/180.0)
#%%
print ("Building 3D analytical projection data with TomoPhantom")
projData3D_analyt= TomoP3D.ModelSino(model, N_size, Horiz_det, Vert_det, angles, path_library3D)
intens_max = 70
sliceSel = int(0.5*N_size)
plt.figure()
plt.subplot(131)
plt.imshow(projData3D_analyt[:,sliceSel,:],vmin=0, vmax=intens_max)
plt.title('2D Projection (analytical)')
plt.subplot(132)
plt.imshow(projData3D_analyt[sliceSel,:,:],vmin=0, vmax=intens_max)
plt.title('Sinogram view')
plt.subplot(133)
plt.imshow(projData3D_analyt[:,:,sliceSel],vmin=0, vmax=intens_max)
plt.title('Tangentogram view')
plt.show()
#%%
print ("Adding noise to projection data")
from tomophantom.supp.artifacts import _Artifacts_
# forming dictionaries with artifact types
_noise_ = {'noise_type' : 'Poisson',
'noise_amplitude' : 10000, # noise amplitude
'noise_seed' : 0}
_stripes_ = {'stripes_percentage' : 1.2,
'stripes_maxthickness' : 3.0,
'stripes_intensity' : 0.25,
'stripes_type' : 'mix',
'stripes_variability' : 0.005}
projData3D_analyt_noisy = _Artifacts_(projData3D_analyt, **_noise_, **_stripes_)
intens_max = np.max(projData3D_analyt_noisy)
sliceSel = int(0.5*N_size)
plt.figure()
plt.subplot(131)
plt.imshow(projData3D_analyt_noisy[:,sliceSel,:],vmin=0, vmax=intens_max)
plt.title('2D noisy Projection (analytical)')
plt.subplot(132)
plt.imshow(projData3D_analyt_noisy[sliceSel,:,:],vmin=0, vmax=intens_max)
plt.title('Noisy sinogram view')
plt.subplot(133)
plt.imshow(projData3D_analyt_noisy[:,:,sliceSel],vmin=0, vmax=intens_max)
plt.title('Noisy tangentogram view')
plt.show()
#%%
print ("Reconstruction using FBP from tomobar")
# initialise tomobar DIRECT reconstruction class ONCE
from tomobar.methodsDIR import RecToolsDIR
RectoolsDIR = RecToolsDIR(DetectorsDimH = Horiz_det, # DetectorsDimH # detector dimension (horizontal)
DetectorsDimV = Vert_det, # DetectorsDimV # detector dimension (vertical) for 3D case only
CenterRotOffset = None, # The Center of Rotation (CoR) scalar
AnglesVec = angles_rad, # array of angles in radians
ObjSize = N_size, # a scalar to define reconstructed object dimensions
device_projector = 'gpu')
recNumerical= RectoolsDIR.FBP(projData3D_analyt_noisy) # FBP reconstruction
sliceSel = int(0.5*N_size)
max_val = 1
#plt.gray()
plt.figure()
plt.subplot(131)
plt.imshow(recNumerical[sliceSel,:,:],vmin=0, vmax=max_val)
plt.title('3D Reconstruction, axial view')
plt.subplot(132)
plt.imshow(recNumerical[:,sliceSel,:],vmin=0, vmax=max_val)
plt.title('3D Reconstruction, coronal view')
plt.subplot(133)
plt.imshow(recNumerical[:,:,sliceSel],vmin=0, vmax=max_val)
plt.title('3D Reconstruction, sagittal view')
plt.show()
# calculate errors
Qtools = QualityTools(phantom_tm, recNumerical)
RMSE = Qtools.rmse()
print("Root Mean Square Error is {}".format(RMSE))
#%%
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
print ("Reconstructing with FISTA-OS method using tomobar")
print ("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%")
# initialise tomobar ITERATIVE reconstruction class ONCE
from tomobar.methodsIR import RecToolsIR
Rectools = RecToolsIR(DetectorsDimH = Horiz_det, # DetectorsDimH # detector dimension (horizontal)
DetectorsDimV = Vert_det, # DetectorsDimV # detector dimension (vertical) for 3D case only
CenterRotOffset = 0.0, # Center of Rotation (CoR) scalar (for 3D case only)
AnglesVec = angles_rad, # array of angles in radians
ObjSize = N_size, # a scalar to define reconstructed object dimensions
datafidelity='LS',# data fidelity, choose LS, PWLS (wip), GH (wip), Student (wip)
device_projector='gpu')
# prepare dictionaries with parameters:
_data_ = {'projection_norm_data' : projData3D_analyt_noisy,
'OS_number' : 10} # data dictionary
lc = Rectools.powermethod(_data_) # calculate Lipschitz constant (run once to initialise)
# Run FISTA-OS reconstrucion algorithm without regularisation
_algorithm_ = {'iterations' : 18,
'lipschitz_const' : lc}
# adding regularisation using the CCPi regularisation toolkit
_regularisation_ = {'method' : 'PD_TV',
'regul_param' : 0.0005,
'iterations' : 80,
'device_regulariser': 'gpu'}
RecFISTA_os_reg = Rectools.FISTA(_data_, _algorithm_, _regularisation_)
sliceSel = int(0.5*N_size)
max_val = 1
plt.figure()
plt.subplot(131)
plt.imshow(RecFISTA_os_reg[sliceSel,:,:],vmin=0, vmax=max_val)
plt.title('3D FISTA Reconstruction, axial view')
plt.subplot(132)
plt.imshow(RecFISTA_os_reg[:,sliceSel,:],vmin=0, vmax=max_val)
plt.title('3D FISTA Reconstruction, coronal view')
plt.subplot(133)
plt.imshow(RecFISTA_os_reg[:,:,sliceSel],vmin=0, vmax=max_val)
plt.title('3D FISTA Reconstruction, sagittal view')
plt.show()
#%% | {
"content_hash": "9ed4dbbf36cd8c99d63658f41e27745c",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 111,
"avg_line_length": 37.70967741935484,
"alnum_prop": 0.6928999144568007,
"repo_name": "dkazanc/TomoPhantom",
"id": "dc737a7b52357e206b9a6391155a91f0c22974de",
"size": "7061",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Demos/Python/3D/ReconASTRA3D_artifacts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "456"
},
{
"name": "C",
"bytes": "246720"
},
{
"name": "CMake",
"bytes": "13381"
},
{
"name": "Cython",
"bytes": "42752"
},
{
"name": "MATLAB",
"bytes": "12438"
},
{
"name": "Python",
"bytes": "53351"
},
{
"name": "Shell",
"bytes": "1697"
}
],
"symlink_target": ""
} |
__docformat__ = 'reStructuredText'
# Import Psyco if available
try:
import psyco
psyco.full()
except ImportError:
pass
import sys
import os
import tempfile
import re
import string
import types
from os.path import abspath, dirname, expanduser, join
from urlparse import urljoin, urlparse, urlunparse
from copy import copy, deepcopy
from optparse import OptionParser
import logging
from docutils.languages import get_language
import docutils.readers.doctree
import docutils.core
import docutils.nodes
from docutils.parsers.rst import directives
import pygments_code_block_directive # code-block directive
from reportlab.platypus import *
from reportlab.platypus.flowables import _listWrapOn, _Container
#from reportlab.lib.enums import *
#from reportlab.lib.units import *
#from reportlab.lib.pagesizes import *
from flowables import * # our own reportlab flowables
import flowables
from svgimage import SVGImage
from math_directive import math_node
from math_flowable import Math
from aafigure_directive import Aanode
from log import log, nodeid
from pprint import pprint
from smartypants import smartyPants
from roman import toRoman
# Is this really the best unescape in the stdlib for '&' => '&'????
from xml.sax.saxutils import unescape, escape
import config
from cStringIO import StringIO
#def escape (x,y):
# "Dummy escape function to test for excessive escaping"
# return x
from utils import log, parseRaw
import styles as sty
HAS_PIL = True
try:
from PIL import Image as PILImage
except ImportError:
try:
import Image as PILImage
except ImportError:
log.warning("Support for images other than JPG,"
" is now limited. Please install PIL.")
HAS_PIL = False
try:
from PythonMagick import Image as PMImage
HAS_MAGICK = True
except ImportError:
HAS_MAGICK = False
try:
import wordaxe
from wordaxe.rl.paragraph import Paragraph
from wordaxe.rl.styles import ParagraphStyle, getSampleStyleSheet
# PyHnjHyphenator is broken for non-ascii characters, so
# let's not use it and avoid useless crashes (http://is.gd/19efQ)
#from wordaxe.PyHnjHyphenator import PyHnjHyphenator
# If basehyphenator doesn't load, wordaxe is broken
# pyhyphenator and DCW *may* not load.
from wordaxe.BaseHyphenator import BaseHyphenator
try:
from wordaxe.plugins.PyHyphenHyphenator \
import PyHyphenHyphenator
except:
pass
try:
from wordaxe.DCWHyphenator import DCWHyphenator
except:
pass
except ImportError:
# log.warning("No support for hyphenation, install wordaxe")
HAS_WORDAXE = False
else:
HAS_WORDAXE = True
try:
import sphinx
HAS_SPHINX = True
except ImportError:
HAS_SPHINX = False
# These are to suppress repeated messages
unkn_elem=set()
unkn_text=set()
class RstToPdf(object):
def __init__(self, stylesheets=None, language=None,
             header=None,
             footer=None,
             inlinelinks=False,
             breaklevel=1,
             font_path=None,
             style_path=None,
             fit_mode='shrink',
             sphinx=False,
             smarty='0',
             baseurl=None,
             repeat_table_rows=False,
             footnote_backlinks=True,
             inline_footnotes=False,
             def_dpi=300,
             show_frame=False,
             highlightlang='python', #This one is only used by sphinx
             basedir=None,
             splittables=False,
             blank_first_page=False,
             breakside='odd'
             ):
    """Set up the converter: styles, languages, hyphenation, options.

    All arguments keep their previous meaning; stylesheets/font_path/
    style_path default to empty lists and basedir defaults to the
    current working directory.
    """
    global HAS_SPHINX
    # FIX: the previous signature used mutable defaults ([]) and an
    # eagerly-evaluated os.getcwd(), which are computed once at class
    # definition time and shared across instances. Resolve per call.
    if stylesheets is None:
        stylesheets = []
    if font_path is None:
        font_path = []
    if style_path is None:
        style_path = []
    if basedir is None:
        basedir = os.getcwd()
    self.debugLinesPdf = False
    self.depth = 0
    self.breakside = breakside
    self.blank_first_page = blank_first_page
    self.splittables = splittables
    self.basedir = basedir
    self.language = language
    self.doc_title = ""
    self.doc_author = ""
    self.header = header
    self.footer = footer
    self.decoration = {'header': header,
                       'footer': footer,
                       'endnotes': []}
    # find base path (frozen = running from a py2exe-style bundle)
    if hasattr(sys, 'frozen'):
        self.PATH = abspath(dirname(sys.executable))
    else:
        self.PATH = abspath(dirname(__file__))
    self.font_path = font_path
    self.style_path = style_path
    self.def_dpi = def_dpi
    self.loadStyles(stylesheets)
    self.docutils_languages = {}
    self.inlinelinks = inlinelinks
    self.breaklevel = breaklevel
    self.fit_mode = fit_mode
    self.to_unlink = []
    self.smarty = smarty
    self.baseurl = baseurl
    self.repeat_table_rows = repeat_table_rows
    self.footnote_backlinks = footnote_backlinks
    self.inline_footnotes = inline_footnotes
    # (removed a redundant second `self.def_dpi = def_dpi` assignment)
    self.show_frame = show_frame
    self.img_dir = os.path.join(abspath(dirname(__file__)), 'images')

    # Sorry about this, but importing sphinx.roles makes some
    # ordinary documents fail (demo.txt specifically) so
    # I can' t just try to import it outside. I need
    # to do it only if it's requested
    if HAS_SPHINX and sphinx:
        import sphinx.roles
        self.highlightlang = highlightlang
    else:
        HAS_SPHINX = False
        directives.register_directive('code-block', pygments_code_block_directive.code_block_directive)

    if not self.styles.languages:
        self.styles.languages = []
        if self.language:
            self.styles.languages.append(self.language)
            self.styles['bodytext'].language = self.language

    # Load the docutils language modules for all required languages
    for lang in self.styles.languages:
        try:
            self.docutils_languages[lang] = get_language(lang)
        except ImportError:
            try:
                # Fall back to the base language ("pt_br" -> "pt")
                self.docutils_languages[lang] = \
                    get_language(lang.split('_', 1)[0])
            except ImportError:
                log.warning("Can't load Docutils module \
for language %s", lang)

    # Load the hyphenators for all required languages
    if HAS_WORDAXE:
        for lang in self.styles.languages:
            if lang.split('_', 1)[0] == 'de':
                try:
                    wordaxe.hyphRegistry[lang] = DCWHyphenator('de', 5)
                    continue
                except Exception:
                    # hyphenators may not always be available or crash,
                    # e.g. wordaxe issue 2809074 (http://is.gd/16lqs)
                    log.warning("Can't load wordaxe DCW hyphenator "
                        "for German language, trying Py hyphenator instead")
                else:
                    continue
            try:
                wordaxe.hyphRegistry[lang] = PyHyphenHyphenator(lang)
            except Exception:
                log.warning("Can't load wordaxe Py hyphenator"
                    " for language %s, trying base hyphenator", lang)
            else:
                continue
            try:
                wordaxe.hyphRegistry[lang] = BaseHyphenator(lang)
            except Exception:
                log.warning("Can't even load wordaxe base hyphenator")
        log.info('hyphenation by default in %s , loaded %s',
                 self.styles['bodytext'].language,
                 ','.join(self.styles.languages))

    self.pending_targets = []
    self.targets = []
def loadStyles(self, styleSheets=None):
    """(Re)build ``self.styles`` from the given list of stylesheet files.

    Passing None is equivalent to passing an empty list.
    """
    sheets = [] if styleSheets is None else styleSheets
    self.styles = sty.StyleSheet(sheets,
                                 self.font_path,
                                 self.style_path,
                                 def_dpi=self.def_dpi)
def size_for_image_node(self, node):
    """Compute the display size, in points, for a docutils image node.

    Returns a (width, height, kind) triple. kind is 'direct' for sizes
    resolved here, or 'percentage_of_container' when width was given as
    a percentage (in that case w is the bare integer percentage and the
    caller is expected to resolve it against the containing frame).
    """
    imgname = os.path.join(self.basedir,str(node.get("uri")))
    # 'scale' is a percentage; convert to a multiplier.
    scale = float(node.get('scale', 100))/100

    # Figuring out the size to display of an image is ... annoying.
    # If the user provides a size with a unit, it's simple, adjustUnits
    # will return it in points and we're done.
    # However, often the unit wil be "%" (specially if it's meant for
    # HTML originally. In which case, we will use a percentage of
    # the containing frame.

    # Find the image size in pixels:
    kind = 'direct'
    xdpi, ydpi = self.styles.def_dpi, self.styles.def_dpi
    extension = imgname.split('.')[-1].lower()
    if extension in [
            "ai", "ccx", "cdr", "cgm", "cmx",
            "sk1", "sk", "svg", "xml", "wmf", "fig"]:
        # Vector formats: let SVGImage report its natural size.
        iw, ih = SVGImage(imgname).wrap(0, 0)
        # These are in pt, so convert to px
        iw = iw * xdpi / 72
        ih = ih * ydpi / 72
    elif extension == 'pdf':
        try:
            from pyPdf import pdf
        except:
            log.warning('PDF images are not supported without pypdf [%s]', nodeid(node))
            return 0, 0, 'direct'
        # Use the first page's MediaBox as the image size.
        reader = pdf.PdfFileReader(open(imgname))
        x1, y1, x2, y2 = reader.getPage(0)['/MediaBox']
        # These are in pt, so convert to px
        iw = float((x2-x1) * xdpi / 72)
        ih = float((y2-y1) * ydpi / 72)
    else:
        if HAS_PIL:
            img = PILImage.open(imgname)
            iw, ih = img.size
            # Honor the DPI embedded in the image, if any.
            xdpi, ydpi = img.info.get('dpi', (xdpi, ydpi))
        elif HAS_MAGICK:
            img = PMImage(imgname)
            iw = img.size().width()
            ih = img.size().height()
            # FIXME: need to figure out how to get the DPI
            # xdpi, ydpi=img.density().???
        else:
            log.warning("Sizing images without PIL "
                        "or PythonMagick, using 100x100 [%s]"
                        , nodeid(node))
            iw, ih = 100., 100.

    # Try to get the print resolution from the image itself via PIL.
    # If it fails, assume a DPI of 300, which is pretty much made up,
    # and then a 100% size would be iw*inch/300, so we pass
    # that as the second parameter to adjustUnits
    #
    # Some say the default DPI should be 72. That would mean
    # the largest printable image in A4 paper would be something
    # like 480x640. That would be awful.
    #
    w = node.get('width')
    if w is not None:
        # In this particular case, we want the default unit
        # to be pixels so we work like rst2html
        if w[-1] == '%':
            kind = 'percentage_of_container'
            w=int(w[:-1])
        else:
            # This uses default DPI setting because we
            # are not using the image's "natural size"
            # this is what LaTeX does, according to the
            # docutils mailing list discussion
            w = self.styles.adjustUnits(w, self.styles.tw,
                                        default_unit='px')
    else:
        log.warning("Using image %s without specifying size."
            "Calculating based on image size at %ddpi [%s]",
            imgname, xdpi, nodeid(node))
        # No width specified at all, use w in px
        w = iw*inch/xdpi

    h = node.get('height')
    if h is not None and h[-1] != '%':
        h = self.styles.adjustUnits(h, ih*inch/ydpi)
    else:
        # Now, often, only the width is specified!
        # if we don't have a height, we need to keep the
        # aspect ratio, or else it will look ugly
        if h and h[-1]=='%':
            log.error('Setting height as a percentage does **not** work. '\
                'ignoring height parameter [%s]', nodeid(node))
        # Keep the image's aspect ratio.
        h = w*ih/iw

    # Apply scale factor
    w = w*scale
    h = h*scale

    # And now we have this probably completely bogus size!
    log.info("Image %s size calculated: %fcm by %fcm [%s]",
             imgname, w/cm, h/cm, nodeid(node))

    return w, h, kind
def style_language(self, style):
"""Return language corresponding to this style."""
try:
return style.language
except AttributeError:
pass
try:
return self.styles['bodytext'].language
except AttributeError:
# FIXME: this is pretty arbitrary, and will
# probably not do what you want.
# however, it should only happen if:
# * You specified the language of a style
# * Have no wordaxe installed.
# Since it only affects hyphenation, and wordaxe is
# not installed, t should have no effect whatsoever
return os.environ['LANG'] or 'en'
def text_for_label(self, label, style):
"""Translate text for label."""
try:
text = self.docutils_languages[self.style_language(style)]\
.labels[label]
except KeyError:
text = label.capitalize()
return text + ":"
def text_for_bib_field(self, field, style):
"""Translate text for bibliographic fields."""
try:
text = self.docutils_languages[self.style_language(style)]\
.bibliographic_fields[field]
except KeyError:
text = field
return text + ":"
def author_separator(self, style):
"""Return separator string for authors."""
try:
sep = self.docutils_languages[self.style_language(style)]\
.author_separators[0]
except KeyError:
sep = ';'
return sep + " "
def styleToFont(self, style):
'''Takes a style name, returns a font tag for it, like
"<font face=helvetica size=14 color=red>". Used for inline
nodes (custom interpreted roles)'''
try:
s = self.styles[style]
bc = s.backColor
if bc:
r = '<font face="%s" size="%d" color="#%s" backColor="#%s">'\
%(s.fontName, s.fontSize,
s.textColor.hexval()[2:], bc.hexval()[2:])
else:
r = '<font face="%s" size="%d" color="#%s">' % (
s.fontName, s.fontSize, s.textColor.hexval()[2:])
return r
except KeyError:
log.warning('Unknown class %s', style)
return None
def gather_pdftext(self, node, replaceEnt=True):
return ''.join([self.gen_pdftext(n, replaceEnt)
for n in node.children])
def gen_pdftext(self, node, replaceEnt=True):
    """Render an inline docutils/Sphinx node to reportlab intra-paragraph
    markup.

    The result is cached on ``node.pdftext`` and returned. When
    *replaceEnt* is true, plain text content is XML-escaped. Unknown
    node classes are reported once and rendered as the concatenation of
    their children.
    """
    # pre/post hold opening/closing markup wrapped around the children.
    pre = ""
    post = ""

    log.debug("self.gen_pdftext: %s", node.__class__)
    log.debug("[%s]", nodeid(node))
    try:
        log.debug("self.gen_pdftext: %s", node)
    except (UnicodeDecodeError, UnicodeEncodeError):
        log.debug("self.gen_pdftext: %r", node)

    #########################################################
    # SPHINX nodes
    #########################################################
    if HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_signature):
        node.pdftext = self.gather_pdftext(node)
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.module):
        node.pdftext = self.gather_pdftext(node)
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_addname):
        pre = self.styleToFont("descclassname")
        post = "</font>"
        node.pdftext = pre + self.gather_pdftext(node) + post
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_name):
        pre = self.styleToFont("descname")
        post = "</font>"
        node.pdftext = pre + self.gather_pdftext(node) + post
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_returns):
        pre = self.styleToFont("returns")
        post = "</font>"
        node.pdftext = ' → ' + pre + self.gather_pdftext(node) + post
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_type):
        pre = self.styleToFont("desctype")
        post = "</font>"
        node.pdftext = pre + self.gather_pdftext(node) + post
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_parameterlist):
        pre = ' ('
        post = ')'
        t = self.gather_pdftext(node)
        # Strip the leading ", " that the first parameter child emits.
        while t and t[0] in ', ':
            t = t[1:]
        node.pdftext = pre + t + post
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_parameter):
        if node.hasattr('noemph'):
            pre = ', '
            post = ''
        else:
            pre = ', <i>'
            post = '</i>'
        pre += self.styleToFont("descparameter")
        post = "</font>" + post
        node.pdftext = pre + self.gather_pdftext(node) + post
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_optional):
        pre = self.styleToFont("optional") + '[</font>, '
        post = self.styleToFont("optional") + ']</font>'
        t = self.gather_pdftext(node)
        # Strip the leading ", " that the first parameter child emits.
        while t and t[0] in ', ':
            t = t[1:]
        node.pdftext = pre + t + post
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_annotation):
        node.pdftext = '<i>%s</i>' % self.gather_pdftext(node)
    elif HAS_SPHINX and isinstance(node, sphinx.addnodes.pending_xref):
        node.pdftext = self.gather_pdftext(node)
    #########################################################
    # End of SPHINX nodes
    #########################################################
    elif isinstance(node, (docutils.nodes.paragraph,
            docutils.nodes.title, docutils.nodes.subtitle)):
        # Emit <a name=.../> anchors for this node's ids plus any
        # targets queued up by gen_elements.
        pre = ''
        targets = set(node.get('ids', []) + self.pending_targets)
        self.pending_targets = []
        for _id in targets:
            if _id not in self.targets:
                pre += '<a name="%s"/>' % (_id)
                self.targets.append(_id)
        node.pdftext = pre + self.gather_pdftext(node) + "\n"
    elif isinstance(node, docutils.nodes.Text):
        node.pdftext = node.astext()
        if replaceEnt:
            node.pdftext = escape(node.pdftext)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.strong):
        pre = "<b>"
        post = "</b>"
        node.pdftext = self.gather_pdftext(node)
        #if replaceEnt:
        #    node.pdftext=escape(node.pdftext,True)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.emphasis):
        pre = "<i>"
        post = "</i>"
        node.pdftext = self.gather_pdftext(node)
        #if replaceEnt:
        #    node.pdftext=escape(node.pdftext,True)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.literal):
        pre = '<font face="%s">' % self.styles['literal'].fontName
        post = "</font>"
        # Literals are not hyphenated/wrapped unless the style says so.
        if not self.styles['literal'].hyphenation:
            pre = '<nobr>' + pre
            post += '</nobr>'
        node.pdftext = self.gather_pdftext(node)
        #if replaceEnt:
        #    node.pdftext=escape(node.pdftext,True)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.superscript):
        pre = '<super>'
        post = "</super>"
        node.pdftext = self.gather_pdftext(node)
        #if replaceEnt:
        #node.pdftext = escape(node.pdftext, True)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.subscript):
        pre = '<sub>'
        post = "</sub>"
        node.pdftext = self.gather_pdftext(node)
        #if replaceEnt:
        #node.pdftext = escape(node.pdftext, True)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.title_reference):
        pre = self.styleToFont("title_reference")
        post = "</font>"
        node.pdftext = self.gather_pdftext(node)
        # Fix issue 134
        #if replaceEnt:
        #node.pdftext = escape(node.pdftext, True)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.reference):
        uri = node.get('refuri')
        if uri:
            if self.baseurl: # Need to join the uri with the base url
                uri = urljoin(self.baseurl, uri)
            if urlparse(uri)[0] and self.inlinelinks:
                # external inline reference
                post = u' (%s)' % uri
            else:
                # A plain old link
                pre += u'<a href="%s" color="%s">' %\
                    (uri, self.styles.linkColor)
                post = '</a>' + post
        else:
            # Internal reference: link to the target's id instead.
            uri = node.get('refid')
            if uri:
                pre += u'<a href="#%s" color="%s">' %\
                    (uri, self.styles.linkColor)
                post = '</a>' + post
        node.pdftext = self.gather_pdftext(node)
        #if replaceEnt:
        #    node.pdftext=escape(node.pdftext,True)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, (docutils.nodes.option_string,
                           docutils.nodes.option_argument)):
        node.pdftext = node.astext()
        if replaceEnt:
            node.pdftext = escape(node.pdftext)
    elif isinstance(node, (docutils.nodes.header, docutils.nodes.footer)):
        node.pdftext = self.gather_pdftext(node)
        if replaceEnt:
            node.pdftext = escape(node.pdftext)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, (docutils.nodes.system_message,
                           docutils.nodes.problematic)):
        # Render docutils errors in red so they stand out in the PDF.
        pre = '<font color="red">'
        post = "</font>"
        node.pdftext = self.gather_pdftext(node)
        if replaceEnt:
            node.pdftext = escape(node.pdftext)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.generated):
        node.pdftext = self.gather_pdftext(node)
        if replaceEnt:
            node.pdftext = escape(node.pdftext)
        node.pdftext = pre + node.pdftext + post
    elif isinstance(node, docutils.nodes.image):
        # First see if the image file exists, or else,
        # use image-missing.png
        uri = node.get('uri')
        if not os.path.exists(uri):
            log.error("Missing image file: %s [%s]", uri, nodeid(node))
            uri = os.path.join(self.img_dir, 'image-missing.png')
            w, h = 1*cm, 1*cm
        else:
            w, h, kind = self.size_for_image_node(node)
        alignment = node.get('align', 'CENTER').lower()
        if alignment in ('top', 'middle', 'bottom'):
            align = 'valign="%s"' % alignment
        else:
            align = ''
        node.pdftext = '<img src="%s" width="%f" height="%f" %s/>' %\
            (uri, w, h, align)
    elif isinstance(node, math_node):
        # Render the math to an image; remember it for later cleanup.
        mf = Math(node.math_data)
        w, h = mf.wrap(0, 0)
        descent = mf.descent()
        img = mf.genImage()
        self.to_unlink.append(img)
        node.pdftext = '<img src="%s" width=%f height=%f valign=%f/>' % (
            img, w, h, -descent)
    elif isinstance(node, docutils.nodes.footnote_reference):
        # TODO: when used in Sphinx, all footnotes are autonumbered
        anchors = ''
        for i in node['ids']:
            if i not in self.targets:
                anchors += '<a name="%s"/>' % i
                self.targets.append(i)
        node.pdftext = u'%s<super><a href="%s" color="%s">%s</a></super>' %\
            (anchors, '#' + node.astext(),
             self.styles.linkColor, node.astext())
    elif isinstance(node, docutils.nodes.citation_reference):
        anchors = ''
        for i in node['ids']:
            if i not in self.targets:
                anchors += '<a name="%s"/>' % i
                self.targets.append(i)
        node.pdftext = u'%s[<a href="%s" color="%s">%s</a>]' %\
            (anchors, '#' + node.astext(),
             self.styles.linkColor, node.astext())
    elif isinstance(node, docutils.nodes.target):
        if node['ids'][0] not in self.targets:
            pre = u'<a name="%s"/>' % node['ids'][0]
            self.targets.append(node['ids'][0])
        node.pdftext = self.gather_pdftext(node)
        if replaceEnt:
            node.pdftext = escape(node.pdftext)
        node.pdftext = pre + node.pdftext
    elif isinstance(node, docutils.nodes.inline):
        # Map the node's first class to a style-based font tag, if known.
        ftag = self.styleToFont(node['classes'][0])
        if ftag:
            node.pdftext = "%s%s</font>" %\
                (ftag, self.gather_pdftext(node))
        else:
            node.pdftext = self.gather_pdftext(node)
    elif isinstance(node, docutils.nodes.literal_block):
        node.pdftext = self.gather_pdftext(node)
    else:
        # With sphinx you will get hundreds of these
        #if not HAS_SPHINX:
        cln = str(node.__class__)
        # Warn only once per unknown node class.
        if not cln in unkn_text:
            unkn_text.add(cln)
            log.warning("Unkn. node (self.gen_pdftext): %s [%s]",
                        node.__class__, nodeid(node))
            try:
                log.debug(node)
            except (UnicodeDecodeError, UnicodeEncodeError):
                log.debug(repr(node))
        node.pdftext = self.gather_pdftext(node)

    try:
        log.debug("self.gen_pdftext: %s" % node.pdftext)
    except UnicodeDecodeError:
        pass
    # Try to be clever about when to use smartypants
    if node.__class__ in (docutils.nodes.paragraph,
            docutils.nodes.block_quote, docutils.nodes.title):
        node.pdftext = smartyPants(node.pdftext, self.smarty)
    return node.pdftext
    def gen_elements(self, node, style=None):
        """Build the list of reportlab flowables for *node*.

        Dispatches on the docutils/sphinx node class, recursing into
        children via gather_elements/gen_elements.  The result is stored
        in ``node.elements`` (side effect) and also returned.

        *style* is the paragraph style inherited from the parent node;
        it may be overridden by the node's first ``classes`` entry when
        a stylesheet style of that name exists.
        """
        #pprint (dir(node))
        #try:
            #print node.line
            #print node.source
        #except:
            #pass
        #print '------------'
        log.debug("gen_elements: %s", node.__class__)
        log.debug("[%s]", nodeid(node))
        try:
            log.debug("gen_elements: %s", node)
        except (UnicodeDecodeError, UnicodeEncodeError):
            log.debug("gen_elements: %r", node)
        # set anchors for internal references
        try:
            for i in node['ids']:
                self.pending_targets.append(i)
        except TypeError: #Happens with docutils.node.Text
            pass
        try:
            if node['classes'] and node['classes'][0]:
                # FIXME: Supports only one class, sorry ;-)
                if self.styles.StyleSheet.has_key(node['classes'][0]):
                    style = self.styles[node['classes'][0]]
                else:
                    log.info("Unknown class %s, ignoring. [%s]",
                        node['classes'][0], nodeid(node))
        except TypeError: # Happens when a docutils.node.Text reaches here
            pass
        # No inherited/class style: fall back to the per-node-class default.
        if style is None or style == self.styles['bodytext']:
            style = self.styles.styleForNode(node)
        if isinstance(node, docutils.nodes.document):
            node.elements = self.gather_elements(node, style=style)
        elif HAS_SPHINX and isinstance(node, (sphinx.addnodes.glossary,
                                              sphinx.addnodes.start_of_file)):
            node.elements = self.gather_elements(node, style=style)
        elif HAS_SPHINX and isinstance(node, (sphinx.addnodes.index)):
            try:
                self.pending_targets.append(node['entries'][0][2])
            except IndexError:
                if node['entries']:
                    log.error("Can't process index entry: %s [%s]",
                        node['entries'], nodeid(node))
            node.elements = []
        elif isinstance(node, math_node):
            node.elements = [Math(node.math_data)]
        #######################
        ## Tables
        #######################
        elif isinstance(node, docutils.nodes.table):
            node.elements = [Spacer(0, self.styles['table'].spaceBefore)] + \
                                self.gather_elements(node, style=style) +\
                                [Spacer(0, self.styles['table'].spaceAfter)]
        elif isinstance(node, docutils.nodes.tgroup):
            rows = []
            colWidths = []
            hasHead = False
            headRows = 0
            for n in node.children:
                if isinstance(n, docutils.nodes.thead):
                    hasHead = True
                    for row in n.children:
                        r = []
                        for cell in row.children:
                            r.append(cell)
                        rows.append(r)
                    headRows = len(rows)
                elif isinstance(n, docutils.nodes.tbody):
                    for row in n.children:
                        r = []
                        for cell in row.children:
                            r.append(cell)
                        rows.append(r)
                elif isinstance(n, docutils.nodes.colspec):
                    colWidths.append(int(n['colwidth']))
            # colWidths are in no specific unit, really. Maybe ems.
            # Convert them to %
            colWidths=map(int, colWidths)
            tot=sum(colWidths)
            colWidths=["%s%%"%((100.*w)/tot) for w in colWidths]
            if 'colWidths' in style.__dict__:
                colWidths[:len(style.colWidths)]=style.colWidths
            # Normalize the grid (continuation cells) and collect
            # reportlab SPAN commands for multi-row/column cells.
            spans = self.filltable(rows)
            data = []
            cellStyles = []
            rowids = range(0, len(rows))
            for row, i in zip(rows, rowids):
                r = []
                j = 0
                for cell in row:
                    if isinstance(cell, str):
                        r.append("")
                    else:
                        # I honestly have no idea what the next line does
                        # (Roberto Alsina, May 25th, 2009)
                        ell = self.gather_elements(cell, style=
                            i < headRows and self.styles['table-heading'] \
                            or style)
                        #if len(ell) == 1:
                            # Experiment: if the cell has a single element,
                            # extract its class and use it for the cell.
                            # That way, you can have cells with specific
                            # background colors, at least.
                            #
                            # Experiment killed ;-)
                            # You can do that and more using table styles now!
                            #try:
                                #cellStyles += \
                                    #self.styles.pStyleToTStyle(ell[0].style,
                                                               #j, i)
                            ## Fix for issue 85: only do it if it has a style.
                            #except AttributeError:
                                #pass
                        r.append(ell)
                    j += 1
                data.append(r)
            st = TableStyle(spans)
            if 'commands' in self.styles['table'].__dict__:
                for cmd in self.styles['table'].commands:
                    st.add(*cmd)
            if 'commands' in style.__dict__:
                for cmd in style.commands:
                    st.add(*cmd)
            for cmd in cellStyles:
                st.add(*cmd)
            if hasHead:
                for cmd in self.styles.tstyleHead(headRows):
                    st.add(*cmd)
            rtr = self.repeat_table_rows
            node.elements = [DelayedTable(data, colWidths, st, rtr)]
        elif isinstance(node, docutils.nodes.title):
            # Special cases: (Not sure this is right ;-)
            if isinstance(node.parent, docutils.nodes.document):
                node.elements = [Paragraph(self.gen_pdftext(node),
                                           self.styles['title'])]
                self.doc_title = unicode(self.gen_pdftext(node)).strip()
            elif isinstance(node.parent, docutils.nodes.topic):
                node.elements = [Paragraph(self.gen_pdftext(node),
                                           self.styles['topic-title'])]
            elif isinstance(node.parent, docutils.nodes.Admonition):
                node.elements = [Paragraph(self.gen_pdftext(node),
                                           self.styles['admonition-title'])]
            elif isinstance(node.parent, docutils.nodes.table):
                node.elements = [Paragraph(self.gen_pdftext(node),
                                           self.styles['table-title'])]
            elif isinstance(node.parent, docutils.nodes.sidebar):
                node.elements = [Paragraph(self.gen_pdftext(node),
                                           self.styles['sidebar-title'])]
            else:
                # Section/Subsection/etc.
                text = self.gen_pdftext(node)
                fch = node.children[0]
                if isinstance(fch, docutils.nodes.generated) and \
                    fch['classes'] == ['sectnum']:
                    snum = fch.astext()
                else:
                    snum = None
                key = node.get('refid')
                maxdepth=4
                if reportlab.Version > '2.1':
                    maxdepth=6
                # The parent ID is the refid + an ID to make it unique for Sphinx
                parent_id=(node.parent.get('ids', [None]) or [None])[0]+u'-'+unicode(id(node))
                node.elements = [ Heading(text,
                        self.styles['heading%d'%min(self.depth, maxdepth)],
                        level=self.depth-1,
                        parent_id=parent_id,
                        node=node
                        )]
                if self.depth <= self.breaklevel:
                    node.elements.insert(0, MyPageBreak(breakTo=self.breakside))
        elif isinstance(node, docutils.nodes.subtitle):
            if isinstance(node.parent, docutils.nodes.sidebar):
                node.elements = [Paragraph(self.gen_pdftext(node),
                    self.styles['sidebar-subtitle'])]
            elif isinstance(node.parent, docutils.nodes.document):
                node.elements = [Paragraph(self.gen_pdftext(node),
                    self.styles['subtitle'])]
        elif HAS_SPHINX and isinstance(node,
                sphinx.addnodes.compact_paragraph):
            node.elements = self.gather_elements(node, style=style)
        elif HAS_SPHINX and isinstance(node,sphinx.addnodes.module):
            node.elements = [Reference('module-'+node['modname'])]
        elif isinstance(node, docutils.nodes.paragraph):
            node.elements = [Paragraph(self.gen_pdftext(node), style)]
        elif isinstance(node, docutils.nodes.docinfo):
            # A docinfo usually contains several fields.
            # We'll render it as a series of elements, one field each.
            node.elements = self.gather_elements(node, style=style)
        elif isinstance(node, docutils.nodes.field):
            # A field has two child elements, a field_name and a field_body.
            # We render as a two-column table, left-column is right-aligned,
            # bold, and much smaller
            fn = Paragraph(self.gather_pdftext(node.children[0]) + ":",
                style=self.styles['fieldname'])
            fb = self.gen_elements(node.children[1],
                style=self.styles['fieldvalue'])
            t_style=TableStyle(self.styles['field_list'].commands)
            node.elements = [DelayedTable([[fn, fb]], style=t_style,
                colWidths=self.styles['field_list'].colWidths)]
        elif isinstance(node, docutils.nodes.decoration):
            node.elements = self.gather_elements(node, style=style)
        elif isinstance(node, docutils.nodes.header):
            self.decoration['header'] = self.gather_elements(node,
                style=self.styles['header'])
            node.elements = []
        elif isinstance(node, docutils.nodes.footer):
            self.decoration['footer'] = self.gather_elements(node,
                style=self.styles['footer'])
            node.elements = []
        elif isinstance(node, docutils.nodes.author):
            if isinstance(node.parent, docutils.nodes.authors):
                # Is only one of multiple authors. Return a paragraph
                node.elements = [Paragraph(self.gather_pdftext(node),
                    style=style)]
                if self.doc_author:
                    self.doc_author += self.author_separator(style=style) \
                        + node.astext().strip()
                else:
                    self.doc_author = node.astext().strip()
            else:
                # A single author: works like a field
                fb = self.gather_pdftext(node)
                t_style=TableStyle(self.styles['field_list'].commands)
                colWidths=map(self.styles.adjustUnits,
                    self.styles['field_list'].colWidths)
                node.elements = [Table(
                    [[Paragraph(self.text_for_label("author", style),
                        style=self.styles['fieldname']),
                      Paragraph(fb, style)]],
                    style=t_style, colWidths=colWidths)]
                self.doc_author = node.astext().strip()
        elif isinstance(node, docutils.nodes.authors):
            # Multiple authors. Create a two-column table.
            # Author references on the right.
            t_style=TableStyle(self.styles['field_list'].commands)
            colWidths = self.styles['field_list'].colWidths
            td = [[Paragraph(self.text_for_label("authors", style),
                        style=self.styles['fieldname']),
                   self.gather_elements(node, style=style)]]
            node.elements = [DelayedTable(td, style=t_style,
                colWidths=colWidths)]
        elif isinstance(node, docutils.nodes.organization):
            fb = self.gather_pdftext(node)
            t_style=TableStyle(self.styles['field_list'].commands)
            colWidths=self.styles['field_list'].colWidths
            label=self.text_for_label("organization", style)
            t = DelayedTable([[Paragraph(label, style=self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths=colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.contact):
            fb = self.gather_pdftext(node)
            t_style=TableStyle(self.styles['field_list'].commands)
            colWidths= self.styles['field_list'].colWidths
            label=self.text_for_label("contact", style)
            t = DelayedTable([[Paragraph(label, style=self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths=colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.address):
            fb = self.gather_pdftext(node)
            t_style=TableStyle(self.styles['field_list'].commands)
            colWidths= self.styles['field_list'].colWidths
            label=self.text_for_label("address", style)
            t = DelayedTable([[Paragraph(label, style=self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths=colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.version):
            fb = self.gather_pdftext(node)
            t_style=TableStyle(self.styles['field_list'].commands)
            colWidths= self.styles['field_list'].colWidths
            label=self.text_for_label("version", style)
            t = DelayedTable([[Paragraph(label, style=self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths=colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.revision):
            fb = self.gather_pdftext(node)
            t_style=TableStyle(self.styles['field_list'].commands)
            colWidths=map(self.styles.adjustUnits,
                self.styles['field_list'].colWidths)
            label=self.text_for_label("revision", style)
            t = Table([[Paragraph(label, style=self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths=colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.status):
            fb = self.gather_pdftext(node)
            t_style=TableStyle(self.styles['field_list'].commands)
            colWidths= self.styles['field_list'].colWidths
            label=self.text_for_label("status", style)
            t = DelayedTable([[Paragraph(label, style=self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths=colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.date):
            fb = self.gather_pdftext(node)
            t_style = TableStyle(self.styles['field_list'].commands)
            colWidths = self.styles['field_list'].colWidths
            label = self.text_for_label("date", style)
            t = DelayedTable([[Paragraph(label, style = self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths = colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.copyright):
            fb = self.gather_pdftext(node)
            t_style = TableStyle(self.styles['field_list'].commands)
            colWidths = self.styles['field_list'].colWidths
            label = self.text_for_label("copyright", style)
            t = DelayedTable([[Paragraph(label, style=self.styles['fieldname']),
                        Paragraph(fb, style)]],
                        style=t_style, colWidths=colWidths)
            node.elements = [t]
        elif isinstance(node, docutils.nodes.topic):
            # toc
            node_classes = node.attributes.get('classes', [])
            if 'contents' in node_classes:
                toc_visitor = TocBuilderVisitor(node.document)
                if 'local' in node_classes:
                    toc_visitor.toc = MyTableOfContents(parent=node.parent)
                else:
                    toc_visitor.toc = MyTableOfContents(parent=None)
                toc_visitor.toc.linkColor = self.styles.linkColor
                node.walk(toc_visitor)
                toc = toc_visitor.toc
                toc.levelStyles=[self.styles['toc%d'%l] for l in range(1,15)]
                for s in toc.levelStyles:
                    # FIXME: awful slimy hack!
                    s.__class__=reportlab.lib.styles.ParagraphStyle
                ## Issue 117: add extra TOC levelStyles.
                ## 9-deep should be enough.
                #for i in range(4):
                    #ps = toc.levelStyles[-1].__class__(name='Level%d'%(i+5),
                        #parent=toc.levelStyles[-1],
                        #leading=toc.levelStyles[-1].leading,
                        #firstlineIndent=toc.levelStyles[-1].firstLineIndent,
                        #leftIndent=toc.levelStyles[-1].leftIndent+1*cm)
                    #toc.levelStyles.append(ps)
                ## Override fontnames (defaults to Times-Roman)
                #for levelStyle in toc.levelStyles:
                    #levelStyle.__dict__['fontName'] = \
                        #self.styles['tableofcontents'].fontName
                if 'local' in node_classes:
                    node.elements = [toc]
                else:
                    node.elements = \
                        [Paragraph(self.gen_pdftext(node.children[0]),
                        self.styles['heading1']), toc]
            else:
                node.elements = self.gather_elements(node, style=style)
        elif isinstance(node, docutils.nodes.field_body):
            node.elements = self.gather_elements(node, style=style)
        elif isinstance(node, docutils.nodes.section):
            self.depth+=1
            node.elements = self.gather_elements(node)
            self.depth-=1
        elif isinstance(node, docutils.nodes.bullet_list):
            node._bullSize = self.styles["enumerated_list_item"].leading
            node.elements = self.gather_elements(node,
                style=self.styles["bullet_list_item"])
            s = self.styles["bullet_list"]
            if s.spaceBefore:
                node.elements.insert(0, Spacer(0, s.spaceBefore))
            if s.spaceAfter:
                node.elements.append(Spacer(0, s.spaceAfter))
        elif isinstance(node, (docutils.nodes.definition_list,
                docutils.nodes.option_list)):
            node.elements = self.gather_elements(node, style=style)
        elif isinstance(node, docutils.nodes.field_list):
            node.elements = [Spacer(0,self.styles['field_list'].spaceBefore)]+\
                self.gather_elements(node, style=style)
        elif isinstance(node, docutils.nodes.enumerated_list):
            node._bullSize = self.styles["enumerated_list_item"].leading*\
                max([len(self.bullet_for_node(x)[0]) for x in node.children])
            node.elements = self.gather_elements(node,
                style = self.styles["enumerated_list_item"])
            s = self.styles["enumerated_list"]
            if s.spaceBefore:
                node.elements.insert(0, Spacer(0, s.spaceBefore))
            if s.spaceAfter:
                node.elements.append(Spacer(0, s.spaceAfter))
        elif isinstance(node, docutils.nodes.definition):
            node.elements = self.gather_elements(node,
                style = self.styles["definition"])
        elif isinstance(node, docutils.nodes.option_list_item):
            optext = ', '.join([self.gather_pdftext(child)
                    for child in node.children[0].children])
            desc = self.gather_elements(node.children[1], style)
            t_style = TableStyle(self.styles['option_list'].commands)
            colWidths = self.styles['option_list'].colWidths
            node.elements = [DelayedTable([[self.PreformattedFit(
                optext, self.styles["literal"]), desc]], style = t_style,
                colWidths = colWidths)]
        elif isinstance(node, docutils.nodes.definition_list_item):
            # I need to catch the classifiers here
            tt = []
            dt = []
            ids = []
            for n in node.children:
                if isinstance(n, docutils.nodes.term):
                    for i in n['ids']: # Used by sphinx glossary lists
                        if i not in self.targets:
                            ids.append('<a name="%s"/>' % i)
                            self.targets.append(i)
                    tt.append(self.styleToFont("definition_list_term")
                        + self.gather_pdftext(n) + "</font>")
                elif isinstance(n, docutils.nodes.classifier):
                    tt.append(self.styleToFont("definition_list_classifier")
                        + self.gather_pdftext(n) + "</font>")
                else:
                    dt.extend(self.gen_elements(n, style))
            node.elements = [Paragraph(''.join(ids)+' : '.join(tt),
                self.styles['definition_list_term']),
                MyIndenter(left=10)] + dt + [MyIndenter(left=-10)]
        elif isinstance(node, docutils.nodes.list_item):
            el = self.gather_elements(node, style=style)
            b, t = self.bullet_for_node(node)
            # FIXME: this is really really not good code
            if not el:
                el = [Paragraph(u"<nobr>\xa0</nobr>", self.styles["bodytext"])]
            # FIXME: use different unicode bullets depending on b
            if b and b in "*+-":
                b = u'\u2022'
            bStyle = copy(style)
            bStyle.alignment = 2
            if t == 'bullet':
                st=self.styles['bullet_list']
                item_st=self.styles['bullet_list_item']
            else:
                st=self.styles['item_list']
                item_st=self.styles['item_list_item']
            idx=node.parent.children.index(node)
            if idx>0: # Not the first item
                sb=item_st.spaceBefore
            else:
                sb=0
            if (idx+1)<len(node.parent.children): #Not the last item
                sa=item_st.spaceAfter
            else:
                sa=0
            t_style = TableStyle(st.commands)
            #colWidths = map(self.styles.adjustUnits,
                #self.styles['item_list'].colWidths)
            colWidths = st.colWidths
            if self.splittables:
                node.elements = [Spacer(0,sb),
                                 SplitTable([[Paragraph(b, style = bStyle), el]],
                                 style = t_style,
                                 colWidths = colWidths),
                                 Spacer(0,sa)
                                 ]
            else:
                node.elements = [Spacer(0,sb),
                                 DelayedTable([[Paragraph(b, style = bStyle), el]],
                                 style = t_style,
                                 colWidths = colWidths),
                                 Spacer(0,sa)
                                 ]
        elif isinstance(node, docutils.nodes.transition):
            node.elements = [Separation()]
        elif isinstance(node, (docutils.nodes.system_message,
                               docutils.nodes.problematic)):
            # FIXME show the error in the document, red, whatever
            # log.warning("Problematic node %s", node.astext())
            node.elements = []
        elif isinstance(node, docutils.nodes.block_quote):
            # This should work, but doesn't look good inside of
            # table cells (see Issue 173)
            #node.elements = [MyIndenter(left=self.styles['blockquote'].leftIndent)]\
                #+ self.gather_elements( node, style) + \
                #[MyIndenter(left=-self.styles['blockquote'].leftIndent)]
            # Workaround for Issue 173 using tables
            leftIndent=self.styles['blockquote'].leftIndent
            rightIndent=self.styles['blockquote'].rightIndent
            spaceBefore=self.styles['blockquote'].spaceBefore
            spaceAfter=self.styles['blockquote'].spaceAfter
            data=[['',self.gather_elements( node, style)]]
            if self.splittables:
                node.elements=[Spacer(0,spaceBefore),SplitTable(data,
                    colWidths=[leftIndent,None],
                    style=TableStyle([["TOPPADDING",[0,0],[-1,-1],0],
                            ["LEFTPADDING",[0,0],[-1,-1],0],
                            ["RIGHTPADDING",[0,0],[-1,-1],rightIndent],
                            ["BOTTOMPADDING",[0,0],[-1,-1],0],
                    ])), Spacer(0,spaceAfter)]
            else:
                node.elements=[Spacer(0,spaceBefore),DelayedTable(data,
                    colWidths=[leftIndent,None],
                    style=TableStyle([["TOPPADDING",[0,0],[-1,-1],0],
                            ["LEFTPADDING",[0,0],[-1,-1],0],
                            ["RIGHTPADDING",[0,0],[-1,-1],rightIndent],
                            ["BOTTOMPADDING",[0,0],[-1,-1],0],
                    ])), Spacer(0,spaceAfter)]
        elif isinstance(node, docutils.nodes.attribution):
            node.elements = [
                Paragraph(self.gather_pdftext(node),
                          self.styles['attribution'])]
        elif isinstance(node, docutils.nodes.comment):
            # Class that generates no output
            node.elements = []
        elif isinstance(node, docutils.nodes.line_block):
            if isinstance(node.parent,docutils.nodes.line_block):
                qstyle = copy(style)
                qstyle.leftIndent += self.styles.adjustUnits("1.5em")
            else:
                qstyle = copy(self.styles['lineblock'])
            # Fix Issue 225: no space betwen line in a lineblock, but keep
            # space before the lineblock itself
            qstyle.spaceBefore=0
            node.elements = [Spacer(0,self.styles['lineblock'].spaceBefore)]+self.gather_elements(node, style=qstyle)
        elif isinstance(node, docutils.nodes.line):
            # All elements in one line
            node.elements = [Paragraph(self.gather_pdftext(node),
                                       style=style)]
        elif isinstance(node, (docutils.nodes.literal_block,
                               docutils.nodes.doctest_block)):
            node.elements = [self.PreformattedFit(
                self.gather_pdftext(node, replaceEnt = True),
                self.styles['code'])]
        elif isinstance(node, docutils.nodes.image):
            # FIXME: handle class,target,alt, check align
            imgname = os.path.join(self.basedir,str(node.get("uri")))
            if not os.path.exists(imgname):
                log.error("Missing image file: %s [%s]",imgname, nodeid(node))
                imgname = os.path.join(self.img_dir, 'image-missing.png')
                w, h, kind = 1*cm, 1*cm, 'direct'
            else:
                w, h, kind = self.size_for_image_node(node)
            extension = imgname.split('.')[-1].lower()
            if extension in (
                    'ai', 'ccx', 'cdr', 'cgm', 'cmx', 'fig',
                    'sk1', 'sk', 'svg', 'xml', 'wmf'):
                node.elements = [SVGImage(filename=imgname,
                                          height=h,
                                          width=w,
                                          kind=kind)]
            elif extension == 'pdf':
                try:
                    #import rlextra.pageCatcher.pageCatcher as pageCatcher
                    raise Exception("Broken")
                    node.elements = \
                        [pageCatcher.PDFImageFlowable(imgname, w, h)]
                except:
                    log.warning("Proper PDF images require "\
                        "pageCatcher (but doesn't work yet) [%s]",
                        nodeid(node))
                    if HAS_MAGICK:
                        # w,h are in pixels. I need to set the density
                        # of the image to the right dpi so this
                        # looks decent
                        img = PMImage()
                        img.density("%s"%self.styles.def_dpi)
                        img.read(imgname)
                        _, tmpname = tempfile.mkstemp(suffix='.png')
                        img.write(tmpname)
                        self.to_unlink.append(tmpname)
                        node.elements = [MyImage(filename=tmpname,
                                                 height=h,
                                                 width=w,
                                                 kind=kind)]
                    else:
                        log.warning("Minimal PDF image support "\
                            "requires PythonMagick [%s]", nodeid(node))
                        imgname = os.path.join(self.img_dir, 'image-missing.png')
                        w, h, kind = 1*cm, 1*cm, 'direct'
            elif not HAS_PIL and HAS_MAGICK and extension != 'jpg':
                # Need to convert to JPG via PythonMagick
                img = PMImage(imgname)
                _, tmpname = tempfile.mkstemp(suffix='.jpg')
                img.write(tmpname)
                self.to_unlink.append(tmpname)
                node.elements = [MyImage(filename=tmpname, height=h, width=w,
                            kind=kind)]
            elif HAS_PIL or extension == 'jpg':
                node.elements = [MyImage(filename=imgname, height=h, width=w,
                    kind=kind)]
            else:
                # No way to make this work
                log.error('To use a %s image you need PIL installed [%s]',extension, nodeid(node))
                node.elements = []
            if node.elements:
                i = node.elements[0]
                alignment = node.get('align', 'CENTER').upper()
                if alignment in ('LEFT', 'CENTER', 'RIGHT'):
                    i.hAlign = alignment
            # Image flowables don't support valign (makes no sense for them?)
            # elif alignment in ('TOP','MIDDLE','BOTTOM'):
            #    i.vAlign = alignment
        elif isinstance(node, docutils.nodes.figure):
            sub_elems = self.gather_elements(node, style=None)
            node.elements = [BoxedContainer(sub_elems, style)]
        elif isinstance(node, docutils.nodes.caption):
            node.elements = [Paragraph(self.gather_pdftext(node),
                                       style=self.styles['figure-caption'])]
        elif isinstance(node, docutils.nodes.legend):
            node.elements = self.gather_elements(node,
                style=self.styles['figure-legend'])
        elif isinstance(node, docutils.nodes.sidebar):
            node.elements = [BoxedContainer(self.gather_elements(node,
                                                                 style=None),
                                            self.styles['sidebar'])]
        elif isinstance(node, docutils.nodes.rubric):
            # Sphinx uses a rubric as footnote container
            if HAS_SPHINX and len(node.children) == 1 \
                and node.children[0].astext() == 'Footnotes':
                    node.elements=[Separation(),]
            else:
                node.elements = [Paragraph(self.gather_pdftext(node),
                                        self.styles['rubric'])]
        elif isinstance(node, docutils.nodes.compound):
            # FIXME think if this is even implementable
            node.elements = self.gather_elements(node, style)
        elif isinstance(node, docutils.nodes.container):
            # FIXME think if this is even implementable
            node.elements = self.gather_elements(node, style)
        elif isinstance(node, docutils.nodes.substitution_definition):
            node.elements = []
        elif isinstance(node, docutils.nodes.tbody):
            rows = [self.gen_elements(n) for n in node.children]
            t = []
            for r in rows:
                if not r:
                    continue
                t.append(r)
            t_style = TableStyle(self.styles['table'].commands)
            colWidths = self.styles['table'].colWidths
            node.elements = [DelayedTable(t, style=t_style, colWidths=colWidths)]
        elif isinstance(node, (docutils.nodes.footnote,
                               docutils.nodes.citation)):
            # It seems a footnote contains a label and a series of elements
            ltext = self.gather_pdftext(node.children[0])
            if len(node['backrefs']) > 1 and self.footnote_backlinks:
                backrefs = []
                i = 1
                for r in node['backrefs']:
                    backrefs.append('<a href="#%s" color="%s">%d</a>' % (
                        r, self.styles.linkColor, i))
                    i += 1
                backrefs = '(%s)' % ', '.join(backrefs)
                if ltext not in self.targets:
                    label = Paragraph('<a name="%s"/>%s'%(ltext,
                                                          ltext + backrefs),
                                      self.styles["normal"])
                    self.targets.append(ltext)
            elif len(node['backrefs'])==1 and self.footnote_backlinks:
                if ltext not in self.targets:
                    label = Paragraph('<a name="%s"/>'\
                                      '<a href="%s" color="%s">%s</a>' % (
                                        ltext,
                                        node['backrefs'][0],
                                        self.styles.linkColor,
                                        ltext), self.styles["normal"])
                    self.targets.append(ltext)
            else:
                if ltext not in self.targets:
                    label = Paragraph('<a name="%s"/>%s' % (ltext, ltext),
                        self.styles["normal"])
                    self.targets.append(ltext)
            contents = self.gather_elements(node, style)[1:]
            if self.inline_footnotes:
                st=self.styles['endnote']
                t_style = TableStyle(st.commands)
                colWidths = self.styles['endnote'].colWidths
                node.elements = [Spacer(0, st.spaceBefore),
                                 DelayedTable([[label, contents]],
                                 style=t_style, colWidths=colWidths),
                                 Spacer(0, st.spaceAfter)]
            else:
                self.decoration['endnotes'].append([label, contents])
                node.elements = []
        elif isinstance(node, docutils.nodes.label):
            node.elements = [Paragraph(self.gather_pdftext(node), style)]
        elif isinstance(node, docutils.nodes.Text):
            node.elements = [Paragraph(self.gather_pdftext(node), style)]
        elif isinstance(node, docutils.nodes.entry):
            node.elements = self.gather_elements(node, style)
        elif isinstance(node, docutils.nodes.target):
            if 'refid' in node:
                self.pending_targets.append(node['refid'])
            node.elements = self.gather_elements(node, style)
        elif isinstance(node, docutils.nodes.reference):
            node.elements = self.gather_elements(node, style)
        elif isinstance(node, docutils.nodes.raw):
            # Not really raw, but what the heck
            if node.get('format','NONE').lower()=='pdf':
                node.elements = parseRaw(str(node.astext()))
            else:
                node.elements = []
        # NOTE(review): this branch is unreachable -- citation nodes are
        # already handled together with footnote above.
        elif isinstance(node, docutils.nodes.citation):
            node.elements = []
        elif isinstance(node, Aanode):
            style_options = {
                'font': self.styles['aafigure'].fontName,
                }
            node.elements = [node.gen_flowable(style_options)]
        # custom SPHINX nodes.
        # FIXME: make sure they are all here, and keep them all together
        elif HAS_SPHINX and isinstance(node, sphinx.addnodes.centered):
            node.elements=[Paragraph(self.gather_pdftext(node),
                self.styles['centered'])]
        elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc):
            st=self.styles[node['desctype']]
            if st==self.styles['normal']:
                st=copy(self.styles['desc'])
                st.spaceBefore=0
            pre=[Spacer(0,self.styles['desc'].spaceBefore)]
            node.elements = pre+\
                self.gather_elements(node, st)
        elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_signature):
            # Need to add ids as targets, found this when using one of the
            # django docs extensions
            targets=[i.replace(' ','') for i in node['ids']]
            pre=''
            for i in targets:
                if i not in self.targets:
                    pre+='<a name="%s" />'% i
                    self.targets.append(i)
            node.elements = [Paragraph(pre+self.gather_pdftext(node),style)]
        elif HAS_SPHINX and isinstance(node, sphinx.addnodes.desc_content):
            node.elements = [MyIndenter(left=10)] +\
                self.gather_elements(node, self.styles["definition"]) +\
                [MyIndenter(left=-10)]
        # These are here because sphinx.addnodes.desc inherits Admonition
        elif isinstance(node, (docutils.nodes.attention,
                docutils.nodes.caution, docutils.nodes.danger,
                docutils.nodes.error, docutils.nodes.hint,
                docutils.nodes.important, docutils.nodes.note,
                docutils.nodes.tip, docutils.nodes.warning,
                docutils.nodes.Admonition)):
            if node.children and isinstance(node.children[0], docutils.nodes.title):
                title=[]
            else:
                title= [Paragraph(node.tagname.title(),
                    style=self.styles['%s-heading'%node.tagname])]
            rows=title + self.gather_elements(node, style=style)
            st=self.styles[node.tagname]
            if 'commands' in dir(st):
                t_style = TableStyle(st.commands)
            else:
                t_style = TableStyle()
            t_style.add("ROWBACKGROUNDS", [0, 0], [-1, -1],[st.backColor])
            t_style.add("BOX", [ 0, 0 ], [ -1, -1 ], st.borderWidth , st.borderColor)
            if self.splittables:
                node.elements = [Spacer(0,st.spaceBefore),
                                 SplitTable([['',rows]],
                                 style=t_style,
                                 colWidths=[0,None],
                                 padding=st.borderPadding),
                                 Spacer(0,st.spaceAfter)]
            else:
                padding, p1, p2, p3, p4=tablepadding(padding=st.borderPadding)
                t_style.add(*p1)
                t_style.add(*p2)
                t_style.add(*p3)
                t_style.add(*p4)
                node.elements = [Spacer(0,st.spaceBefore),
                                 DelayedTable([['',rows]],
                                 style=t_style,
                                 colWidths=[0,None]),
                                 Spacer(0,st.spaceAfter)]
        else:
            # With sphinx you will have hundreds of these
            #if not HAS_SPHINX:
            cln=str(node.__class__)
            if not cln in unkn_elem:
                unkn_elem.add(cln)
                log.error("Unkn. node (gen_elements): %s [%s]",
                str(node.__class__), nodeid(node))
            # Why fail? Just log it and do our best.
            node.elements = self.gather_elements(node, style)
        # Make all the sidebar cruft unreachable
        #if style.__dict__.get('float','None').lower() !='none':
            #node.elements=[Sidebar(node.elements,style)]
        #elif 'width' in style.__dict__:
        # Constrain the output to the style's width, shrinking if needed.
        if 'width' in style.__dict__:
            node.elements = [BoundByWidth(style.width,
                node.elements, style, mode="shrink")]
        if node.line and self.debugLinesPdf:
            node.elements.insert(0,TocEntry(self.depth-1,'LINE-%s'%node.line))
        return node.elements
def gather_elements(self, node, style=None):
if style is None:
style = self.styles.styleForNode(node)
r = []
if 'float' in style.__dict__:
style = None # Don't pass floating styles to children!
for n in node.children:
# import pdb; pdb.set_trace()
r.extend(self.gen_elements(n, style=style))
return r
def bullet_for_node(self, node):
"""Takes a node, assumes it's some sort of
item whose parent is a list, and
returns the bullet text it should have"""
b = ""
t = 'item'
if node.parent.get('start'):
start = int(node.parent.get('start'))
else:
start = 1
if node.parent.get('bullet') or \
isinstance(node.parent, docutils.nodes.bullet_list):
b = node.parent.get('bullet','*')
if b == "None":
b = ""
t = 'bullet'
elif node.parent.get('enumtype')=='arabic':
b = str(node.parent.children.index(node) + start) + '.'
elif node.parent.get('enumtype') == 'lowerroman':
b = toRoman(node.parent.children.index(node) + start).lower() + '.'
elif node.parent.get('enumtype') == 'upperroman':
b = toRoman(node.parent.children.index(node) + start).upper() + '.'
elif node.parent.get('enumtype') == 'loweralpha':
b = string.lowercase[node.parent.children.index(node)
+ start - 1] + '.'
elif node.parent.get('enumtype') == 'upperalpha':
b = string.uppercase[node.parent.children.index(node)
+ start - 1] + '.'
else:
log.critical("Unknown kind of list_item %s [%s]",
node.parent, nodeid(node))
return b, t
def filltable(self, rows):
"""
Takes a list of rows, consisting of cells and performs the following fixes:
* For multicolumn cells, add continuation cells, to make all rows the same
size.
* For multirow cell, insert continuation cells, to make all columns the
same size.
* If there are still shorter rows, add empty cells at the end (ReST quirk)
* Once the table is *normalized*, create spans list, fitting for reportlab's
Table class.
"""
# If there is a multicol cell, we need to insert Continuation Cells
# to make all rows the same length
for y in range(0, len(rows)):
for x in range(0, len(rows[y])):
cell = rows[y][x]
if isinstance(cell, str):
continue
if cell.get("morecols"):
for i in range(0, cell.get("morecols")):
rows[y].insert(x + 1, "")
for y in range(0, len(rows)):
for x in range(0, len(rows[y])):
cell = rows[y][x]
if isinstance(cell, str):
continue
if cell.get("morerows"):
for i in range(0, cell.get("morerows")):
rows[y + i + 1].insert(x, "")
# If a row is shorter, add empty cells at the right end
maxw = max([len(r) for r in rows])
for r in rows:
while len(r) < maxw:
r.append("")
# Create spans list for reportlab's table style
spans = []
for y in range(0, len(rows)):
for x in range(0, len(rows[y])):
cell = rows[y][x]
if isinstance(cell, str):
continue
if cell.get("morecols"):
mc = cell.get("morecols")
else:
mc = 0
if cell.get("morerows"):
mr = cell.get("morerows")
else:
mr = 0
if mc or mr:
spans.append(('SPAN', (x, y), (x + mc, y + mr)))
return spans
def PreformattedFit(self, text, style):
"""Preformatted section that gets horizontally compressed if needed."""
# Pass a ridiculous size, then it will shrink to what's available
# in the frame
return BoundByWidth(2000*cm,
content=[XPreformatted(text, style)],
mode=self.fit_mode, style=style)
def createPdf(self, text=None,
        source_path=None,
        output=None,
        doctree=None,
        compressed=False,
        # This adds entries to the PDF TOC
        # matching the rst source lines
        debugLinesPdf=False):
    """Create a PDF from text (ReST input),
    or doctree (docutil nodes) and save it in outfile.

    If outfile is a string, it's a filename.
    If it's something with a write method, (like a StringIO,
    or a file object), the data is saved there.
    """
    # Per-run rendering state: header/footer decorations plus endnotes
    # collected while the story is generated.
    self.decoration = {'header': self.header,
        'footer': self.footer,
        'endnotes': []}
    self.pending_targets=[]
    self.targets=[]
    self.debugLinesPdf = debugLinesPdf
    if doctree is None:
        if text is not None:
            # Let docutils parse the ReST source; honour the configured
            # language (first two letters) for localized directives.
            if self.language:
                settings_overrides={'language_code': self.language[:2]}
            else:
                settings_overrides={}
            self.doctree = docutils.core.publish_doctree(text,
                source_path=source_path,
                settings_overrides=settings_overrides)
            log.debug(self.doctree)
        else:
            log.error('Error: createPdf needs a text or a doctree')
            return
    else:
        self.doctree = doctree
    # Convert the doctree into a list of reportlab flowables.
    elements = self.gen_elements(self.doctree)
    if self.blank_first_page:
        elements.insert(0,PageBreak())
    # Put the endnotes at the end ;-)
    endnotes = self.decoration['endnotes']
    if endnotes:
        elements.append(Spacer(1, 2*cm))
        elements.append(Separation())
        for n in self.decoration['endnotes']:
            t_style = TableStyle(self.styles['endnote'].commands)
            colWidths = self.styles['endnote'].colWidths
            elements.append(DelayedTable([[n[0], n[1]]],
                style=t_style, colWidths=colWidths))
    head = self.decoration['header']
    foot = self.decoration['footer']
    # So, now, create the FancyPage with the right sizes and elements
    FP = FancyPage("fancypage", head, foot, self.styles,
        smarty=self.smarty, show_frame=self.show_frame)
    pdfdoc = FancyDocTemplate(
        output,
        pageTemplates=[FP],
        showBoundary=0,
        pagesize=self.styles.ps,
        title=self.doc_title,
        author=self.doc_author,
        pageCompression=compressed)
    # Build the PDF.  An unresolved cross-document reference makes
    # reportlab raise ValueError; register the missing target and retry.
    while True:
        try:
            log.info("Starting build")
            pdfdoc.multiBuild(elements)
            break
        except ValueError, v:
            # FIXME: cross-document links come through here, which means
            # an extra pass per cross-document reference. Which sucks.
            if v.args and str(v.args[0]).startswith('format not resolved'):
                missing=str(v.args[0]).split(' ')[-1]
                log.error('Adding missing reference to %s and rebuilding. This is slow!'%missing)
                elements.append(Reference(missing))
                # Clear reportlab's "already laid out" markers so the
                # same story can be consumed again on the next pass.
                for e in elements:
                    if hasattr(e,'_postponed'):
                        delattr(e,'_postponed')
            else:
                raise
    #doc = SimpleDocTemplate("phello.pdf")
    #doc.build(elements)
    # Remove temporary files created while generating elements.
    for fn in self.to_unlink:
        os.unlink(fn)
class TocBuilderVisitor(docutils.nodes.SparseNodeVisitor):
    """Sparse visitor that collects reference ids for a Contents node."""

    def __init__(self, document):
        docutils.nodes.SparseNodeVisitor.__init__(self, document)
        # TOC flowable being filled in; assigned by the caller before the
        # traversal runs.
        self.toc = None
        # For some reason, when called via sphinx,
        # .. contents:: ends up trying to call
        # visitor.document.reporter.debug
        # so we need a valid document here.
        self.document=docutils.utils.new_document('')

    def visit_reference(self, node):
        # Record every internal reference id so the TOC can link to it.
        refid = node.attributes.get('refid')
        if refid:
            self.toc.refids.append(refid)
class FancyDocTemplate(BaseDocTemplate):
    """Document template that reports TOC entries as headings are drawn."""

    def afterFlowable(self, flowable):
        # Only Heading flowables (which also cover abstracts/dedications)
        # produce entries in the table of contents.
        if not isinstance(flowable, Heading):
            return
        # setPageCounter() with no arguments just formats the current
        # global page counter value.
        pagenum = setPageCounter()
        self.notify('TOCEntry', (flowable.level, flowable.text, pagenum,
                                 flowable.parent_id, flowable.node))
#FIXME: these should not be global, but look at issue 126
# Current header/footer flowables shared between FancyPage instances.
head = None
foot = None
# Global page-counter state driven by PageCounter and setPageCounter().
_counter=0
_counterStyle='arabic'
class PageCounter(Flowable):
    """Invisible flowable that resets the global page counter when drawn."""

    def __init__(self, number=0, style='arabic'):
        # Counter style name (e.g. 'arabic', 'roman') and starting value.
        self.style=str(style).lower()
        self.number=int(number)
        Flowable.__init__(self)

    def drawOn(self, canvas, x, y, _sW):
        # Nothing is painted; drawing merely updates the globals that
        # setPageCounter() consults.
        global _counter, _counterStyle
        _counterStyle=self.style
        _counter=self.number

# Expose the counter through the flowables module for directive use.
flowables.PageCounter = PageCounter
def setPageCounter(counter=None, style=None):
    """Update the global page counter/style and return the formatted value.

    With no arguments this simply formats the current counter using the
    current style ('lowerroman', 'roman', 'alpha', 'loweralpha' or the
    default arabic).
    """
    global _counter, _counterStyle
    if counter is not None:
        _counter = counter
    if style is not None:
        _counterStyle = style
    if _counterStyle == 'lowerroman':
        return toRoman(_counter).lower()
    if _counterStyle == 'roman':
        return toRoman(_counter).upper()
    if _counterStyle == 'alpha':
        return string.uppercase[_counter % 26]
    if _counterStyle == 'loweralpha':
        return string.lowercase[_counter % 26]
    return unicode(_counter)
class FancyPage(PageTemplate):
    """ A page template that handles changing layouts.
    """

    def __init__(self, _id, _head, _foot, styles, smarty="0", show_frame=False):
        global head, foot
        self.styles = styles
        # Header/footer live in module-level globals (see the FIXME near
        # their definition) so afterDrawPage can reach them.
        head = _head
        foot = _foot
        self.smarty = smarty
        self.show_frame = show_frame
        PageTemplate.__init__(self, _id, [])

    def beforeDrawPage(self, canv, doc):
        """Do adjustments to the page according to where we are in the document.

        * Gutter margins on left or right as needed
        """
        #from pudb import set_trace; set_trace()
        global head, foot, _counter, _counterStyle
        # Usable text width: page width minus left/right/gutter margins.
        self.tw = self.styles.pw - self.styles.lm -\
            self.styles.rm - self.styles.gm
        # What page template to use?
        tname = canv.__dict__.get('templateName',
            self.styles.firstTemplate)
        self.template = self.styles.pageTemplates[tname]
        canv.templateName=tname
        doct = getattr(canv, '_doctemplate', None)
        canv._doctemplate = None # to make _listWrapOn work
        # Restart the page counter on page 1, then advance it.
        if doc.page==1:
            _counter=0
            _counterStyle='arabic'
        _counter+=1
        # Adjust text space accounting for header/footer
        _head = self.template.get('showHeader', True) and (
            head or self.template.get('defaultHeader'))
        if _head:
            # A plain string becomes a single Paragraph; a list is copied
            # so later token replacement doesn't mutate the original.
            if isinstance(_head, list):
                _head = _head[:]
            else:
                _head = [Paragraph(_head, self.styles['header'])]
            _, self.hh = _listWrapOn(_head, self.tw, canv)
        else:
            self.hh = 0
        _foot = self.template.get('showFooter', True) and (
            foot or self.template.get('defaultFooter'))
        if _foot:
            if isinstance(_foot, list):
                _foot = _foot[:]
            else:
                _foot = [Paragraph(_foot, self.styles['footer'])]
            _, self.fh = _listWrapOn(_foot, self.tw, canv)
        else:
            self.fh = 0
        canv._doctemplate = doct
        # Header/footer positions plus the remaining text height.
        self.hx = self.styles.lm
        self.hy = self.styles.ph - self.styles.tm -self.hh
        self.fx = self.styles.lm
        self.fy = self.styles.bm
        self.th = self.styles.ph - self.styles.tm - \
            self.styles.bm - self.hh - self.fh - \
            self.styles.ts - self.styles.bs
        # Adjust gutter margins
        if doc.page % 2: # Left page
            x1 = self.styles.lm
        else: # Right page
            x1 = self.styles.lm + self.styles.gm
        y1 = self.styles.bm + self.fh + self.styles.bs
        # If there is a background parameter for this page Template, draw it
        if 'background' in self.template:
            # Vector formats are routed through SVGImage; everything else
            # is treated as a bitmap.
            if self.template['background'].split('.')[-1].lower() in [
                    "ai", "ccx", "cdr", "cgm", "cmx",
                    "sk1", "sk", "svg", "xml", "wmf", "fig"]:
                bg = SVGImage(self.template['background'],
                    self.styles.pw, self.styles.ph)
            else:
                bg = Image(self.template['background'],
                    self.styles.pw, self.styles.ph)
            bg.drawOn(canv, 0, 0)
        # Create this template's frames, scaled to the text area.
        self.frames = []
        for frame in self.template['frames']:
            self.frames.append(SmartFrame(self,
                self.styles.adjustUnits(frame[0], self.tw) + x1,
                self.styles.adjustUnits(frame[1], self.th) + y1,
                self.styles.adjustUnits(frame[2], self.tw),
                self.styles.adjustUnits(frame[3], self.th),
                showBoundary=self.show_frame))
        canv.firstSect=True
        canv._pagenum=doc.page
        for frame in self.frames:
            frame._pagenum=doc.page

    def replaceTokens(self, elems, canv, doc):
        """Put doc_title/page number/etc in text of header/footer."""
        # Make sure page counter is up to date
        pnum=setPageCounter()
        for e in elems:
            i = elems.index(e)
            if isinstance(e, Paragraph):
                text = e.text
                # Python 2 unicode handling: decode with the paragraph's
                # own encoding when available, else assume UTF-8.
                if not isinstance(text, unicode):
                    try:
                        text = unicode(text, e.encoding)
                    except AttributeError:
                        text = unicode(text, 'utf-8')
                text = text.replace(u'###Page###', pnum)
                text = text.replace(u"###Title###", doc.title)
                text = text.replace(u"###Section###",
                    getattr(canv, 'sectName', ''))
                text = text.replace(u"###SectNum###",
                    getattr(canv, 'sectNum', ''))
                text = smartyPants(text, self.smarty)
                # Rebuild the paragraph in place with the expanded text.
                elems[i] = Paragraph(text, e.style)

    def afterDrawPage(self, canv, doc):
        """Draw header/footer."""
        # Adjust for gutter margin
        log.info('Page %s [%s]'%(_counter,doc.page))
        if doc.page % 2: # Left page
            hx = self.hx
            fx = self.fx
        else: # Right Page
            hx = self.hx + self.styles.gm
            fx = self.fx + self.styles.gm
        _head = self.template.get('showHeader', True) and (
            head or self.template.get('defaultHeader'))
        if _head:
            _head = copy(_head)
            if isinstance(_head, list):
                _head = _head[:]
            else:
                _head = [Paragraph(_head, self.styles['header'])]
            self.replaceTokens(_head, canv, doc)
            # Wrap the flowables in a _Container so they draw as a unit.
            container = _Container()
            container._content = _head
            container.width = self.tw
            container.height = self.hh
            container.drawOn(canv, hx, self.hy)
        _foot = self.template.get('showFooter', True) and (
            foot or self.template.get('defaultFooter'))
        if _foot:
            _foot = copy(_foot)
            if isinstance(_foot, list):
                _foot = _foot[:]
            else:
                _foot = [Paragraph(_foot, self.styles['footer'])]
            self.replaceTokens(_foot, canv, doc)
            container = _Container()
            container._content = _foot
            container.width = self.tw
            container.height = self.fh
            container.drawOn(canv, fx, self.fy)
def parse_commandline():
    """Build and return the OptionParser for the rst2pdf command line.

    Defaults are read from the config file ("general" section) so the
    help strings can show the effective default values.
    """
    parser = OptionParser()
    parser.add_option('--config', dest='configfile', metavar='FILE',
        help='Config file to use. Default=~/.rst2pdf/config')
    parser.add_option('-o', '--output', dest='output', metavar='FILE',
        help='Write the PDF to FILE')
    def_ssheets = ','.join([expanduser(p) for p in
        config.getValue("general", "stylesheets", "").split(',')])
    parser.add_option('-s', '--stylesheets', dest='style',
        type='string', action='append',
        metavar='STYLESHEETS', default=[def_ssheets],
        help='A comma-separated list of custom stylesheets.'\
        ' Default="%s"' % def_ssheets)
    def_sheetpath = os.pathsep.join([expanduser(p) for p in
        config.getValue("general", "stylesheet_path", "").split(os.pathsep)])
    # BUG FIX: the original help string had mismatched quotes
    # ('...stylesheets,"\ followed by " separated...'), which embedded
    # stray double-quote characters in the help text instead of closing
    # and reopening the literal like every other option here does.
    parser.add_option('--stylesheet-path', dest='stylepath',
        metavar='FOLDER%sFOLDER%s...%sFOLDER'%((os.pathsep, )*3),
        default=def_sheetpath,
        help='A list of folders to search for stylesheets,'\
        ' separated using "%s". Default="%s"' %(os.pathsep, def_sheetpath))
    def_compressed = config.getValue("general", "compressed", False)
    parser.add_option('-c', '--compressed', dest='compressed',
        action="store_true", default=def_compressed,
        help='Create a compressed PDF. Default=%s'%def_compressed)
    parser.add_option('--print-stylesheet', dest='printssheet',
        action="store_true", default=False,
        help='Print the default stylesheet and exit')
    parser.add_option('--font-folder', dest='ffolder', metavar='FOLDER',
        help='Search this folder for fonts. (Deprecated)')
    def_fontpath = os.pathsep.join([expanduser(p) for p in
        config.getValue("general", "font_path", "").split(os.pathsep)])
    parser.add_option('--font-path', dest='fpath',
        metavar='FOLDER%sFOLDER%s...%sFOLDER'%((os.pathsep, )*3),
        default=def_fontpath,
        help='A list of folders to search for fonts,'\
        ' separated using "%s". Default="%s"'%(os.pathsep, def_fontpath))
    # Relative URLs in the document resolve against the current directory.
    def_baseurl = urlunparse(['file',os.getcwd(),'','','',''])
    parser.add_option('--baseurl', dest='baseurl', metavar='URL',
        default=def_baseurl,
        help='The base URL for relative URLs. Default="%s"'%def_baseurl)
    def_lang = config.getValue("general", "language", None)
    parser.add_option('-l', '--language', metavar='LANG',
        default=def_lang, dest='language',
        help='Language to be used for hyphenation and '\
        'docutils localizations. Default="%s"' % def_lang)
    def_header = config.getValue("general", "header")
    parser.add_option('--header', metavar='HEADER',
        default=def_header, dest='header',
        help='Page header if not specified in the document.'\
        ' Default="%s"' % def_header)
    def_footer = config.getValue("general", "footer")
    parser.add_option('--footer', metavar='FOOTER',
        default=def_footer, dest='footer',
        help='Page footer if not specified in the document.'\
        ' Default="%s"' % def_footer)
    def_smartquotes = config.getValue("general", "smartquotes", "0")
    parser.add_option("--smart-quotes", metavar="VALUE",
        default=def_smartquotes, dest="smarty",
        help='Try to convert ASCII quotes, ellipsis and dashes '\
        'to the typographically correct equivalent. For details,'\
        ' read the man page or the manual. Default="%s"'%def_smartquotes)
    def_fit = config.getValue("general", "fit_mode", "shrink")
    parser.add_option('--fit-literal-mode', metavar='MODE',
        default=def_fit, dest='fit_mode',
        help='What todo when a literal is too wide. One of error,'\
        ' overflow,shrink,truncate. Default="%s"'%def_fit)
    parser.add_option('--inline-links', action="store_true",
        dest='inlinelinks', default=False,
        help='shows target between parenthesis instead of active link')
    parser.add_option('--repeat-table-rows', action="store_true",
        dest='repeattablerows', default=False,
        help='Repeats header row for each splitted table')
    parser.add_option('-q', '--quiet', action="store_true",
        dest='quiet', default=False,
        help='Print less information.')
    parser.add_option('-v', '--verbose', action="store_true",
        dest='verbose', default=False,
        help='Print debug information.')
    parser.add_option('--very-verbose', action="store_true",
        dest='vverbose', default=False,
        help='Print even more debug information.')
    parser.add_option('--version', action="store_true",
        dest='version', default=False,
        help='Print version number and exit.')
    def_footnote_backlinks = config.getValue("general",
        "footnote_backlinks", True)
    parser.add_option('--no-footnote-backlinks', action='store_false',
        dest='footnote_backlinks', default=def_footnote_backlinks,
        help='Disable footnote backlinks.'\
        ' Default=%s' % str(not def_footnote_backlinks))
    def_inline_footnotes = config.getValue("general",
        "inline_footnotes", False)
    parser.add_option('--inline-footnotes', action='store_true',
        dest='inline_footnotes', default=def_inline_footnotes,
        help='Show footnotes inline.'\
        ' Default=%s' % str(not def_inline_footnotes))
    def_dpi = config.getValue("general", "default_dpi", 300)
    parser.add_option('--default-dpi', dest='def_dpi', metavar='NUMBER',
        default=def_dpi,
        help='DPI for objects sized in pixels. Default=%d'%def_dpi)
    parser.add_option('--show-frame-boundary', dest='show_frame',
        action='store_true', default=False,
        help='Show frame borders (only useful for debugging). Default=False')
    parser.add_option('--disable-splittables', dest='splittables',
        action='store_false', default=True,
        help='Don\'t use splittable flowables in some elements. '
        'Only try this if you can\'t process a document any other way.')
    def_break = config.getValue("general", "break_level", 0)
    parser.add_option('-b', '--break-level', dest='breaklevel',
        metavar='LEVEL', default=def_break,
        help='Maximum section level that starts in a new page.'\
        ' Default: %d' % def_break)
    def_fpeven = config.getValue("general", "first_page_even", False)
    parser.add_option('--first-page-even', dest='first_page_even',
        action='store_true', default=def_fpeven,
        help='Whether first page is odd (as in the screen on "facing pages"), '\
        'or even (as in a book)')
    def_blankfirst = config.getValue("general", "blank_first_page", False)
    parser.add_option('--blank-first-page', dest='blank_first_page',
        action='store_true', default=def_blankfirst,
        help='Add a blank page at the beginning of the document.')
    def_breakside = config.getValue("general", "break_side", 'any')
    parser.add_option('--break-side', dest='breakside', metavar='VALUE',
        default=def_breakside,
        help='How section breaks work. Can be "even", and sections start in an even page,'\
        '"odd", and sections start in odd pages, or "any" and sections start in the next page,'\
        'be it even or odd. See also the -b option.')
    return parser
def main(args=None):
    """Parse command line and call createPdf with the correct data."""
    parser = parse_commandline()
    options, args = parser.parse_args(copy(args))
    if options.configfile:
        # Re-parse so option defaults come from the user-specified
        # config file rather than the default one.
        options.cfname=options.configfile
        parser = parse_commandline()
        options, args = parser.parse_args(copy(args))
    if options.version:
        from rst2pdf import version
        print version
        sys.exit(0)
    # Logging verbosity flags; when several are given the last wins.
    if options.quiet:
        log.setLevel(logging.CRITICAL)
    if options.verbose:
        log.setLevel(logging.INFO)
    if options.vverbose:
        log.setLevel(logging.DEBUG)
    if options.printssheet:
        # Dump the bundled default stylesheet and stop.
        print open(join(abspath(dirname(__file__)),
            'styles', 'styles.json')).read()
        sys.exit(0)
    filename = False
    # Input: stdin when no file (or '-') is given, else the named file.
    if len(args) == 0 or args[0] == '-':
        infile = sys.stdin
        basedir=os.getcwd()
    elif len(args) > 1:
        log.critical('Usage: %s file.txt [ -o file.pdf ]', sys.argv[0])
        sys.exit(1)
    else:
        filename = args[0]
        basedir=os.path.dirname(os.path.abspath(filename))
        infile = open(filename)
    # Output: explicit -o wins; '-' (or stdin input with no -o) writes to
    # stdout, which forces uncompressed output and silences logging.
    if options.output:
        outfile = options.output
        if outfile == '-':
            outfile = sys.stdout
            options.compressed = False
            #we must stay quiet
            log.setLevel(logging.CRITICAL)
    else:
        if filename:
            if filename.endswith('.txt') or filename.endswith('.rst'):
                outfile = filename[:-4] + '.pdf'
            else:
                outfile = filename + '.pdf'
        else:
            outfile = sys.stdout
            options.compressed = False
            #we must stay quiet
            log.setLevel(logging.CRITICAL)
            #/reportlab/pdfbase/pdfdoc.py output can
            #be a callable (stringio, stdout ...)
    # Stylesheets: each -s value may itself be a comma-separated list;
    # drop empty entries afterwards.
    ssheet = []
    if options.style:
        for l in options.style:
            ssheet += l.split(',')
    else:
        ssheet = []
    ssheet = [x for x in ssheet if x]
    # Font search path (plus the deprecated --font-folder).
    fpath = []
    if options.fpath:
        fpath = options.fpath.split(os.pathsep)
    if options.ffolder:
        fpath.append(options.ffolder)
    spath = []
    if options.stylepath:
        spath = options.stylepath.split(os.pathsep)
    if reportlab.Version < '2.3':
        log.warning('You are using Reportlab version %s.'\
            ' The suggested version '\
            'is 2.3 or higher'%reportlab.Version)
    # Hand everything over to the converter.
    RstToPdf(
        stylesheets=ssheet,
        language=options.language,
        header=options.header, footer=options.footer,
        inlinelinks=options.inlinelinks,
        breaklevel=int(options.breaklevel),
        baseurl=options.baseurl,
        fit_mode=options.fit_mode,
        smarty=str(options.smarty),
        font_path=fpath,
        style_path=spath,
        repeat_table_rows=options.repeattablerows,
        footnote_backlinks=options.footnote_backlinks,
        inline_footnotes=options.inline_footnotes,
        def_dpi=int(options.def_dpi),
        basedir=basedir,
        show_frame=options.show_frame,
        splittables=options.splittables,
        blank_first_page=options.blank_first_page,
        breakside=options.breakside
        ).createPdf(text=infile.read(),
            source_path=infile.name,
            output=outfile,
            compressed=options.compressed)
# Script entry point: forward the CLI arguments (minus argv[0]).
if __name__ == "__main__":
    main(sys.argv[1:])
| {
"content_hash": "c5af9775334cdecaaa4d89e250a8bb33",
"timestamp": "",
"source": "github",
"line_count": 2410,
"max_line_length": 117,
"avg_line_length": 40.28423236514523,
"alnum_prop": 0.5249317608281403,
"repo_name": "thomaspurchas/rst2pdf",
"id": "051a7a88de5ac8f5b6b8d15355cb6d12f80eae65",
"size": "99223",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.12",
"path": "rst2pdf/createpdf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "730755"
},
{
"name": "Shell",
"bytes": "4786"
}
],
"symlink_target": ""
} |
"""Module defining statements supported by parser."""
import cx_Logging
import cx_OracleUtils
class Statement(object):
    """Base class for statements recognized by the simple SQL parser.

    A statement knows its source text and line number, how to execute
    itself on a cursor, and what message (if any) to log afterwards.
    """

    # Fixed log message; None means "nothing to report".  Subclasses
    # either override this attribute or override GetLogMessage().
    message = None

    def __init__(self, sql, lineNumber):
        self.sql = sql
        self.lineNumber = lineNumber

    def __repr__(self):
        return "<" + self.__class__.__name__ + ">"

    def Execute(self, cursor):
        """Run the statement's SQL on the given cursor."""
        cursor.execute(self.sql)

    def GetLogMessage(self, cursor):
        """Return the message to log after execution, or None to stay quiet."""
        return self.message

    def Process(self, cursor):
        """Execute the statement, then log its outcome if there is one."""
        self.Execute(cursor)
        logMessage = self.GetLogMessage(cursor)
        if logMessage is not None:
            cx_Logging.Trace("%s", logMessage)
class ObjectStatement(Statement):
    """Statement targeting a named schema object (table, view, ...).

    Subclasses set ``action`` (e.g. "created", "dropped"), which is used
    when building the log message.
    """

    def __init__(self, sql, lineNumber, type, name, owner = None):
        super(ObjectStatement, self).__init__(sql, lineNumber)
        self.type = type
        self.name = name
        self.owner = owner

    def __repr__(self):
        className = self.__class__.__name__
        if self.owner is None:
            return "<%s %s (%s)>" % (className, self.name, self.type.upper())
        return "<%s %s.%s (%s)>" % \
                (className, self.owner, self.name, self.type.upper())

    def GetLogMessage(self, cursor):
        if self.owner is not None:
            return "%s %s.%s %s." % \
                    (self.type.capitalize(), self.owner, self.name,
                     self.action)
        return "%s %s %s." % \
                (self.type.capitalize(), self.name, self.action)
class DMLStatement(ObjectStatement):
    """Base class for INSERT/UPDATE/DELETE statements against a table."""

    def __init__(self, sql, lineNumber, owner, name):
        # NOTE(review): super() is deliberately bound to ObjectStatement,
        # which skips ObjectStatement.__init__ (it would require a type
        # argument) and calls Statement.__init__ directly.
        super(ObjectStatement, self).__init__(sql, lineNumber)
        self.owner = owner
        self.name = name

    def __repr__(self):
        return "<%s %s.%s>" % (self.__class__.__name__, self.owner, self.name)

    def GetLogMessage(self, cursor):
        # Report how many rows were touched, using the cursor's rowcount.
        rowsAffected = cursor.rowcount
        modifier = "row"
        if rowsAffected != 1:
            modifier = "rows"
        return "%s %s %s in %s.%s." % \
                (self.action.capitalize(), rowsAffected, modifier, self.owner,
                 self.name)
class AlterObjectStatement(ObjectStatement):
    # Logged as "<Type> <name> altered."
    action = "altered"
class AnonymousPlsqlBlock(Statement):
    # Anonymous BEGIN...END block; there is no object to name, so the
    # generic success message is used.
    message = "PL/SQL procedure successfully completed."
class CommentStatement(Statement):
    # COMMENT ON ... statement.
    message = "Comment created."
class CommitStatement(Statement):
    """COMMIT; issued through the connection rather than the cursor."""
    message = "Commit point reached."

    def Execute(self, cursor):
        cursor.connection.commit()
class ConnectStatement(Statement):
    """CONNECT user[/password][@dsn] directive; the actual reconnect is
    performed by the caller, which reads these attributes."""

    def __init__(self, sql, lineNumber, user, password = None, dsn = None):
        super(ConnectStatement, self).__init__(sql, lineNumber)
        self.user = user
        self.password = password
        self.dsn = dsn

    def GetLogMessage(self, cursor):
        return "Connected to %s" % self.user
class CreateObjectStatement(ObjectStatement):
    """CREATE statement that also checks for compilation errors."""
    action = "created"

    def Execute(self, cursor):
        # NOTE(review): super() is bound to ObjectStatement, so lookup
        # starts above it; since ObjectStatement defines no Execute, the
        # effect is the same as a conventional super() call.
        super(ObjectStatement, self).Execute(cursor)
        # After creation, query the error views so that invalid objects
        # are reported immediately with source line numbers.
        cursor = cx_OracleUtils.PrepareErrorsCursor(cursor.connection)
        cx_OracleUtils.CheckForErrors(cursor, self.owner, self.name,
                self.type.upper(), self.action + " with", self.lineNumber - 1)
class CreateConstraintStatement(CreateObjectStatement):
    """Constraint creation; also remembers the table it is attached to."""

    def __init__(self, sql, lineNumber, type, owner, name, tableName):
        super(CreateConstraintStatement, self).__init__(sql, lineNumber, type,
                name, owner)
        self.tableName = tableName
class DropObjectStatement(ObjectStatement):
    # Logged as "<Type> <name> dropped."
    action = "dropped"
class DeleteStatement(DMLStatement):
    # Logged as "Deleted N row(s) in owner.name."
    action = "deleted"
class GrantStatement(Statement):
    # GRANT ... statement.
    message = "Privilege(s) granted."
class InsertStatement(DMLStatement):
    # Logged as "Inserted N row(s) in owner.name."
    action = "inserted"
class RenameObjectStatement(ObjectStatement):
    """RENAME old TO new; the type is always reported as "object"."""
    action = "renamed"

    def __init__(self, sql, lineNumber, name):
        super(RenameObjectStatement, self).__init__(sql, lineNumber, "object",
                name)
class RevokeStatement(Statement):
    # REVOKE ... statement.
    message = "Privilege(s) revoked."
class RollbackStatement(Statement):
    """ROLLBACK; issued through the connection rather than the cursor."""
    message = "Rolled back."

    def Execute(self, cursor):
        cursor.connection.rollback()
class TruncateObjectStatement(ObjectStatement):
    """TRUNCATE TABLE owner.name."""
    action = "truncated"

    def __init__(self, sql, lineNumber, owner, name):
        super(TruncateObjectStatement, self).__init__(sql, lineNumber, "table",
                name, owner)
class UpdateStatement(DMLStatement):
    """UPDATE statement.

    The action is lower case for consistency with the sibling statement
    classes ("inserted", "deleted", ...); DMLStatement.GetLogMessage
    calls ``capitalize()`` on it, so the logged text is unchanged.
    """
    action = "updated"
| {
"content_hash": "173c168a86e511d2e2dcaf30cf41202c",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 79,
"avg_line_length": 26.664670658682635,
"alnum_prop": 0.6173366269930384,
"repo_name": "marhar/cx_OracleTools",
"id": "5a062c627b69936440395c08ecd8c3009921b6d0",
"size": "4453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cx_PyOracleLib/cx_OracleParser/simple/Statements.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "103874"
},
{
"name": "CSS",
"bytes": "15808"
},
{
"name": "HTML",
"bytes": "144984"
},
{
"name": "PLSQL",
"bytes": "8765"
},
{
"name": "Python",
"bytes": "574257"
},
{
"name": "Shell",
"bytes": "434"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import six
import inspect
from .requires import Requires
from .dsl import build_requirements_factory as default_build_requirements_factory, init_transformer, init_parser
from .exceptions import DecoratorError
class Validate(object):
    """Decorator factory that validates call arguments against the
    requirements DSL embedded in a function's docstring (or in an
    explicitly supplied Requires object)."""

    def __init__(self, callables_dict=None, build_requirements_factory=None):
        # Extra callables made available to the requirements DSL.
        self.callables_dict = callables_dict or {}
        self.build_requirements_factory = build_requirements_factory or default_build_requirements_factory
        # Builder that turns a requirements docstring into a Requires.
        self.requirements_builder = self.build_requirements_factory(
            init_parser(),
            init_transformer(callables_dict)
        )

    def _inherit_callables_dict(self, callables_dict):
        # Merge, letting this instance's callables win on name clashes.
        new_callables_dict = callables_dict.copy()
        new_callables_dict.update(self.callables_dict)
        return new_callables_dict

    def register_callables(self, callables_dict):
        """Return a new Validate instance with extra DSL callables."""
        return Validate(self._inherit_callables_dict(callables_dict), self.build_requirements_factory)

    def __call__(self, arg=None, callables_dict=None):
        if callables_dict is not None and arg is None:
            # called with validate(callable_dict=...)
            return self.register_callables(callables_dict)
        if arg is None:
            raise DecoratorError('Error, arg must be provided if callables_dict is None')
        if isinstance(arg, Requires):
            # Explicit requirements; the decorator itself is returned so
            # it can be applied to the target function later.
            requires = arg
            func = None
        else:
            # Used directly as @validate: derive requirements from the
            # decorated function's docstring.
            func = arg
            docstring = func.__doc__
            if not docstring:
                raise DecoratorError("If function doesn't have a docstring, you must pass a requires object explicitly")
            requirements_builder = None
            if callables_dict:
                requirements_builder = self.build_requirements_factory(
                    init_parser(),
                    init_transformer(self._inherit_callables_dict(callables_dict))
                )
            requirements_builder = requirements_builder or self.requirements_builder
            requires = requirements_builder(
                docstring,
            )
        def validate_decorator(func):
            # Introspect the signature once, at decoration time.
            if six.PY3:
                fullargspec = inspect.getfullargspec(func)
                inspected_args = fullargspec.args or ()
                inspected_defaults = fullargspec.defaults or ()
                inspected_kwonlyargs_defaults = fullargspec.kwonlydefaults or {}
            else:
                argspec = inspect.getargspec(func)
                inspected_args = argspec.args or ()
                inspected_defaults = argspec.defaults or ()
                inspected_kwonlyargs_defaults = {}
            @six.wraps(func)
            def func_wrapper(*args, **kwargs):
                # Map positional args (plus trailing defaults) to names,
                # overlay kwargs, then validate the merged mapping.
                args_as_dict = dict(zip(inspected_args, args + inspected_defaults))
                args_as_dict.update(inspected_kwonlyargs_defaults)
                args_as_dict.update(kwargs)
                requires.validate(args_as_dict)
                return func(*args, **kwargs)
            return func_wrapper
        return validate_decorator(func) if func is not None else validate_decorator
# Module-level default instance; use as @validate or validate(...).
validate = Validate()
| {
"content_hash": "e87be851b58b2f8068dafb6944f3f194",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 120,
"avg_line_length": 37.904761904761905,
"alnum_prop": 0.6187185929648241,
"repo_name": "shezadkhan137/required",
"id": "282cc97bdc813bb79460742a28b35060cf163f5c",
"size": "3208",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/required/decorator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "174"
},
{
"name": "Python",
"bytes": "51411"
}
],
"symlink_target": ""
} |
class SimpleEngagementCalculator(object):
    """Trivial engagement calculator used by the test settings."""

    def calculate_user_engagement_score(self, user, start_date, end_date):
        # Every user scores 0 in the test environment.
        return 0
# Minimal Django settings for running the experiments test suite against
# an in-memory SQLite database (no transactions).
ROOT_URLCONF=None
DATABASE_ENGINE='sqlite3'
DATABASE_NAME=':memory:'
DATABASE_SUPPORTS_TRANSACTIONS=False
INSTALLED_APPS=[
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'experiments']
TEMPLATE_CONTEXT_PROCESSORS =(
    "django.core.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.request")
| {
"content_hash": "c1cba690914d6c8ab314b903bdb0c8c7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 74,
"avg_line_length": 29.59090909090909,
"alnum_prop": 0.7311827956989247,
"repo_name": "uhuramedia/django-lean",
"id": "3432b5425e3dac0f0c4807ed6632aa6fcb6fa41d",
"size": "651",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "django_lean/experiments/testsettings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "13774"
},
{
"name": "JavaScript",
"bytes": "2774"
},
{
"name": "Python",
"bytes": "310391"
}
],
"symlink_target": ""
} |
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
from django.db import models
from django.utils import timezone
from .FuelSupplier import FuelSupplier
from .CreditTrade import CreditTrade
from auditable.models import Auditable
class FuelSupplierBalance(Auditable):
    """Credit balance record for a fuel supplier over a date range."""

    # Supplier this balance belongs to.
    fuelSupplierFK = models.ForeignKey('FuelSupplier', related_name='FuelSupplierBalancefuelSupplierFK')
    # Validated credits vs. credits reserved (encumbered) by trades.
    validatedCredits = models.IntegerField()
    encumberedCredits = models.IntegerField()
    # Period this record covers; endDate null means still in effect.
    effectiveDate = models.DateField()
    endDate = models.DateField(blank=True, null=True)
    # Credit trade associated with this balance entry, if any.
    creditTradeFK = models.ForeignKey('CreditTrade', related_name='FuelSupplierBalancecreditTradeFK', blank=True, null=True)

    class Meta:
        db_table = 'FUEL_SUPPLIER_BALANCE'
| {
"content_hash": "8b3c44ca17ae43e6f6727c291f4c070f",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 208,
"avg_line_length": 40.6,
"alnum_prop": 0.7549261083743842,
"repo_name": "jeffmcnd/tfrs",
"id": "eb139075bcf9a1e6f2facb7e86c0a5a30be1e38e",
"size": "1624",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/models/FuelSupplierBalance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "6851"
},
{
"name": "CSS",
"bytes": "322498"
},
{
"name": "Groovy",
"bytes": "4596"
},
{
"name": "HTML",
"bytes": "234138"
},
{
"name": "JavaScript",
"bytes": "225944"
},
{
"name": "Python",
"bytes": "715059"
},
{
"name": "Shell",
"bytes": "12332"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Creates the LoginMethod table and links applications to it via a
    # many-to-many field.

    dependencies = [
        ('users', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='LoginMethod',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('provider_id', models.CharField(unique=True, max_length=50)),
                ('name', models.CharField(max_length=100)),
            ],
        ),
        migrations.AddField(
            model_name='application',
            name='login_methods',
            field=models.ManyToManyField(to='users.LoginMethod'),
        ),
    ]
| {
"content_hash": "c06625da28d9c8ac7a4f24c3daa3e420",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 114,
"avg_line_length": 29.03846153846154,
"alnum_prop": 0.5642384105960265,
"repo_name": "mikkokeskinen/tunnistamo",
"id": "0d44c21b4928cd47d564cb801f1428d7f41105f7",
"size": "779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/migrations/0002_auto_20151031_1511.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2730"
},
{
"name": "Dockerfile",
"bytes": "330"
},
{
"name": "HTML",
"bytes": "4360"
},
{
"name": "Python",
"bytes": "301704"
}
],
"symlink_target": ""
} |
import re
from ..count_symbols import count_symbols
from ..stream.id_generator import IdGenerator
class Compiler(object):
    """Compile a parsed Gherkin document (a dict AST) into "pickles".

    A pickle is a flattened, executable test case: background steps are
    merged in front of scenario steps, rules are expanded, and scenario
    outlines are instantiated once per examples-table row with ``<name>``
    placeholders substituted.
    """

    def __init__(self, id_generator=None):
        # id_generator supplies unique ids for pickles and pickle steps;
        # defaults to a fresh IdGenerator when none is injected.
        self.id_generator = id_generator
        if self.id_generator is None:
            self.id_generator = IdGenerator()

    def compile(self, gherkin_document):
        """Return the list of pickles compiled from *gherkin_document*.

        Documents without a feature, or with an empty feature, yield an
        empty list.
        """
        pickles = []
        if 'feature' not in gherkin_document:
            return pickles
        feature = gherkin_document['feature']
        if not feature['children']:
            return pickles
        uri = gherkin_document['uri']
        feature_tags = feature['tags']
        language = feature['language']
        background_steps = []
        for scenario_definition in feature['children']:
            if 'background' in scenario_definition:
                # Feature-level background steps accumulate and are later
                # prepended to every scenario's own steps.
                if scenario_definition['background']['steps']:
                    background_steps += scenario_definition['background']['steps']
            elif 'rule' in scenario_definition:
                self._compile_rule(uri, feature_tags, background_steps, scenario_definition['rule'], language, pickles)
            else:
                scenario = scenario_definition['scenario']
                args = (uri, feature_tags, background_steps, scenario, language, pickles)
                if not scenario['examples']:
                    self._compile_scenario(*args)
                else:
                    self._compile_scenario_outline(*args)
        return pickles

    def _compile_rule(self, uri, feature_tags, feature_background_steps, rule, language, pickles):
        """Compile a Rule node: rule tags extend feature tags, and the rule's
        own background steps extend the feature background."""
        tags = list(feature_tags) + list(rule['tags'])
        background_steps = []
        background_steps += feature_background_steps
        for scenario_definition in rule['children']:
            if 'background' in scenario_definition:
                if scenario_definition['background']['steps']:
                    background_steps += scenario_definition['background']['steps']
            else:
                scenario = scenario_definition['scenario']
                args = (uri, tags, background_steps, scenario, language, pickles)
                if not scenario['examples']:
                    self._compile_scenario(*args)
                else:
                    self._compile_scenario_outline(*args)
        return pickles

    def _compile_scenario(self, uri, inherited_tags, background_steps, scenario, language, pickles):
        """Compile a plain (non-outline) scenario into one pickle."""
        tags = list(inherited_tags) + list(scenario['tags'])
        last_keyword_type = 'Unknown'
        steps = list()
        if scenario['steps']:
            for step in background_steps + scenario['steps']:
                # 'And'/'But' (Conjunction) steps inherit the keyword type of
                # the preceding Given/When/Then step.
                last_keyword_type = last_keyword_type if step['keywordType'] == 'Conjunction' else step['keywordType']
                steps.append(self._pickle_step(step, last_keyword_type))
        pickle = {
            'astNodeIds': [scenario['id']],
            'id': self.id_generator.get_next_id(),
            'tags': self._pickle_tags(tags),
            'name': scenario['name'],
            'language': language,
            'steps': steps,
            'uri': uri
        }
        pickles.append(pickle)

    def _compile_scenario_outline(self, uri, inherited_tags, background_steps, scenario, language, pickles):
        """Compile a scenario outline: one pickle per examples-table row,
        with <placeholder> values interpolated into names, steps and
        step arguments."""
        # Examples without a tableHeader (e.g. empty Examples:) are skipped.
        for examples in (e for e in scenario['examples'] if 'tableHeader' in e):
            variable_cells = examples['tableHeader']['cells']
            for values in examples['tableBody']:
                value_cells = values['cells']
                tags = list(inherited_tags) + list(scenario['tags']) + list(examples['tags'])
                # NOTE(review): initialised to None here but to 'Unknown' in
                # _compile_scenario — confirm the asymmetry is intentional.
                last_keyword_type = None
                steps = list()
                if scenario['steps']:
                    # Background steps are copied verbatim (no interpolation).
                    for step in background_steps:
                        last_keyword_type = last_keyword_type if step['keywordType'] == 'Conjunction' else step['keywordType']
                        steps.append(self._pickle_step(step, last_keyword_type))
                if scenario['steps']:
                    for outline_step in scenario['steps']:
                        last_keyword_type = last_keyword_type if outline_step['keywordType'] == 'Conjunction' else outline_step['keywordType']
                        step_text = self._interpolate(
                            outline_step['text'],
                            variable_cells,
                            value_cells)
                        argument = self._create_pickle_arguments(
                            outline_step,
                            variable_cells,
                            value_cells)
                        _pickle_step = {
                            'astNodeIds': [outline_step['id'], values['id']],
                            'id': self.id_generator.get_next_id(),
                            'type': last_keyword_type,
                            'text': step_text
                        }
                        if argument is not None:
                            _pickle_step['argument'] = argument
                        steps.append(_pickle_step)
                pickle = {
                    'astNodeIds': [scenario['id'], values['id']],
                    'id': self.id_generator.get_next_id(),
                    'name': self._interpolate(
                        scenario['name'],
                        variable_cells,
                        value_cells),
                    'language': language,
                    'steps': steps,
                    'tags': self._pickle_tags(tags),
                    'uri': uri
                }
                pickles.append(pickle)

    def _create_pickle_arguments(self, step, variables, values):
        """Return the pickled dataTable/docString argument of *step*, with
        placeholders interpolated, or None when the step has no argument."""
        if 'dataTable' in step:
            table = {'rows': []}
            for row in step['dataTable']['rows']:
                cells = [
                    {
                        'value': self._interpolate(cell['value'], variables, values)
                    } for cell in row['cells']
                ]
                table['rows'].append({'cells': cells})
            return {'dataTable': table}
        elif 'docString' in step:
            argument = step['docString']
            docstring = {
                'content': self._interpolate(argument['content'], variables, values)
            }
            if 'mediaType' in argument:
                docstring['mediaType'] = self._interpolate(argument['mediaType'], variables, values)
            return {'docString': docstring}
        else:
            return None

    def _interpolate(self, name, variable_cells, value_cells):
        """Replace every ``<variable>`` occurrence in *name* with the value
        from the matching examples-row cell.  Returns None unchanged."""
        if name is None:
            return name
        for n, variable_cell in enumerate(variable_cells):
            value_cell = value_cells[n]
            # For the case of trailing backslash, re-escaping backslashes are needed
            reescaped_value = re.sub(r'\\', r'\\\\', value_cell['value'])
            name = re.sub(
                u'<{0[value]}>'.format(variable_cell),
                reescaped_value,
                name
            )
        return name

    def _pickle_step(self, step, keyword_type):
        """Convert one AST step into a pickle step dict."""
        pickle_step = {
            'astNodeIds': [step['id']],
            'id': self.id_generator.get_next_id(),
            'type': keyword_type,
            'text': step['text'],
        }
        # No examples row applies here, so interpolation lists are empty.
        argument = self._create_pickle_arguments(
            step,
            [],
            [])
        if argument is not None:
            pickle_step['argument'] = argument
        return pickle_step

    def _pickle_tags(self, tags):
        """Convert a list of AST tag nodes into pickle tag dicts."""
        return [self._pickle_tag(tag) for tag in tags]

    def _pickle_tag(self, tag):
        return {
            'astNodeId': tag['id'],
            'name': tag['name']
        }
| {
"content_hash": "7cde1e0f1d73a4644af6d587e269635a",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 142,
"avg_line_length": 40.382198952879584,
"alnum_prop": 0.5078438999092442,
"repo_name": "cucumber/gherkin",
"id": "07f5ca91fb45e666a3e82c714e3a70799c05d032",
"size": "7713",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/gherkin/pickles/compiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "531151"
},
{
"name": "C#",
"bytes": "333964"
},
{
"name": "CMake",
"bytes": "4163"
},
{
"name": "Dart",
"bytes": "281454"
},
{
"name": "Elixir",
"bytes": "237904"
},
{
"name": "Gherkin",
"bytes": "2207"
},
{
"name": "Go",
"bytes": "284237"
},
{
"name": "Groovy",
"bytes": "2360"
},
{
"name": "HTML",
"bytes": "86256"
},
{
"name": "Java",
"bytes": "271600"
},
{
"name": "Makefile",
"bytes": "41308"
},
{
"name": "Objective-C",
"bytes": "340338"
},
{
"name": "PHP",
"bytes": "290041"
},
{
"name": "Perl",
"bytes": "261132"
},
{
"name": "Python",
"bytes": "227750"
},
{
"name": "Ruby",
"bytes": "187098"
},
{
"name": "Shell",
"bytes": "2771"
},
{
"name": "TypeScript",
"bytes": "202891"
},
{
"name": "jq",
"bytes": "4133"
}
],
"symlink_target": ""
} |
from django import template
from django.utils.html import escape
register = template.Library()
@register.filter('github_title')
def github_title(title):
    """Wrap the repository half of an ``owner/repo`` title in <strong> tags.

    Both halves are HTML-escaped.  Titles that are empty or do not contain
    exactly one ``/`` are returned unchanged.
    """
    if not title:
        return title
    owner, sep, repo = title.partition('/')
    if sep and '/' not in repo:
        return '{0}/<strong>{1}</strong>'.format(escape(owner), escape(repo))
    return title
| {
"content_hash": "870117e4ca855351182bee31f063a747",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.6366366366366366,
"repo_name": "vitorfs/woid",
"id": "19c4647a789e3b7cbea244b977e4bc654987c5f9",
"size": "350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "woid/apps/services/templatetags/github_title.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14252"
},
{
"name": "HTML",
"bytes": "13482"
},
{
"name": "JavaScript",
"bytes": "863"
},
{
"name": "Python",
"bytes": "50476"
}
],
"symlink_target": ""
} |
import random
import sys
import gdb
# List of colors used for printing messages (ANSI SGR escape sequences).
RED = "\033[1;31m"      # errors
BLUE = "\033[1;34m"     # neutral status output
CYAN = "\033[1;36m"     # warnings
GREEN = "\033[0;32m"    # success messages
RESET = "\033[0;0m"     # restore default terminal attributes
BOLD = "\033[;1m"       # bold attribute (combined with a color above)
REVERSE = "\033[;7m"    # reverse video (defined for completeness; unused here)
def error_output(massage):
    """Print a bold red message to stdout.

    (The parameter name ``massage`` is a historical typo for ``message``,
    kept for interface compatibility.)
    """
    for prefix in (BOLD, RED):
        sys.stdout.write(prefix)
    print(massage)
    sys.stdout.write(RESET)
def warning_output(massage):
    """Print a bold cyan message to stdout."""
    for prefix in (BOLD, CYAN):
        sys.stdout.write(prefix)
    print(massage)
    sys.stdout.write(RESET)
def positive_output(massage):
    """Print a bold green message to stdout."""
    for prefix in (BOLD, GREEN):
        sys.stdout.write(prefix)
    print(massage)
    sys.stdout.write(RESET)
def neutral_output(massage):
    """Print a blue message to stdout."""
    out = sys.stdout
    out.write(BLUE)
    print(massage)
    out.write(RESET)
def parse_input_file(inputfile_name):
    """Read and parse the fault-injection input file.

    Recognised directives (one per line; '#' starts a comment):
      executable <path>
      arguments <args...>
      fault <var> [<scope_begin> [<scope_end>]] <scope_hits> <accesses> <model>
      probe <var>

    Returns a 9-tuple ``(filename, arguments, variable, scope_begin,
    scope_end, scope_hit_threshold, variable_access_threshold,
    fault_model, probe_var)``.  Any directive missing from the file yields
    None for its fields (previously an absent directive caused an
    UnboundLocalError at the return statement).
    """
    # Pre-initialise every result so a partially-specified input file
    # cannot crash the return statement.
    filename = arguments = probe_var = None
    variable = scope_begin = scope_end = None
    scope_hit_threshold = variable_access_threshold = fault_model = None
    with open(inputfile_name) as inputfile:
        for line in inputfile:
            stripped_line = line.strip()
            # Ignore commented lines
            if stripped_line.startswith("#"):
                continue
            tokens = stripped_line.split()
            if stripped_line.startswith("executable"):
                # Path of the executable under fault injection
                try:
                    filename = tokens[1]
                except BaseException:
                    error_output("Please provide a valid path for your executable\n")
            elif stripped_line.startswith("arguments"):
                # Command-line arguments for the target program
                if len(tokens) != 1:
                    arguments = stripped_line.split(maxsplit=1)[1]
                else:
                    arguments = None
            elif stripped_line.startswith("fault"):
                # Fault specification; three layouts depending on scope kind
                if len(tokens) == 7:
                    # Scope given as a pair of source line numbers
                    (variable, scope_begin, scope_end,
                     scope_hit_threshold, variable_access_threshold,
                     fault_model) = tokens[1:7]
                elif len(tokens) == 6:
                    # Scope given as a function name
                    (variable, scope_begin,
                     scope_hit_threshold, variable_access_threshold,
                     fault_model) = tokens[1:6]
                    scope_end = None
                elif len(tokens) == 5:
                    # Global variable: no scope at all
                    (variable,
                     scope_hit_threshold, variable_access_threshold,
                     fault_model) = tokens[1:5]
                    scope_begin = None
                    scope_end = None
            elif stripped_line.startswith("probe"):
                # Variable to observe after fault injection
                probe_var = tokens[1]
            else:
                # Unrecognised (or blank) line: abort the whole session.
                error_output("Please follow the instruction in the input file template\n")
                gdb.execute('set confirm off')
                gdb.execute('quit')
    return (filename, arguments,
            variable, scope_begin, scope_end,
            scope_hit_threshold, variable_access_threshold,
            fault_model,
            probe_var)
def load_target(filename):
    """Load *filename* into GDB and clone the inferior.

    The clone serves as the untouched reference run.  Returns True on
    success, False when GDB cannot read the file.
    """
    try:
        gdb.execute('file ' + filename)
        gdb.execute('clone-inferior')  # For comparing
        return True
    except gdb.error:
        error_output("Error reading the target program")
        return False
def set_arguments(r_inferior, f_inferior, arguments):
    """Apply the same command-line arguments to both inferiors.

    Returns True on success, False when GDB rejects the commands.
    """
    try:
        for inferior in (r_inferior, f_inferior):
            gdb.execute('inferior ' + str(inferior.num))
            gdb.execute('set args ' + arguments)
        return True
    except gdb.error:
        error_output("Error setting arguments for target program")
        return False
def run_target(r_inferior, f_inferior):
    """Start both inferiors (reference first, then fault-injected one).

    On failure, tears down all breakpoints and quits GDB entirely.
    """
    try:
        for inferior in (r_inferior, f_inferior):
            gdb.execute('inferior ' + str(inferior.num))
            gdb.execute('run')
        return True
    except gdb.error:
        error_output("Unable to run the target program with breakpoints/watchpoints set")
        gdb.execute('delete')
        gdb.execute('set confirm off')
        gdb.execute('quit')
        return False
def continue_target(inferior_obj):
    """Switch to *inferior_obj* and resume its execution."""
    commands = ('inferior ' + str(inferior_obj.num), 'continue')
    try:
        for command in commands:
            gdb.execute(command)
        return True
    except gdb.error:
        error_output("Unable to continue the target inferior:")
        error_output(str(inferior_obj.num))
        return False
def watch_variable(inferior_obj, variable):
    """Set a silent access watchpoint on *variable* in the given inferior.

    Returns the watchpoint object, or None when the variable is
    optimized out.
    """
    try:
        gdb.execute('inferior ' + str(inferior_obj.num))
        watchpoint = gdb.Breakpoint(variable, gdb.BP_WATCHPOINT, gdb.WP_ACCESS)
        watchpoint.silent = True
        return watchpoint
    except RuntimeError:
        error_output("Unable to set a watchpoint for the target variable, "
                     "it is optimized-out")
        return None
def get_variable(inferior_obj, variable):
    """Return the current value of *variable* in the inferior as a string.

    Returns None when the variable cannot be evaluated in the current
    frame (e.g. out of scope).
    """
    try:
        gdb.execute('inferior ' + str(inferior_obj.num))
        # str() stays inside the try: rendering the value may touch target
        # memory and raise gdb.error as well.
        return str(gdb.parse_and_eval(variable))
    except gdb.error:
        return None
def set_scope(scope_begin, scope_end=None):
    """Set silent breakpoints delimiting the injection scope before the
    program runs.

    With only *scope_begin*, the scope is a function name; with both, a
    pair of source line numbers.  Returns ``(b_begin, b_end)``; either
    element is None when its location is invalid.  (Previously an invalid
    location raised UnboundLocalError at the return statement.)
    """
    # Pre-initialise so an invalid location yields None instead of an
    # unbound name at the return.
    b_begin = None
    b_end = None
    if scope_end is None:
        # Setting scope when function name is the beginning of the scope
        neutral_output("The scope set in the function " + str(scope_begin))
        try:
            b_begin = gdb.Breakpoint(scope_begin)
            b_begin.silent = True
        except gdb.error:
            error_output("It is a invalid scope beginning")
    else:
        # Setting scope when scope is specified in line numbers
        neutral_output("The scope starts from line " + str(scope_begin) + " to " + str(scope_end))
        try:
            b_begin = gdb.Breakpoint(scope_begin)
            b_begin.silent = True
        except gdb.error:
            error_output("It is a invalid scope beginning")
        try:
            b_end = gdb.Breakpoint(scope_end)
            b_end.silent = True
        except gdb.error:
            error_output("It is a invalid scope end")
    return (b_begin, b_end)
def delete_breakpoint(bp0, bp1=None, bp2=None, bp3=None):
    """Delete up to four breakpoints/watchpoints.

    Deletes the leading non-None prefix of (bp0, bp1, bp2, bp3) — the same
    semantics as the original four-way if/elif ladder, which also stopped
    at the first omitted argument.  Returns True when all deletions
    succeed, False (after reporting) on the first gdb.error.
    """
    try:
        for breakpoint_obj in (bp0, bp1, bp2, bp3):
            if breakpoint_obj is None:
                break
            breakpoint_obj.delete()
        return True
    except gdb.error:
        error_output("Unable to delete the breakpoints")
        return False
def inject(inferior_obj, variable, fault_model):
    """Inject one fault into *variable* inside the given inferior.

    Supported fault models: ``BIT_FLIPS`` (flip one random bit),
    ``INC_VALUE``, ``DEC_VALUE`` and ``SET_ZERO``.  Returns True on a
    successful injection, False otherwise (with a diagnostic printed).
    """
    gdb.execute('inferior ' + str(inferior_obj.num))
    try:
        target_variable = gdb.parse_and_eval(variable)
    except gdb.error:
        error_output("Out of the scope which the target variable is valid in")
        return False
    if target_variable.is_optimized_out:
        error_output("The target variable is optimized out, fault injection is unable to complete")
        return False
    if fault_model == 'BIT_FLIPS':
        try:
            size = target_variable.type.sizeof * 8
            # BUGFIX: random.randint is inclusive on both ends, so the upper
            # bound must be size - 1; shifting by `size` produced a mask one
            # bit beyond the variable's width.
            error = 1 << random.randint(0, size - 1)
            gdb.execute('set var ' + variable + ' = ' + variable + ' ^ ' + str(error))
            return True
        except gdb.error:
            error_output("Unable to perform BIT-FLIP fault injection")
            return False
    elif fault_model == 'INC_VALUE':
        try:
            gdb.execute('set var ' + variable + ' = ' + variable + ' + 1')
            return True
        except gdb.error:
            error_output("Unable to perform INCREMENTATION fault injection")
            return False
    elif fault_model == 'DEC_VALUE':
        try:
            gdb.execute('set var ' + variable + ' = ' + variable + ' - 1')
            return True
        except gdb.error:
            error_output("Unable to perform DECREMENTATION fault injection")
            return False
    elif fault_model == 'SET_ZERO':
        try:
            gdb.execute('set var ' + variable + ' = 0')
            return True
        except gdb.error:
            error_output("Unable to perform SET-ZERO fault injection")
            return False
    else:
        warning_output("Please set the target variable and fault_model correctly")
        return False
def try_injecting_in_scope(r_inferior, f_inferior,
                           variable, variable_access_threshold, fault_model,
                           b_begin, b_end=None):
    """When in the right code scope, wait for the variable access threshold
    being hit and inject the fault.

    Returns the injection status (True on successful injection, False when
    the scope ends first, the watchpoint is invalid, or inject() fails).
    """
    if variable_access_threshold == 0:
        # If the variable access threshold is set to 0, inject fault immediately
        # regardless whether the target variable is accessed
        injection_status = inject(f_inferior, variable, fault_model)
        delete_breakpoint(b_begin, b_end)
    else:
        watched_variable_r = watch_variable(r_inferior, variable)
        watched_variable_f = watch_variable(f_inferior, variable)
        if watched_variable_f is None:
            injection_status = False
            delete_breakpoint(b_begin, b_end)
        else:
            # while/else: the else suite runs only when the loop exits
            # normally (threshold reached or watchpoint invalidated), NOT
            # via the break taken when the scope end is hit.
            while watched_variable_f.is_valid() is True and watched_variable_f.hit_count != variable_access_threshold:
                neutral_output("Waiting for reaching the variable access threshold")
                if b_end is not None and b_end.hit_count != 0:
                    # Left the scope before the access threshold was reached.
                    warning_output("Scope end hit, finishing target execution")
                    injection_status = False
                    delete_breakpoint(watched_variable_r, watched_variable_f, b_begin, b_end)
                    break
                continue_target(r_inferior)
                continue_target(f_inferior)
            else:
                if watched_variable_f.is_valid() is False:
                    injection_status = False
                    delete_breakpoint(b_begin, b_end)
                else:
                    injection_status = inject(f_inferior, variable, fault_model)
                    delete_breakpoint(watched_variable_r, watched_variable_f, b_begin, b_end)
    return injection_status
def injection_process(r_inferior, f_inferior,
                      variable,
                      scope_begin, scope_end,
                      scope_hit_threshold,
                      variable_access_threshold,
                      fault_model):
    """The main fault injection process.

    Dispatches on how the scope is specified — global variable
    (scope_begin is None), function name (scope_end is None), or a pair
    of line numbers — then waits for the configured thresholds and
    injects the fault into f_inferior while r_inferior serves as the
    untouched reference.  Returns the final injection status.
    """
    if scope_begin is None:
        neutral_output("Target global variable:")
        neutral_output(variable)
        # When the target variable is a global one, skip setting the scope and start injecting fault
        watched_variable_r = watch_variable(r_inferior, variable)
        watched_variable_f = watch_variable(f_inferior, variable)
        run_target(r_inferior, f_inferior)
        if watched_variable_f is None:
            # NOTE(review): execution falls through after this branch, so
            # the threshold loop below still dereferences
            # watched_variable_f (None) when variable_access_threshold != 0
            # — looks like a latent AttributeError; confirm.
            continue_target(r_inferior)
            continue_target(f_inferior)
        if variable_access_threshold == 0:
            # If the variable access threshold is set to 0, inject fault immediately
            # regardless whether the target variable is accessed
            injection_status = inject(f_inferior, variable, fault_model)
        else:
            # while/else: the else suite runs once the loop condition turns
            # false (threshold reached); there is no break in this loop.
            while watched_variable_f.hit_count != variable_access_threshold:
                neutral_output("Waiting for reaching the variable access threshold")
                continue_target(r_inferior)
                continue_target(f_inferior)
            else:
                injection_status = inject(f_inferior, variable, fault_model)
        delete_breakpoint(watched_variable_r, watched_variable_f)
    elif scope_end is None:
        neutral_output("Target function: ")
        neutral_output(scope_begin)
        neutral_output("Target variable:")
        neutral_output(variable)
        # When the scope is specified as a function's name
        (b_begin, _) = set_scope(scope_begin, scope_end)
        run_target(r_inferior, f_inferior)
        # Spin until the scope-begin breakpoint has been hit the required
        # number of times, then attempt the injection inside the scope.
        while b_begin.hit_count != scope_hit_threshold:
            neutral_output("Waiting for reaching the scope hit threshold")
            continue_target(r_inferior)
            continue_target(f_inferior)
        else:
            injection_status = try_injecting_in_scope(r_inferior, f_inferior,
                                                      variable,
                                                      variable_access_threshold,
                                                      fault_model,
                                                      b_begin)
    else:
        neutral_output("Target scope: ")
        neutral_output(scope_begin)
        neutral_output(scope_end)
        neutral_output("Target variable:")
        neutral_output(variable)
        # When the scope is specified in line number
        (b_begin, b_end) = set_scope(scope_begin, scope_end)
        run_target(r_inferior, f_inferior)
        while b_begin.hit_count != scope_hit_threshold:
            neutral_output("Waiting for reaching the scope hit threshold")
            continue_target(r_inferior)
            continue_target(f_inferior)
        else:
            injection_status = try_injecting_in_scope(r_inferior, f_inferior,
                                                      variable, variable_access_threshold,
                                                      fault_model,
                                                      b_begin, b_end)
    return injection_status
def observe_outcome(r_inferior, f_inferior, probe_var):
    """Start watching a target variable for observing fault outcome.

    Runs both inferiors until the probe variable's value diverges between
    the reference and the injected run, then reports the program-counter
    distance travelled since this call as a hex string ("latency").
    Returns None when no divergence is observed or the probe variable
    cannot be watched.
    """
    pc_start = int(gdb.selected_frame().pc())
    watched_variable_r = watch_variable(r_inferior, probe_var)
    if watched_variable_r is None:
        error_output("Unable to set a watchpoint on probe variable in reference inferior")
        return None
    watched_variable_f = watch_variable(f_inferior, probe_var)
    if watched_variable_f is None:
        error_output("Unable to set a watchpoint on probe variable in injected inferior")
        return None
    while True:
        variable_r = get_variable(r_inferior, probe_var)
        variable_f = get_variable(f_inferior, probe_var)
        if watched_variable_r.is_valid() is False or watched_variable_f.is_valid() is False:
            error_output("Watched variables are optimized-out")
            break
        elif variable_r is None or variable_f is None:
            error_output("Out of the scope which the probe variable is valid in")
            break
        elif variable_r == variable_f:
            # Values still agree: resume both inferiors and keep watching.
            continue_target(r_inferior)
            continue_target(f_inferior)
        else:
            # Divergence detected: record where it happened and stop.
            pc_stop = int(gdb.selected_frame().pc())
            delete_breakpoint(watched_variable_r, watched_variable_f)
            break
    try:
        # pc_stop is bound only on the divergence path; the NameError
        # handler below covers every early-break path above.
        latency = pc_stop - pc_start
        positive_output("Variable contaminated by the fault at:")
        positive_output(hex(pc_stop))
    except NameError:
        neutral_output("\nThe probe variable is not contaminated by the injected fault\n")
        return None
    return hex(latency)
def exit_handler(event):
    """Quit GDB as soon as the program exited.

    Events without an ``exit_code`` attribute indicate abnormal
    termination and get an extra error line first.
    """
    if not hasattr(event, 'exit_code'):
        error_output("Program terminated abnormally")
    neutral_output("--------------------------------------------------------------------------")
    gdb.execute('quit')
# def stop_handler(event):
# # TODO: Find a way to make the stop handler useful
# print("event type: program stopped")
# print("breakpoint hit count: %s" % (event.breakpoint.hit_count))
def fault_injection(input_file):
    """The main process: drive one whole fault-injection experiment.

    Parses *input_file*, loads and clones the target, runs the injection
    process, and — when a fault was injected — observes the probe
    variable and reports the measured latency.  Quits GDB on any fatal
    setup error.
    """
    gdb.events.exited.connect(exit_handler)
    # gdb.events.stop.connect(stop_handler)
    (filename, arguments,
     variable, scope_begin, scope_end,
     scope_hit_threshold, variable_access_threshold,
     fault_model,
     probe_var) = parse_input_file(input_file)
    # Thresholds arrive as strings from the input file.
    scope_hit_threshold = int(scope_hit_threshold)
    variable_access_threshold = int(variable_access_threshold)
    # By definition, scope hit threshold cannot be 0
    if scope_hit_threshold == 0:
        scope_hit_threshold = 1
    if load_target(filename) is False:
        gdb.execute('set confirm off')
        gdb.execute('quit')
    # Two inferiors exist after load_target's clone-inferior:
    # r_inferior is the untouched reference, f_inferior gets the fault.
    (r_inferior, f_inferior) = gdb.inferiors()
    if arguments is not None:
        if set_arguments(r_inferior, f_inferior, arguments) is False:
            gdb.execute('set confirm off')
            gdb.execute('quit')
    if injection_process(r_inferior, f_inferior,
                         variable, scope_begin, scope_end,
                         scope_hit_threshold,
                         variable_access_threshold,
                         fault_model) is True:
        try:
            positive_output("\nFault Injection Complete\n")
            # Continue running the target program till it exits
            latency = observe_outcome(r_inferior, f_inferior, probe_var)
            if latency is None:
                continue_target(r_inferior)
                continue_target(f_inferior)
            else:
                positive_output("\nThe fault latency is:")
                positive_output(latency)
                print("")
                continue_target(r_inferior)
                continue_target(f_inferior)
        except gdb.error:
            error_output("Unable to finish the program after fault injection, debug?\n")
            gdb.execute('set confirm off')
            gdb.execute('quit')
    else:
        warning_output("Program ran with no faults injected\n")
        continue_target(r_inferior)
        continue_target(f_inferior)
# Runs at script load: disable GDB pagination so long experiment logs
# stream without "--Type <RET>--" prompts.
gdb.execute('set pagination off')
| {
"content_hash": "f400e3a418a38a038ce7ca83c8ed8998",
"timestamp": "",
"source": "github",
"line_count": 512,
"max_line_length": 118,
"avg_line_length": 39.15625,
"alnum_prop": 0.57127893056664,
"repo_name": "timtian090/Playground",
"id": "03f3bce3579004a113c157f55313982205b58e7f",
"size": "20048",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GDB/Experiments/example/observed_fault_injection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "455497"
},
{
"name": "Batchfile",
"bytes": "11990"
},
{
"name": "C",
"bytes": "28095202"
},
{
"name": "C++",
"bytes": "1140083"
},
{
"name": "CSS",
"bytes": "68761"
},
{
"name": "Coq",
"bytes": "8402"
},
{
"name": "DIGITAL Command Language",
"bytes": "234545"
},
{
"name": "Emacs Lisp",
"bytes": "481"
},
{
"name": "Forth",
"bytes": "3527"
},
{
"name": "HTML",
"bytes": "1389398"
},
{
"name": "Haskell",
"bytes": "57451"
},
{
"name": "Logos",
"bytes": "540"
},
{
"name": "M4",
"bytes": "580948"
},
{
"name": "Makefile",
"bytes": "2911831"
},
{
"name": "Objective-C",
"bytes": "1066317"
},
{
"name": "OpenEdge ABL",
"bytes": "1334"
},
{
"name": "PHP",
"bytes": "22166"
},
{
"name": "PLSQL",
"bytes": "133870"
},
{
"name": "Pawn",
"bytes": "9784"
},
{
"name": "Perl",
"bytes": "13699416"
},
{
"name": "Perl 6",
"bytes": "868681"
},
{
"name": "Prolog",
"bytes": "90000"
},
{
"name": "Python",
"bytes": "217011"
},
{
"name": "Roff",
"bytes": "605811"
},
{
"name": "Rust",
"bytes": "5300"
},
{
"name": "Scala",
"bytes": "277077"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "1945400"
},
{
"name": "SourcePawn",
"bytes": "3657"
},
{
"name": "Stata",
"bytes": "48808"
},
{
"name": "SystemVerilog",
"bytes": "3612098"
},
{
"name": "Tcl",
"bytes": "16765"
},
{
"name": "TeX",
"bytes": "539642"
},
{
"name": "VHDL",
"bytes": "519227"
},
{
"name": "Verilog",
"bytes": "470753"
},
{
"name": "XS",
"bytes": "1205397"
},
{
"name": "XSLT",
"bytes": "12288"
},
{
"name": "Yacc",
"bytes": "38397"
}
],
"symlink_target": ""
} |
from django.shortcuts import render, render_to_response, redirect, get_object_or_404
from django.contrib.auth import authenticate, login , logout
from .models import Course, Department, User, Student, ExamPaper, Material, Announcement, CourseAllotment, Bookmark, Feedback, Contributor, Stat
from .forms import RegisterForm , LoginForm , AnnouncementForm , MaterialForm , ExamPaperForm, FeedbackForm, AvatorForm, ForgetPasswordForm
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.http import JsonResponse,HttpResponse
from django.urls import reverse
import datetime
import json
# Points credited to a contributor per accepted upload; FeedbackView awards
# double this value.
rewardvalue=5
# Create your views here.
def home(request):
    """Render the announcement feed for the user's bookmarked courses.

    Anonymous users get an empty feed; authenticated users get a page of
    six announcements (offset via the ``start_from`` query parameter).
    """
    if request.user.is_anonymous():
        return render(request, "feed.html", context={})
    user_bookmarks = Bookmark.objects.filter(user=request.user)
    bookmarks_with_courses = Bookmark.objects.select_related('course').filter(user=request.user)
    followed_courses = [entry.course for entry in user_bookmarks]
    page = int(request.GET.get('start_from', 0))
    offset = page * 6
    feed_items = (Announcement.objects.select_related('author')
                  .filter(course__in=followed_courses)
                  .order_by('-updated_on')[offset:offset + 6])
    return render(request, "feed.html",
                  context={"feed": feed_items,
                           "next": page + 1,
                           "bookmark": bookmarks_with_courses})
def about(request):
    """Render the about page with the site-wide statistics record."""
    site_stats = Stat.objects.get(tag='initial')
    return render(request, "about.html", context={'stats': site_stats})
def _logout(request):
    """Log the current user out and send them back to the home page."""
    logout(request)
    return redirect('home')
def forgetpassword(request):
    """Show the forget-password form (GET) or acknowledge the request (POST)."""
    if request.method == 'POST':
        return render(request, "form.html",
                      context={"message": "Check your email for password :)"})
    elif request.method == 'GET':
        return render(request, "form.html",
                      context={"form": ForgetPasswordForm()})
def _login(request):
    """Render the login form (GET) or authenticate credentials (POST).

    Failed or inactive logins fall back to the form with a
    "forget password" hint.
    """
    if request.method == 'POST':
        email = request.POST.get('email')
        password = request.POST.get('password')
        user = authenticate(email=email, password=password)
        if user is not None and user.is_active:
            login(request, user)
            print(user.email)
            return redirect('home')
        return render(request, "login.html",
                      context={"form": LoginForm(), "message": "forget password"})
    elif request.method == 'GET':
        return render(request, "login.html", context={"form": LoginForm()})
def _register(request, register_as=None):
    """Render the registration form (GET) or create a user (POST).

    *register_as* selects the role ('student' or 'instructor'); successful
    registration bumps the global user count and redirects to login.
    """
    if request.method == 'GET':
        return render(request, "register.html", context={"form": RegisterForm()})
    elif request.method == 'POST':
        form = RegisterForm(request.POST)
        if not form.is_valid():
            return redirect(reverse('register', kwargs={"register_as": register_as}))
        user = form.save()
        if register_as == 'student':
            user.user_role = 'student'
        elif register_as == 'instructor':
            user.user_role = 'instructor'
        user.save()
        stat = Stat.objects.get(tag='initial')
        stat.user_count += 1
        stat.save()
        return redirect(reverse('login'))
@login_required
def profile(request):
    """Display (GET) or inline-edit (POST) a user's profile page.

    GET: ``?email=<address>`` shows someone else's profile; the default is
    the requesting user.  Students additionally get their Student record.
    POST: either an inline field edit (``name``/``value`` pairs from an
    editable widget) or an avatar upload via AvatorForm.
    """
    if request.method == 'GET':
        # NOTE(review): the default here is a User object, not an email
        # string — User.objects.get(email=...) below relies on that
        # working; confirm against the custom User model.
        email = request.GET.get('email',request.user)
        form = AvatorForm()
        owner = User.objects.get(email=email)
        contributor,created = Contributor.objects.get_or_create(user=owner)
        is_owner = True if owner == request.user else False
        if owner.user_role == "student":
            student,created = Student.objects.get_or_create(user=owner)
            return render(request,"profile.html",context={"user":owner,"student":student,'form':form, 'is_owner':is_owner,"contributor":contributor})
        return render(request,"profile.html",context={"user":owner,'form':form, 'is_owner':is_owner,"contributor":contributor})
    elif request.method =='POST':
        if 'name' in request.POST.keys():
            # Inline edit: which model the field lives on decides the branch.
            if request.POST['name'] in ['semester','registration_no','branch']:
                student = Student.objects.get(user=request.user)
                if request.POST['name'] == 'semester':
                    print('yeah')
                    student.semester = request.POST['value']
                elif request.POST['name'] == 'registration_no':
                    student.registration_no = request.POST['value']
                elif request.POST['name'] == 'branch':
                    student.branch = request.POST['value']
                student.save()
            elif request.POST['name'] in ['first_name','last_name']:
                user = User.objects.get(email = request.user.email)
                if request.POST['name'] == 'first_name':
                    user.first_name = request.POST['value']
                elif request.POST['name'] == 'last_name':
                    user.last_name = request.POST['value']
                user.save()
        else:
            # No 'name' key: treat the POST as an avatar upload.
            user = User.objects.get(email=request.user.email)
            form = AvatorForm(request.POST,request.FILES)
            if form.is_valid():
                user.avatar = form.cleaned_data["avator"]
                user.save()
                print(user.avatar)
            print(form.errors)
        return redirect(reverse('profile'))
# Profile edit to be implemented.
def getDepartments(request):
    """Return every Department serialized as JSON under a "result" key."""
    if request.method == 'GET':
        serialized = serializers.serialize(
            "json", Department.objects.all(), use_natural_foreign_keys=True)
        payload = {"result": json.loads(serialized)}
        return HttpResponse(json.dumps(payload), content_type='application/json')
def getCourses(request, department=None):
    """Return the courses of *department* (by acronym) as JSON under "result".

    404s when the department acronym does not exist.
    """
    if request.method == 'GET':
        dept = get_object_or_404(Department, acronym=department)
        serialized = serializers.serialize(
            "json", Course.objects.filter(dept=dept),
            use_natural_foreign_keys=True)
        payload = {"result": json.loads(serialized)}
        return HttpResponse(json.dumps(payload), content_type='application/json')
def Announcements(request, department=None, coursecode=None):
    """GET: render the announcement upload form.

    POST: create an Announcement for *coursecode*, credit the contributor
    (creating the Contributor row and bumping the global contributor count
    on a first contribution), update the announcement statistics, and
    redirect back to the course page.
    """
    if request.method == 'GET':
        form = AnnouncementForm()
        return render(request, "form.html", context={"form": form})
    elif request.method == "POST":
        if not request.user.is_authenticated:
            return redirect(reverse('login'))
        form = AnnouncementForm(request.POST, request.FILES)
        if(form.is_valid()):
            obj = Announcement()
            obj.files = form.cleaned_data["files"]
            obj.title = form.cleaned_data["title"]
            obj.description = form.cleaned_data["description"]
            obj.author = request.user
            obj.course = Course.objects.get(code=coursecode)
            obj.save()
            # BUGFIX: fetch/create the Contributor row BEFORE testing
            # `created`; previously `if created:` ran first and raised
            # UnboundLocalError on every valid submission.  Order now
            # matches the Materials/ExamPaperView views.
            contributor, created = Contributor.objects.get_or_create(user=request.user)
            contributor.announcement += 1
            contributor.points += rewardvalue
            contributor.save()
            if created:
                stat = Stat.objects.get(tag='initial')
                stat.contributor_count += 1
                stat.save()
            stat = Stat.objects.get(tag='initial')
            stat.announcement_count += 1
            stat.save()
            return redirect(reverse("course", kwargs={'department': department, 'coursecode': coursecode}))
        print(form.errors)
        return redirect(reverse("course", kwargs={'department': department, 'coursecode': coursecode}))
def Materials(request,department=None,coursecode=None):
    """GET: render the material upload form.

    POST: store the uploaded material for *coursecode*, credit the
    contributor, update site statistics and redirect back to the course
    page.
    """
    if request.method =='GET':
        form= MaterialForm()
        return render(request,"form.html",context={"form":form})
    elif request.method =="POST":
        if not request.user.is_authenticated:
            return redirect(reverse('login'))
        form = MaterialForm(request.POST,request.FILES)
        if(form.is_valid()):
            obj = Material()
            obj.files = form.cleaned_data["files"]
            obj.title = form.cleaned_data["title"]
            obj.author=request.user
            obj.course= Course.objects.get(code=coursecode)
            obj.save()
            contributor,created = Contributor.objects.get_or_create(user=request.user)
            contributor.material +=1
            contributor.points += rewardvalue
            contributor.save()
            if created:
                # First-ever contribution by this user: bump the global count.
                stat = Stat.objects.get(tag='initial')
                stat.contributor_count +=1
                stat.save()
            stat = Stat.objects.get(tag='initial')
            stat.material_count +=1
            stat.save()
            return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
        print(form.errors)
        return redirect(reverse("course", kwargs={'department':department,'coursecode':coursecode}))
@login_required
def FeedbackView(request):
    """GET: render the feedback form.

    POST: store the feedback, credit the contributor at double the usual
    reward, and thank the user.  Feedback requires login.
    """
    if request.method =='GET':
        form= FeedbackForm()
        return render(request,"form.html",context={"form":form})
    elif request.method =="POST":
        form = FeedbackForm(request.POST,request.FILES)
        if(form.is_valid()):
            obj = Feedback()
            obj.files = form.cleaned_data["files"]
            obj.title = form.cleaned_data["title"]
            obj.feedback = form.cleaned_data["feedback"]
            obj.author=request.user
            obj.save()
            contributor,created = Contributor.objects.get_or_create(user=request.user)
            contributor.feedback +=1
            # Feedback is rewarded at twice the standard rate.
            contributor.points += 2*rewardvalue
            contributor.save()
            if created:
                stat = Stat.objects.get(tag='initial')
                stat.contributor_count +=1
                stat.save()
            return render(request,"form.html",context={'feedback':True,'message':"Thanks for your valuable feedback. We will be working on your query."})
        print(form.errors)
        # NOTE(review): reverse("course") without kwargs — the sibling views
        # always pass department/coursecode; confirm this URL reverses.
        return redirect(reverse("course"))
@login_required
def ExamPaperView(request, department=None, coursecode=None):
    """Show the exam-paper upload form (GET) or store a new ExamPaper (POST).

    A valid POST saves the paper, credits the contributor, updates site-wide
    statistics, and redirects back to the course page.
    """
    if request.method == 'GET':
        return render(request, "form.html", context={"form": ExamPaperForm()})
    elif request.method == "POST":
        submitted = ExamPaperForm(request.POST, request.FILES)
        if submitted.is_valid():
            paper = ExamPaper()
            paper.files = submitted.cleaned_data["files"]
            paper.term = submitted.cleaned_data["term"]
            paper.author = request.user
            paper.course = Course.objects.get(code=coursecode)
            paper.save()
            # Credit the uploader; a brand-new Contributor also bumps the
            # site-wide contributor count.
            contributor, created = Contributor.objects.get_or_create(user=request.user)
            contributor.paper += 1
            contributor.points += rewardvalue
            contributor.save()
            if created:
                stat = Stat.objects.get(tag='initial')
                stat.contributor_count += 1
                stat.save()
            stat = Stat.objects.get(tag='initial')
            stat.paper_count += 1
            stat.save()
            return redirect(reverse("course", kwargs={'department': department, 'coursecode': coursecode}))
        print(submitted.errors)
        return redirect(reverse("course", kwargs={'department': department, 'coursecode': coursecode}))
def DepartmentView(request, department=None, year=0, semester=0):
    """Render a department's course list, optionally filtered by year/semester.

    Without a year the year-selection page is shown; with a year but no
    semester both semesters of that year are listed; otherwise only the
    requested semester.
    """
    if request.method == 'GET':
        dept = get_object_or_404(Department, acronym=department)
        year = int(year)
        semester = int(semester)
        if year < 1:
            # No year chosen yet — let the user pick one first.
            return render(request, "years.html", context={'department': department})
        allotments = CourseAllotment.objects.select_related('course').filter(course__dept=dept)
        if semester == 0:
            # A study year spans two semesters: (2*year - 1) and (2*year).
            allotments = allotments.filter(semester__in=[2 * year - 1, 2 * year])
            print(allotments)
        else:
            allotments = allotments.filter(semester=semester)
        return render(request, "department.html", context={"department": dept, "courses": allotments})
def CourseView(request, department=None, coursecode=None):
    """Render a course page with its announcements, materials and papers.

    Also reports whether the requesting user has bookmarked the course.
    """
    if request.method == 'GET':
        dept = get_object_or_404(Department, acronym=department)
        course = get_object_or_404(Course, code=coursecode)
        announcements = Announcement.objects.filter(course=course)
        materials = Material.objects.filter(course=course)
        papers = ExamPaper.objects.filter(course=course)
        try:
            bookmark = Bookmark.objects.get(course=course, user=request.user)
        except Exception:
            # Best-effort lookup, replacing the original bare `except:`.
            # Covers both Bookmark.DoesNotExist and anonymous users (an
            # AnonymousUser cannot be used in a DB lookup); either way the
            # page simply renders un-bookmarked.
            bookmark = None
        is_bookmarked = bookmark is not None
        return render(request, "course.html", context={"department": dept, "course": course, "announcements": announcements, "materials": materials, "papers": papers, "is_bookmarked": is_bookmarked})
@login_required
def FeedView(request):
    """Render a paginated announcement feed for the user's bookmarked courses.

    Pagination is in pages of 6, driven by the `start_from` query parameter.
    """
    if request.method == 'GET':
        # FIX: the original built two querysets and iterated the one WITHOUT
        # select_related, causing one extra query per bookmark when reading
        # `bookmark.course` (and leaving `bookmarkcourses` unused).
        bookmarks = Bookmark.objects.select_related('course').filter(user=request.user)
        courselist = [bookmark.course for bookmark in bookmarks]
        start_from = int(request.GET.get('start_from', 0))
        announcements = Announcement.objects.select_related('author').filter(
            course__in=courselist).order_by('-updated_on')[start_from * 6:start_from * 6 + 6]
        return render(request, "feed.html", context={"feed": announcements, "next": start_from + 1})
@login_required
def BookmarkView(request):
    """Toggle a bookmark on the POSTed course id and return a JSON response.

    If the user already bookmarked the course the bookmark is removed,
    otherwise one is created.
    """
    if request.method == 'POST':
        # The original also read request.POST['user'] but never used it; the
        # authenticated request.user is authoritative.
        course_id = request.POST.get('course')
        course_obj = get_object_or_404(Course, id=course_id)
        try:
            # Look up via the resolved Course instance (the original passed
            # the raw POST string, relying on implicit pk coercion) and catch
            # only the expected miss instead of a bare `except:`.
            bookmark = Bookmark.objects.get(course=course_obj, user=request.user)
        except Bookmark.DoesNotExist:
            bookmark = None
        if bookmark is not None:
            bookmark.delete()
        else:
            obj = Bookmark()
            obj.course = course_obj
            obj.user = request.user
            obj.save()
        return HttpResponse(json.dumps({"success": True}), content_type='application/json')
| {
"content_hash": "c39bc4ad18c3b44ae9e0462c614d380a",
"timestamp": "",
"source": "github",
"line_count": 315,
"max_line_length": 186,
"avg_line_length": 45.384126984126986,
"alnum_prop": 0.6158365976496922,
"repo_name": "rajexp/collegeassist",
"id": "65c52e3273d9ac7c9f857a5730f07e699cca87b0",
"size": "14296",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assist/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "34575"
},
{
"name": "Python",
"bytes": "48008"
}
],
"symlink_target": ""
} |
from dep import *
from dep.helpers import *
def command_branch(args):
    """Create branch ``args.name`` across the whole dependency tree.

    ``args.startpoint`` (optional) is the common start point for each branch;
    the full argparse namespace is forwarded as options.
    """
    dep_tree = dependency.Tree()
    dep_tree.branch_dependency_tree(args.name, args.startpoint, vars(args))
# Register the "branch" sub-command on the shared top-level parser.
parser_branch = opts.subparsers.add_parser("branch",
                                           help="Branch all dependencies to new branch",
                                           description="Branch all dependencies to new branch. Each dependency gets a new commit.")
# Shared listing/selection flags (presumably from dep.helpers via the star
# import — verify).
add_list_arguments(parser_branch)
parser_branch.add_argument("name",
                           help="Name of branch to create, must not exist")
parser_branch.add_argument("startpoint", nargs="?",
                           help="Optional start point for each branch, should be a common tag")
# Dispatch: running `dep branch ...` invokes command_branch(args).
parser_branch.set_defaults(func=command_branch)
| {
"content_hash": "6af4c74b466bc329fa71dfbfd90e191d",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 126,
"avg_line_length": 46.05882352941177,
"alnum_prop": 0.632183908045977,
"repo_name": "harveyt/dep",
"id": "052b270b6717e228ddbda1023a6fcd41d10a599f",
"size": "837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dep/cmd/branch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "80292"
},
{
"name": "Shell",
"bytes": "21258"
}
],
"symlink_target": ""
} |
""" Various kinds of layout components.
"""
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
from ..core import validation
from ..core.validation.warnings import EMPTY_LAYOUT, BOTH_CHILD_AND_ROOT
from ..core.properties import abstract
from ..core.properties import Int, Instance, List
from .component import Component
@abstract
class Layout(Component):
    """ An abstract base class for layout components. ``Layout`` is not
    generally useful to instantiate on its own.
    """

    # Optional pixel sizing; see the property help strings below.
    width = Int(help="""
    An optional width for the component (in pixels).
    """)

    height = Int(help="""
    An optional height for the component (in pixels).
    """)
@abstract
class BaseBox(Layout):
    """ Abstract base class for HBox and VBox. Do not use directly.
    """

    def __init__(self, *args, **kwargs):
        # Children may be passed either positionally or via the ``children``
        # keyword, but not both at once.
        if len(args) > 0 and "children" in kwargs:
            raise ValueError("'children' keyword cannot be used with positional arguments")
        elif len(args) > 0:
            kwargs["children"] = list(args)
        super(BaseBox, self).__init__(**kwargs)

    @validation.warning(EMPTY_LAYOUT)
    def _check_empty_layout(self):
        # Emit the EMPTY_LAYOUT validation warning when the box has no
        # children at all.
        from itertools import chain
        if not list(chain(self.children)):
            return str(self)

    @validation.warning(BOTH_CHILD_AND_ROOT)
    def _check_child_is_also_root(self):
        # Emit the BOTH_CHILD_AND_ROOT warning for any child that is also
        # registered as a root of its document.
        problems = []
        for c in self.children:
            if c.document is not None and c in c.document.roots:
                problems.append(str(c))
        if problems:
            return ", ".join(problems)
        else:
            return None

    children = List(Instance(Component), help="""
    The list of children, which can be other components including layouts,
    widgets and plots.
    """)
class HBox(BaseBox):
    """ Lay out child components in a single horizontal row.

    Children can be specified as positional arguments, as a single argument
    that is a sequence, or using the ``children`` keyword argument.
    """
class VBox(BaseBox):
    """ Lay out child components in a single vertical column.

    Children can be specified as positional arguments, as a single argument
    that is a sequence, or using the ``children`` keyword argument.
    """
# Parent class only — you need to set the fields you want.
class VBoxForm(VBox):
    """
    Basically, a VBox, where all components (generally form stuff)
    is wrapped in a <form> tag - important for bootstrap css
    """
| {
"content_hash": "5d67eafb61dc62ab8bfc0b9966a95907",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 91,
"avg_line_length": 28.770114942528735,
"alnum_prop": 0.6532161406312426,
"repo_name": "pombredanne/bokeh",
"id": "5a1a13216caf5e75646057305926cf09df44d4d2",
"size": "2503",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bokeh/models/layouts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import GencodeGTFParser
__author__ = 'Guorong Xu<g1xu@ucsd.edu>'
import re
import sys
def merge_all_sample_count(workflow, project_name, sample_list):
    """Merge per-sample gene count files into one tab-separated matrix.

    Reads each sample's ``*_counts.txt`` under the project's workflow
    directory and appends a combined table to ``all_gene_counts.txt``.

    Args:
        workflow: either ``"kallisto_deseq_workflow"`` or
            ``"star_htseq_workflow"``; controls how count lines are parsed
            and which header columns are written.
        project_name: project directory name under the data archive.
        sample_list: colon-separated sample file names (see get_sample_list).

    Raises:
        ValueError: if ``workflow`` is not one of the two supported values.
    """
    gencode_gtf_file = "/shared/workspace/software/gencode/gencode.v19.annotation.gtf"
    localpath = "/shared/workspace/data_archive/RNASeq/" + project_name + "/" + workflow + "/"
    output_file = localpath + "all_gene_counts.txt"
    # Gene-id -> annotation mapping, used by the STAR/HTSeq branch below.
    gene_table = GencodeGTFParser.parse(gencode_gtf_file)

    # FIX: fail fast on an unknown workflow. The original only assigned
    # `header` inside two if/elif branches, so any other value crashed with
    # NameError at write time (after all the parsing work).
    if workflow == "kallisto_deseq_workflow":
        header = "gene\tsymbol\tdescription\t"
    elif workflow == "star_htseq_workflow":
        header = "gene\t"
    else:
        raise ValueError("unsupported workflow: " + workflow)

    all_gene_counts = []
    samples = get_sample_list(sample_list)
    for sample_index, sample_file in enumerate(samples):
        line_index = 0
        count_file = sample_file.replace(".fastq", "_counts.txt")
        # 'r' instead of the original 'r+' (the file is only read); the
        # context manager guarantees the handle is closed even on errors.
        with open(localpath + count_file, 'r') as f:
            for line in f:
                fields = re.split(r'\t+', line)
                if workflow == "kallisto_deseq_workflow":
                    # Skip the header row and blank lines.
                    if fields[0].startswith("gene") or len(fields[0]) == 0:
                        continue
                    if sample_index == 0:
                        # First sample establishes the row: id, symbol,
                        # description, count (trailing newline stripped).
                        all_gene_counts.insert(
                            line_index,
                            [fields[0], fields[1], fields[2], fields[3][:-1]])
                    else:
                        # Later samples only contribute their count column.
                        all_gene_counts[line_index].append(fields[3][:-1])
                    line_index += 1
                elif workflow == "star_htseq_workflow":
                    if sample_index == 0:
                        # Translate gene ids through the GTF table when known.
                        gene = gene_table.get(fields[0], fields[0])
                        all_gene_counts.insert(line_index, [gene, fields[1][:-1]])
                    else:
                        all_gene_counts[line_index].append(fields[1][:-1])
                    line_index += 1

    # NOTE(review): append mode is kept from the original — repeated runs
    # keep adding to the same output file. Confirm that is intended.
    with open(output_file, "a") as filewriter:
        for sample_file in samples:
            header = header + sample_file + "\t"
        filewriter.write(header[:-1] + "\n")
        for items in all_gene_counts:
            filewriter.write("\t".join(items) + "\n")
def get_sample_list(sample_list):
    """Split a colon-delimited sample string into a list of sample names.

    Consecutive colons are treated as a single separator.
    """
    return re.split(r':+', sample_list)
if __name__ == "__main__":
    # CLI: MergeCountFile.py <workflow> <project_name> <colon-separated samples>
    workflow = sys.argv[1]
    project_name = sys.argv[2]
    sample_list = sys.argv[3]
    merge_all_sample_count(workflow, project_name, sample_list)
"content_hash": "36ede801dc8c5f21c30b4ece98a0bfae",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 109,
"avg_line_length": 34.17857142857143,
"alnum_prop": 0.5357018460466736,
"repo_name": "ucsd-ccbb/jupyter-genomics",
"id": "6db45db82265c85aa7e9ae8728a2210551a46cb9",
"size": "2871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/awsCluster/rnaSeq/MergeCountFile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "254329"
},
{
"name": "Java",
"bytes": "278021"
},
{
"name": "Jupyter Notebook",
"bytes": "19771596"
},
{
"name": "Perl",
"bytes": "14052"
},
{
"name": "Python",
"bytes": "428899"
},
{
"name": "R",
"bytes": "6817"
},
{
"name": "Shell",
"bytes": "37476"
}
],
"symlink_target": ""
} |
"""NASNet-A models for Keras.
NASNet refers to Neural Architecture Search Network, a family of models
that were designed automatically by learning the model architectures
directly on the dataset of interest.
Here we consider NASNet-A, the highest performance model that was found
for the CIFAR-10 dataset, and then extended to ImageNet 2012 dataset,
obtaining state of the art performance on CIFAR-10 and ImageNet 2012.
Only the NASNet-A models, and their respective weights, which are suited
for ImageNet 2012 are provided.
The below table describes the performance on ImageNet 2012:
--------------------------------------------------------------------------------
Architecture | Top-1 Acc | Top-5 Acc | Multiply-Adds | Params (M)
--------------------------------------------------------------------------------
| NASNet-A (4 @ 1056) | 74.0 % | 91.6 % | 564 M | 5.3 |
| NASNet-A (6 @ 4032) | 82.7 % | 96.2 % | 23.8 B | 88.9 |
--------------------------------------------------------------------------------
Reference:
- [Learning Transferable Architectures for Scalable Image Recognition](
https://arxiv.org/abs/1707.07012) (CVPR 2018)
"""
from tensorflow.python.keras import backend
from tensorflow.python.keras.applications import imagenet_utils
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import VersionAwareLayers
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.keras.utils import layer_utils
from tensorflow.python.lib.io import file_io
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
# Download locations for the pretrained NASNet weight files (with and
# without the classification top) hosted for Keras Applications.
BASE_WEIGHTS_PATH = ('https://storage.googleapis.com/tensorflow/'
                     'keras-applications/nasnet/')
NASNET_MOBILE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-mobile.h5'
NASNET_MOBILE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-mobile-no-top.h5'
NASNET_LARGE_WEIGHT_PATH = BASE_WEIGHTS_PATH + 'NASNet-large.h5'
NASNET_LARGE_WEIGHT_PATH_NO_TOP = BASE_WEIGHTS_PATH + 'NASNet-large-no-top.h5'

# Shared layer factory used by all model-building functions in this module.
layers = VersionAwareLayers()
def NASNet(input_shape=None,
           penultimate_filters=4032,
           num_blocks=6,
           stem_block_filters=96,
           skip_reduction=True,
           filter_multiplier=2,
           include_top=True,
           weights='imagenet',
           input_tensor=None,
           pooling=None,
           classes=1000,
           default_size=None,
           classifier_activation='softmax'):
  """Instantiates a NASNet model.

  Reference:
  - [Learning Transferable Architectures for Scalable Image Recognition](
      https://arxiv.org/abs/1707.07012) (CVPR 2018)

  For image classification use cases, see
  [this page for detailed examples](
    https://keras.io/api/applications/#usage-examples-for-image-classification-models).

  For transfer learning use cases, make sure to read the
  [guide to transfer learning & fine-tuning](
    https://keras.io/guides/transfer_learning/).

  Note: each Keras Application expects a specific kind of input preprocessing.
  For NasNet, call `tf.keras.applications.nasnet.preprocess_input`
  on your inputs before passing them to the model.
  `nasnet.preprocess_input` will scale input pixels between -1 and 1.

  Args:
    input_shape: Optional shape tuple, the input shape
      is by default `(331, 331, 3)` for NASNetLarge and
      `(224, 224, 3)` for NASNetMobile.
      It should have exactly 3 input channels,
      and width and height should be no smaller than 32.
      E.g. `(224, 224, 3)` would be one valid value.
    penultimate_filters: Number of filters in the penultimate layer.
      NASNet models use the notation `NASNet (N @ P)`, where:
        - N is the number of blocks
        - P is the number of penultimate filters
    num_blocks: Number of repeated blocks of the NASNet model.
      NASNet models use the notation `NASNet (N @ P)`, where:
        - N is the number of blocks
        - P is the number of penultimate filters
    stem_block_filters: Number of filters in the initial stem block
    skip_reduction: Whether to skip the reduction step at the tail
      end of the network.
    filter_multiplier: Controls the width of the network.
      - If `filter_multiplier` < 1.0, proportionally decreases the number
        of filters in each layer.
      - If `filter_multiplier` > 1.0, proportionally increases the number
        of filters in each layer.
      - If `filter_multiplier` = 1, default number of filters from the
        paper are used at each layer.
    include_top: Whether to include the fully-connected
      layer at the top of the network.
    weights: `None` (random initialization) or
      `imagenet` (ImageNet weights)
    input_tensor: Optional Keras tensor (i.e. output of
      `layers.Input()`)
      to use as image input for the model.
    pooling: Optional pooling mode for feature extraction
      when `include_top` is `False`.
      - `None` means that the output of the model
        will be the 4D tensor output of the
        last convolutional block.
      - `avg` means that global average pooling
        will be applied to the output of the
        last convolutional block, and thus
        the output of the model will be a
        2D tensor.
      - `max` means that global max pooling will
        be applied.
    classes: Optional number of classes to classify images
      into, only to be specified if `include_top` is True, and
      if no `weights` argument is specified.
    default_size: Specifies the default image size of the model
    classifier_activation: A `str` or callable. The activation function to use
      on the "top" layer. Ignored unless `include_top=True`. Set
      `classifier_activation=None` to return the logits of the "top" layer.
      When loading pretrained weights, `classifier_activation` can only
      be `None` or `"softmax"`.

  Returns:
    A `keras.Model` instance.
  """
  # --- Argument validation -------------------------------------------------
  if not (weights in {'imagenet', None} or file_io.file_exists_v2(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as `"imagenet"` with `include_top` '
                     'as true, `classes` should be 1000')

  if (isinstance(input_shape, tuple) and None in input_shape and
      weights == 'imagenet'):
    raise ValueError('When specifying the input shape of a NASNet'
                     ' and loading `ImageNet` weights, '
                     'the input_shape argument must be static '
                     '(no None entries). Got: `input_shape=' +
                     str(input_shape) + '`.')

  if default_size is None:
    default_size = 331

  # Determine proper input shape and default size.
  input_shape = imagenet_utils.obtain_input_shape(
      input_shape,
      default_size=default_size,
      min_size=32,
      data_format=backend.image_data_format(),
      require_flatten=True,
      weights=weights)

  # NASNet only supports channels_last; temporarily switch the global data
  # format and restore it before returning.
  if backend.image_data_format() != 'channels_last':
    logging.warning('The NASNet family of models is only available '
                    'for the input data format "channels_last" '
                    '(width, height, channels). '
                    'However your settings specify the default '
                    'data format "channels_first" (channels, width, height).'
                    ' You should set `image_data_format="channels_last"` '
                    'in your Keras config located at ~/.keras/keras.json. '
                    'The model being returned right now will expect inputs '
                    'to follow the "channels_last" data format.')
    backend.set_image_data_format('channels_last')
    old_data_format = 'channels_first'
  else:
    old_data_format = None

  if input_tensor is None:
    img_input = layers.Input(shape=input_shape)
  else:
    if not backend.is_keras_tensor(input_tensor):
      img_input = layers.Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  if penultimate_filters % (24 * (filter_multiplier**2)) != 0:
    raise ValueError(
        'For NASNet-A models, the `penultimate_filters` must be a multiple '
        'of 24 * (`filter_multiplier` ** 2). Current value: %d' %
        penultimate_filters)

  channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
  filters = penultimate_filters // 24

  # --- Stem: strided 3x3 conv followed by two reduction cells --------------
  x = layers.Conv2D(
      stem_block_filters, (3, 3),
      strides=(2, 2),
      padding='valid',
      use_bias=False,
      name='stem_conv1',
      kernel_initializer='he_normal')(
          img_input)

  x = layers.BatchNormalization(
      axis=channel_dim, momentum=0.9997, epsilon=1e-3, name='stem_bn1')(
          x)

  p = None
  x, p = _reduction_a_cell(
      x, p, filters // (filter_multiplier**2), block_id='stem_1')
  x, p = _reduction_a_cell(
      x, p, filters // filter_multiplier, block_id='stem_2')

  # --- Body: three stacks of `num_blocks` normal cells, separated by
  # reduction cells that scale the filter count by `filter_multiplier` -----
  for i in range(num_blocks):
    x, p = _normal_a_cell(x, p, filters, block_id='%d' % (i))

  x, p0 = _reduction_a_cell(
      x, p, filters * filter_multiplier, block_id='reduce_%d' % (num_blocks))

  p = p0 if not skip_reduction else p

  for i in range(num_blocks):
    x, p = _normal_a_cell(
        x, p, filters * filter_multiplier, block_id='%d' % (num_blocks + i + 1))

  x, p0 = _reduction_a_cell(
      x,
      p,
      filters * filter_multiplier**2,
      block_id='reduce_%d' % (2 * num_blocks))

  p = p0 if not skip_reduction else p

  for i in range(num_blocks):
    x, p = _normal_a_cell(
        x,
        p,
        filters * filter_multiplier**2,
        block_id='%d' % (2 * num_blocks + i + 1))

  x = layers.Activation('relu')(x)

  # --- Head: classifier or optional global pooling -------------------------
  if include_top:
    x = layers.GlobalAveragePooling2D()(x)
    imagenet_utils.validate_activation(classifier_activation, weights)
    x = layers.Dense(classes, activation=classifier_activation,
                     name='predictions')(x)
  else:
    if pooling == 'avg':
      x = layers.GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = layers.GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`.
  if input_tensor is not None:
    inputs = layer_utils.get_source_inputs(input_tensor)
  else:
    inputs = img_input

  model = training.Model(inputs, x, name='NASNet')

  # Load weights. `default_size` selects between the mobile and large
  # pretrained checkpoints.
  if weights == 'imagenet':
    if default_size == 224:  # mobile version
      if include_top:
        weights_path = data_utils.get_file(
            'nasnet_mobile.h5',
            NASNET_MOBILE_WEIGHT_PATH,
            cache_subdir='models',
            file_hash='020fb642bf7360b370c678b08e0adf61')
      else:
        weights_path = data_utils.get_file(
            'nasnet_mobile_no_top.h5',
            NASNET_MOBILE_WEIGHT_PATH_NO_TOP,
            cache_subdir='models',
            file_hash='1ed92395b5b598bdda52abe5c0dbfd63')
      model.load_weights(weights_path)
    elif default_size == 331:  # large version
      if include_top:
        weights_path = data_utils.get_file(
            'nasnet_large.h5',
            NASNET_LARGE_WEIGHT_PATH,
            cache_subdir='models',
            file_hash='11577c9a518f0070763c2b964a382f17')
      else:
        weights_path = data_utils.get_file(
            'nasnet_large_no_top.h5',
            NASNET_LARGE_WEIGHT_PATH_NO_TOP,
            cache_subdir='models',
            file_hash='d81d89dc07e6e56530c4e77faddd61b5')
      model.load_weights(weights_path)
    else:
      raise ValueError('ImageNet weights can only be loaded with NASNetLarge'
                       ' or NASNetMobile')
  elif weights is not None:
    model.load_weights(weights)

  # Restore the caller's original data format if we changed it above.
  if old_data_format:
    backend.set_image_data_format(old_data_format)

  return model
@keras_export('keras.applications.nasnet.NASNetMobile',
              'keras.applications.NASNetMobile')
def NASNetMobile(input_shape=None,
                 include_top=True,
                 weights='imagenet',
                 input_tensor=None,
                 pooling=None,
                 classes=1000):
  """Instantiates a Mobile NASNet model in ImageNet mode.

  This is a thin wrapper around `NASNet` configured as NASNet-A (4 @ 1056):
  4 normal-cell repeats, 1056 penultimate filters, a 32-filter stem, no
  skipped tail reduction, and a 224x224 default input size.

  Reference:
  - [Learning Transferable Architectures for Scalable Image Recognition](
      https://arxiv.org/abs/1707.07012) (CVPR 2018)

  Note: each Keras Application expects a specific kind of input preprocessing.
  For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your
  inputs before passing them to the model.

  Args:
    input_shape: Optional shape tuple, only to be specified if `include_top`
      is False; with `imagenet` weights the input shape should be
      `(224, 224, 3)`. Exactly 3 input channels, width/height >= 32.
    include_top: Whether to include the fully-connected layer at the top of
      the network.
    weights: `None` (random initialization) or `imagenet` (ImageNet weights).
    input_tensor: Optional Keras tensor (output of `layers.Input()`) to use
      as image input for the model.
    pooling: Optional pooling mode when `include_top` is False: `None`
      returns the 4D feature tensor, `avg`/`max` apply the corresponding
      global pooling and return a 2D tensor.
    classes: Optional number of classes, only when `include_top` is True and
      no `weights` argument is specified.

  Returns:
    A Keras model instance.

  Raises:
    ValueError: In case of invalid argument for `weights`,
      or invalid input shape.
    RuntimeError: If attempting to run this model with a
      backend that does not support separable convolutions.
  """
  return NASNet(
      input_shape,
      penultimate_filters=1056,
      num_blocks=4,
      stem_block_filters=32,
      skip_reduction=False,
      filter_multiplier=2,
      include_top=include_top,
      weights=weights,
      input_tensor=input_tensor,
      pooling=pooling,
      classes=classes,
      default_size=224)
@keras_export('keras.applications.nasnet.NASNetLarge',
              'keras.applications.NASNetLarge')
def NASNetLarge(input_shape=None,
                include_top=True,
                weights='imagenet',
                input_tensor=None,
                pooling=None,
                classes=1000):
  """Instantiates a NASNet model in ImageNet mode.

  This is a thin wrapper around `NASNet` configured as NASNet-A (6 @ 4032):
  6 normal-cell repeats, 4032 penultimate filters, a 96-filter stem, the
  tail reduction skipped, and a 331x331 default input size.

  Reference:
  - [Learning Transferable Architectures for Scalable Image Recognition](
      https://arxiv.org/abs/1707.07012) (CVPR 2018)

  Note: each Keras Application expects a specific kind of input preprocessing.
  For NASNet, call `tf.keras.applications.nasnet.preprocess_input` on your
  inputs before passing them to the model.

  Args:
    input_shape: Optional shape tuple, only to be specified if `include_top`
      is False; with `imagenet` weights the input shape should be
      `(331, 331, 3)`. Exactly 3 input channels, width/height >= 32.
    include_top: Whether to include the fully-connected layer at the top of
      the network.
    weights: `None` (random initialization) or `imagenet` (ImageNet weights).
    input_tensor: Optional Keras tensor (output of `layers.Input()`) to use
      as image input for the model.
    pooling: Optional pooling mode when `include_top` is False: `None`
      returns the 4D feature tensor, `avg`/`max` apply the corresponding
      global pooling and return a 2D tensor.
    classes: Optional number of classes, only when `include_top` is True and
      no `weights` argument is specified.

  Returns:
    A Keras model instance.

  Raises:
    ValueError: in case of invalid argument for `weights`,
      or invalid input shape.
    RuntimeError: If attempting to run this model with a
      backend that does not support separable convolutions.
  """
  return NASNet(
      input_shape,
      penultimate_filters=4032,
      num_blocks=6,
      stem_block_filters=96,
      skip_reduction=True,
      filter_multiplier=2,
      include_top=include_top,
      weights=weights,
      input_tensor=input_tensor,
      pooling=pooling,
      classes=classes,
      default_size=331)
def _separable_conv_block(ip,
                          filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          block_id=None):
  """Adds 2 blocks of [relu-separable conv-batchnorm].

  Args:
    ip: Input tensor
    filters: Number of output filters per layer
    kernel_size: Kernel size of separable convolutions
    strides: Strided convolution for downsampling
    block_id: String block_id

  Returns:
    A Keras tensor
  """
  channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1

  with backend.name_scope('separable_conv_block_%s' % block_id):
    x = layers.Activation('relu')(ip)
    if strides == (2, 2):
      # For the strided case, pad explicitly and switch the conv to 'valid'
      # padding (via imagenet_utils.correct_pad).
      x = layers.ZeroPadding2D(
          padding=imagenet_utils.correct_pad(x, kernel_size),
          name='separable_conv_1_pad_%s' % block_id)(x)
      conv_pad = 'valid'
    else:
      conv_pad = 'same'
    # First relu -> separable conv -> batchnorm block (carries the stride).
    x = layers.SeparableConv2D(
        filters,
        kernel_size,
        strides=strides,
        name='separable_conv_1_%s' % block_id,
        padding=conv_pad,
        use_bias=False,
        kernel_initializer='he_normal')(
            x)
    x = layers.BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_1_bn_%s' % (block_id))(
            x)
    # Second relu -> separable conv -> batchnorm block (always stride 1).
    x = layers.Activation('relu')(x)
    x = layers.SeparableConv2D(
        filters,
        kernel_size,
        name='separable_conv_2_%s' % block_id,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal')(
            x)
    x = layers.BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_2_bn_%s' % (block_id))(
            x)
  return x
def _adjust_block(p, ip, filters, block_id=None):
  """Adjusts the input `previous path` to match the shape of the `input`.

  Used in situations where the output number of filters needs to be changed.

  Args:
    p: Input tensor which needs to be modified
    ip: Input tensor whose shape needs to be matched
    filters: Number of output filters to be matched
    block_id: String block_id

  Returns:
    Adjusted Keras tensor
  """
  channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
  img_dim = 2 if backend.image_data_format() == 'channels_first' else -2

  ip_shape = backend.int_shape(ip)

  if p is not None:
    p_shape = backend.int_shape(p)

  with backend.name_scope('adjust_block'):
    if p is None:
      # No previous path yet: reuse the current input.
      p = ip

    elif p_shape[img_dim] != ip_shape[img_dim]:
      # Spatial sizes differ: halve `p`'s resolution with two stride-2
      # average-pooling + 1x1 conv paths (the second shifted by one pixel
      # via pad+crop) and concatenate them channel-wise.
      with backend.name_scope('adjust_reduction_block_%s' % block_id):
        p = layers.Activation('relu', name='adjust_relu_1_%s' % block_id)(p)
        p1 = layers.AveragePooling2D((1, 1),
                                     strides=(2, 2),
                                     padding='valid',
                                     name='adjust_avg_pool_1_%s' % block_id)(
                                         p)
        p1 = layers.Conv2D(
            filters // 2, (1, 1),
            padding='same',
            use_bias=False,
            name='adjust_conv_1_%s' % block_id,
            kernel_initializer='he_normal')(
                p1)

        p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
        p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
        p2 = layers.AveragePooling2D((1, 1),
                                     strides=(2, 2),
                                     padding='valid',
                                     name='adjust_avg_pool_2_%s' % block_id)(
                                         p2)
        p2 = layers.Conv2D(
            filters // 2, (1, 1),
            padding='same',
            use_bias=False,
            name='adjust_conv_2_%s' % block_id,
            kernel_initializer='he_normal')(
                p2)

        p = layers.concatenate([p1, p2], axis=channel_dim)
        p = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='adjust_bn_%s' % block_id)(
                p)

    elif p_shape[channel_dim] != filters:
      # Channel counts differ: project `p` to `filters` channels with a
      # 1x1 convolution.
      with backend.name_scope('adjust_projection_block_%s' % block_id):
        p = layers.Activation('relu')(p)
        p = layers.Conv2D(
            filters, (1, 1),
            strides=(1, 1),
            padding='same',
            name='adjust_conv_projection_%s' % block_id,
            use_bias=False,
            kernel_initializer='he_normal')(
                p)
        p = layers.BatchNormalization(
            axis=channel_dim,
            momentum=0.9997,
            epsilon=1e-3,
            name='adjust_bn_%s' % block_id)(
                p)
  return p
def _normal_a_cell(ip, p, filters, block_id=None):
  """Adds a Normal cell for NASNet-A (Fig. 4 in the paper).

  Args:
    ip: Input tensor `x`
    p: Input tensor `p`
    filters: Number of output filters
    block_id: String block_id

  Returns:
    A Keras tensor
  """
  channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1

  with backend.name_scope('normal_A_block_%s' % block_id):
    # Bring the previous path `p` in line with `ip` (spatial size and
    # channel count) before the two are mixed below.
    p = _adjust_block(p, ip, filters, block_id)

    # Project `ip` to `filters` channels: relu -> 1x1 conv -> batchnorm.
    h = layers.Activation('relu')(ip)
    h = layers.Conv2D(
        filters, (1, 1),
        strides=(1, 1),
        padding='same',
        name='normal_conv_1_%s' % block_id,
        use_bias=False,
        kernel_initializer='he_normal')(
            h)
    h = layers.BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='normal_bn_1_%s' % block_id)(
            h)

    # Five parallel branches mixing `h` and `p`, each summed pairwise.
    with backend.name_scope('block_1'):
      x1_1 = _separable_conv_block(
          h, filters, kernel_size=(5, 5), block_id='normal_left1_%s' % block_id)
      x1_2 = _separable_conv_block(
          p, filters, block_id='normal_right1_%s' % block_id)
      x1 = layers.add([x1_1, x1_2], name='normal_add_1_%s' % block_id)

    with backend.name_scope('block_2'):
      x2_1 = _separable_conv_block(
          p, filters, (5, 5), block_id='normal_left2_%s' % block_id)
      x2_2 = _separable_conv_block(
          p, filters, (3, 3), block_id='normal_right2_%s' % block_id)
      x2 = layers.add([x2_1, x2_2], name='normal_add_2_%s' % block_id)

    with backend.name_scope('block_3'):
      x3 = layers.AveragePooling2D((3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   name='normal_left3_%s' % (block_id))(
                                       h)
      x3 = layers.add([x3, p], name='normal_add_3_%s' % block_id)

    with backend.name_scope('block_4'):
      x4_1 = layers.AveragePooling2D((3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     name='normal_left4_%s' % (block_id))(
                                         p)
      x4_2 = layers.AveragePooling2D((3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     name='normal_right4_%s' % (block_id))(
                                         p)
      x4 = layers.add([x4_1, x4_2], name='normal_add_4_%s' % block_id)

    with backend.name_scope('block_5'):
      x5 = _separable_conv_block(
          h, filters, block_id='normal_left5_%s' % block_id)
      x5 = layers.add([x5, h], name='normal_add_5_%s' % block_id)

    # Concatenate the adjusted previous path and all five branch outputs.
    x = layers.concatenate([p, x1, x2, x3, x4, x5],
                           axis=channel_dim,
                           name='normal_concat_%s' % block_id)
  return x, ip
def _reduction_a_cell(ip, p, filters, block_id=None):
  """Adds a Reduction cell for NASNet-A (Fig. 4 in the paper).
  Args:
    ip: Input tensor `x`
    p: Input tensor `p` (previous cell input; adjusted by `_adjust_block`
      to be shape-compatible with `ip`)
    filters: Number of output filters
    block_id: String block_id
  Returns:
    A tuple `(x, ip)`: the cell output and the unchanged `ip`, which
    becomes the next cell's `p`.
  """
  channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
  with backend.name_scope('reduction_A_block_%s' % block_id):
    p = _adjust_block(p, ip, filters, block_id)
    # Cell "stem" h: ReLU -> 1x1 conv -> BN on the cell input.
    h = layers.Activation('relu')(ip)
    h = layers.Conv2D(
        filters, (1, 1),
        strides=(1, 1),
        padding='same',
        name='reduction_conv_1_%s' % block_id,
        use_bias=False,
        kernel_initializer='he_normal')(
            h)
    h = layers.BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='reduction_bn_1_%s' % block_id)(
            h)
    # Zero-padded copy of the stem, used by the stride-2 'valid' poolings.
    h3 = layers.ZeroPadding2D(
        padding=imagenet_utils.correct_pad(h, 3),
        name='reduction_pad_1_%s' % block_id)(
            h)
    with backend.name_scope('block_1'):
      x1_1 = _separable_conv_block(
          h,
          filters, (5, 5),
          strides=(2, 2),
          block_id='reduction_left1_%s' % block_id)
      x1_2 = _separable_conv_block(
          p,
          filters, (7, 7),
          strides=(2, 2),
          block_id='reduction_right1_%s' % block_id)
      x1 = layers.add([x1_1, x1_2], name='reduction_add_1_%s' % block_id)
    with backend.name_scope('block_2'):
      x2_1 = layers.MaxPooling2D((3, 3),
                                 strides=(2, 2),
                                 padding='valid',
                                 name='reduction_left2_%s' % block_id)(
                                     h3)
      x2_2 = _separable_conv_block(
          p,
          filters, (7, 7),
          strides=(2, 2),
          block_id='reduction_right2_%s' % block_id)
      x2 = layers.add([x2_1, x2_2], name='reduction_add_2_%s' % block_id)
    with backend.name_scope('block_3'):
      x3_1 = layers.AveragePooling2D((3, 3),
                                     strides=(2, 2),
                                     padding='valid',
                                     name='reduction_left3_%s' % block_id)(
                                         h3)
      x3_2 = _separable_conv_block(
          p,
          filters, (5, 5),
          strides=(2, 2),
          block_id='reduction_right3_%s' % block_id)
      x3 = layers.add([x3_1, x3_2], name='reduction_add3_%s' % block_id)
    with backend.name_scope('block_4'):
      x4 = layers.AveragePooling2D((3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   name='reduction_left4_%s' % block_id)(
                                       x1)
      x4 = layers.add([x2, x4])
    with backend.name_scope('block_5'):
      # NOTE(review): 'reduction_left4' below duplicates block_4's prefix;
      # this matches upstream and layer names are load-bearing for
      # pretrained checkpoints, so it is deliberately left unchanged.
      x5_1 = _separable_conv_block(
          x1, filters, (3, 3), block_id='reduction_left4_%s' % block_id)
      x5_2 = layers.MaxPooling2D((3, 3),
                                 strides=(2, 2),
                                 padding='valid',
                                 name='reduction_right5_%s' % block_id)(
                                     h3)
      x5 = layers.add([x5_1, x5_2], name='reduction_add4_%s' % block_id)
    # Concatenate the branch outputs along the channel axis.
    x = layers.concatenate([x2, x3, x4, x5],
                           axis=channel_dim,
                           name='reduction_concat_%s' % block_id)
  return x, ip
@keras_export('keras.applications.nasnet.preprocess_input')
def preprocess_input(x, data_format=None):
  # Thin wrapper around the shared applications helper with mode='tf'.
  # The public docstring is attached below from
  # imagenet_utils.PREPROCESS_INPUT_DOC.
  return imagenet_utils.preprocess_input(x, data_format=data_format, mode='tf')
@keras_export('keras.applications.nasnet.decode_predictions')
def decode_predictions(preds, top=5):
  # Thin wrapper; the public docstring is copied from
  # imagenet_utils.decode_predictions below.
  return imagenet_utils.decode_predictions(preds, top=top)
# Attach the shared keras.applications docstrings to the wrappers above.
preprocess_input.__doc__ = imagenet_utils.PREPROCESS_INPUT_DOC.format(
    mode='',
    ret=imagenet_utils.PREPROCESS_INPUT_RET_DOC_TF,
    error=imagenet_utils.PREPROCESS_INPUT_ERROR_DOC)
decode_predictions.__doc__ = imagenet_utils.decode_predictions.__doc__
| {
"content_hash": "e7e5d6bafefbe4017bcfa8bd2734ee8e",
"timestamp": "",
"source": "github",
"line_count": 801,
"max_line_length": 87,
"avg_line_length": 37.25717852684145,
"alnum_prop": 0.5810072713869249,
"repo_name": "frreiss/tensorflow-fred",
"id": "2650af29cfe7033c50b1dc9900a64e3b073ac56a",
"size": "30563",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/applications/nasnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "6729"
},
{
"name": "Batchfile",
"bytes": "49527"
},
{
"name": "C",
"bytes": "871761"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "79093233"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "110545"
},
{
"name": "Go",
"bytes": "1852128"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "961600"
},
{
"name": "Jupyter Notebook",
"bytes": "549457"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1644156"
},
{
"name": "Makefile",
"bytes": "62398"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "303063"
},
{
"name": "PHP",
"bytes": "20523"
},
{
"name": "Pascal",
"bytes": "3982"
},
{
"name": "Pawn",
"bytes": "18876"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "40003007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Roff",
"bytes": "2472"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "Shell",
"bytes": "681596"
},
{
"name": "Smarty",
"bytes": "34740"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
from pylab import *
# Plot styling shared by every figure produced by this script.
linewidth = 2
#markers= ['+', ',', 'o', '.', 's', 'v', 'x', '>', '<', '^']
#markers= [ 'x', '+', 'o', 's', 'v', '^', '>', '<', ]
markers = ['s', 'o', 'v', '^', '+', 'x', '>', '<', ]
markersize = 8
def get_values(filename):
    """Parse one benchmark log and return the (sizes, values) data series.

    Which measurement is extracted is controlled by the module-level flags
    set in ``__main__`` (``insert``, ``overlaps``, ``entropy``,
    ``create_index``, ``create_total``, ``table_size``, ``indexes_size``,
    ``total_size``, ``query``, ``query_cold``, ``query_warm``,
    ``query_repeated`` and ``colname``); this function must be called
    after those globals are initialised.

    Returns:
        Tuple of two parallel lists: row counts and measured values.
    """
    # Explicit import: `from pylab import *` does not provide pathlib.Path,
    # which read_text() below requires.
    from pathlib import Path

    sizes = []
    values = []
    for line in Path(filename).read_text().splitlines():
        if line.startswith('Processing database:'):
            # New benchmark section: reset the accumulated index time and
            # decode the row count from the database file name.
            txtime = 0
            line = line.split(':')[1]
            # Check if entry is compressed and if has to be processed
            line = line[:line.rfind('.')]
            # Row count is encoded like "-100k-"; the k/m/g suffix means
            # thousands/millions/billions of rows.
            params = line.split('-')
            for param in params:
                if param[-1] in ('k', 'm', 'g'):
                    size = param
                    isize = int(size[:-1]) * 1000
                    if size[-1] == "m":
                        isize *= 1000
                    elif size[-1] == "g":
                        isize *= 1000 * 1000
        elif insert and line.startswith('Insert time'):
            tmp = line.split(':')[1]
            itime = float(tmp)
            sizes.append(isize)
            values.append(itime)
        elif (overlaps or entropy) and line.startswith('overlaps'):
            tmp = line.split(':')[1]
            e1, e2 = tmp.split()
            if isize in sizes:
                # Keep only the latest reading for this size
                # (assumes the duplicate is the most recent entry).
                sizes.pop()
                values.pop()
            sizes.append(isize)
            # +1 keeps zero values visible on a log-log plot.
            if overlaps:
                values.append(int(e1) + 1)
            else:
                values.append(float(e2) + 1)
        elif (create_total or create_index) and line.startswith('Index time'):
            tmp = line.split(':')[1]
            xtime = float(tmp)
            txtime += xtime
            if create_index and create_index in line:
                sizes.append(isize)
                values.append(xtime)
            elif create_total and txtime > xtime:
                # Only record the total once more than one index time has
                # been accumulated for this database.
                sizes.append(isize)
                values.append(txtime)
        elif table_size and line.startswith('Table size'):
            tsize = float(line.split(':')[1])
            sizes.append(isize)
            values.append(tsize)
        elif indexes_size and line.startswith('Indexes size'):
            xsize = float(line.split(':')[1])
            sizes.append(isize)
            values.append(xsize)
        elif total_size and line.startswith('Full size'):
            fsize = float(line.split(':')[1])
            sizes.append(isize)
            values.append(fsize)
        elif query and line.startswith('Query time'):
            tmp = line.split(':')[1]
            qtime = float(tmp)
            if colname in line:
                sizes.append(isize)
                values.append(qtime)
        elif ((query or query_cold or query_warm) and
              line.startswith('[NOREP]')):
            tmp = line.split(':')[1]
            # Times may carry a "+-" deviation suffix; strip it if present.
            try:
                qtime = float(tmp[:tmp.index('+-')])
            except ValueError:
                qtime = float(tmp)
            if colname in line:
                if query and '1st' in line:
                    sizes.append(isize)
                    values.append(qtime)
                elif query_cold and 'cold' in line:
                    sizes.append(isize)
                    values.append(qtime)
                elif query_warm and 'warm' in line:
                    sizes.append(isize)
                    values.append(qtime)
        elif query_repeated and line.startswith('[REP]'):
            if colname in line and 'warm' in line:
                tmp = line.split(':')[1]
                qtime = float(tmp[:tmp.index('+-')])
                sizes.append(isize)
                values.append(qtime)
    return sizes, values
def show_plot(plots, yaxis, legends, gtitle):
    """Decorate the current pylab figure and render it.

    Applies title, axis labels, a fixed log-scale x-range and a grid,
    attaches one legend entry per plot, then either saves the figure to
    the global ``outfile`` (when set) or opens an interactive window.
    """
    title(gtitle)
    xlabel('Number of rows')
    ylabel(yaxis)
    # Fixed horizontal range covering 10^3 .. 10^10 rows.
    xlim(10 ** 3, 10 ** 10)
    grid(True)
    handles = [entry[0] for entry in plots]
    legend(handles, legends, loc="upper left")
    if outfile:
        savefig(outfile)
    else:
        show()
if __name__ == '__main__':
    import sys
    import getopt
    usage = """usage: %s [-o file] [-t title] [--insert] [--create-index] [--create-total] [--overlaps] [--entropy] [--table-size] [--indexes-size] [--total-size] [--query=colname] [--query-cold=colname] [--query-warm=colname] [--query-repeated=colname] files
 -o filename for output (only .png and .jpg extensions supported)
 -t title of the plot
 --insert -- Insert time for table
 --create-index=colname -- Index time for column
 --create-total -- Total time for creation of table + indexes
 --overlaps -- The overlapping for the created index
 --entropy -- The entropy for the created index
 --table-size -- Size of table
 --indexes-size -- Size of all indexes
 --total-size -- Total size of table + indexes
 --query=colname -- Time for querying the specified column
 --query-cold=colname -- Time for querying the specified column (cold cache)
 --query-warm=colname -- Time for querying the specified column (warm cache)
 --query-repeated=colname -- Time for querying the specified column (rep query)
 \n""" % sys.argv[0]
    try:
        opts, pargs = getopt.getopt(sys.argv[1:], 'o:t:',
                                    ['insert',
                                     'create-index=',
                                     'create-total',
                                     'overlaps',
                                     'entropy',
                                     'table-size',
                                     'indexes-size',
                                     'total-size',
                                     'query=',
                                     'query-cold=',
                                     'query-warm=',
                                     'query-repeated=',
                                     ])
    # Only trap option-parsing errors; the previous bare `except:` also
    # swallowed KeyboardInterrupt/SystemExit and hid real bugs.
    except getopt.GetoptError:
        sys.stderr.write(usage)
        # NOTE(review): exiting 0 on a usage error is questionable;
        # kept for backwards compatibility with existing scripts.
        sys.exit(0)
    progname = sys.argv[0]
    args = sys.argv[1:]
    # if we pass too few parameters, abort
    if len(pargs) < 1:
        sys.stderr.write(usage)
        sys.exit(0)
    # default options
    outfile = None
    insert = 0
    create_index = None
    create_total = 0
    overlaps = 0
    entropy = 0
    table_size = 0
    indexes_size = 0
    total_size = 0
    query = 0
    query_cold = 0
    query_warm = 0
    query_repeated = 0
    colname = None
    yaxis = "No axis name"
    tit = None
    gtitle = "Please set a title!"
    # Get the options; each measurement flag also fixes the axis label
    # and a default plot title.
    for option in opts:
        if option[0] == '-o':
            outfile = option[1]
        elif option[0] == '-t':
            tit = option[1]
        elif option[0] == '--insert':
            insert = 1
            yaxis = "Time (s)"
            gtitle = "Insert time for table"
        elif option[0] == '--create-index':
            create_index = option[1]
            yaxis = "Time (s)"
            gtitle = "Create index time for " + create_index + " column"
        elif option[0] == '--create-total':
            create_total = 1
            yaxis = "Time (s)"
            gtitle = "Create time for table + indexes"
        elif option[0] == '--overlaps':
            overlaps = 1
            yaxis = "Overlapping index + 1"
            gtitle = "Overlapping for col4 column"
        elif option[0] == '--entropy':
            entropy = 1
            yaxis = "Entropy + 1"
            gtitle = "Entropy for col4 column"
        elif option[0] == '--table-size':
            table_size = 1
            yaxis = "Size (MB)"
            gtitle = "Table size"
        elif option[0] == '--indexes-size':
            indexes_size = 1
            yaxis = "Size (MB)"
            gtitle = "Index size for col4 column"
        elif option[0] == '--total-size':
            total_size = 1
            yaxis = "Size (MB)"
            gtitle = "Total size (table + indexes)"
        elif option[0] == '--query':
            query = 1
            colname = option[1]
            yaxis = "Time (s)"
            gtitle = "Query time for " + colname + " column (first query)"
        elif option[0] == '--query-cold':
            query_cold = 1
            colname = option[1]
            yaxis = "Time (s)"
            gtitle = "Query time for " + colname + " column (cold cache)"
        elif option[0] == '--query-warm':
            query_warm = 1
            colname = option[1]
            yaxis = "Time (s)"
            gtitle = "Query time for " + colname + " column (warm cache)"
        elif option[0] == '--query-repeated':
            query_repeated = 1
            colname = option[1]
            yaxis = "Time (s)"
            gtitle = "Query time for " + colname + " column (repeated query)"
    # Use friendlier type names in the title.
    gtitle = gtitle.replace('col2', 'Int32')
    gtitle = gtitle.replace('col4', 'Float64')
    filenames = pargs
    if tit:
        gtitle = tit
    plots = []
    legends = []
    for i, filename in enumerate(filenames):
        # The legend is derived from the file name (must end in '.out').
        plegend = filename[:filename.index('.out')]
        plegend = plegend.replace('-', ' ')
        if filename.find('PyTables') != -1:
            xval, yval = get_values(filename)
            # Report the parsed series for this file (the literal
            # "(unknown)" placeholder was replaced by the actual name).
            print(f"Values for {filename} --> {xval}, {yval}")
            if xval != []:
                plot = loglog(xval, yval)
                setp(plot, marker=markers[i], markersize=markersize,
                     linewidth=linewidth)
                plots.append(plot)
                legends.append(plegend)
        else:
            xval, yval = get_values(filename)
            print(f"Values for {filename} --> {xval}, {yval}")
            plots.append(loglog(xval, yval, linewidth=3, color='m'))
            legends.append(plegend)
    if 0:  # Hook to inject simulated data if desired...
        xval = [1000, 10_000, 100_000, 1_000_000, 10_000_000,
                100_000_000, 1_000_000_000]
        yval = [0.0009, 0.0011, 0.0022, 0.005, 0.02,
                0.2, 5.6]
        plots.append(loglog(xval, yval, linewidth=linewidth))
        legends.append("PyTables Std")
    show_plot(plots, yaxis, legends, gtitle)
| {
"content_hash": "7782f5fc58d7e19416c9c3d2664232e4",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 259,
"avg_line_length": 36.732638888888886,
"alnum_prop": 0.48700255222610833,
"repo_name": "avalentino/PyTables",
"id": "ca1ecf14da7a0630db4210e3e0e20f26945a5c29",
"size": "10579",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bench/get-figures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "165578"
},
{
"name": "CMake",
"bytes": "2417"
},
{
"name": "Cython",
"bytes": "283042"
},
{
"name": "Gnuplot",
"bytes": "2104"
},
{
"name": "Makefile",
"bytes": "2291"
},
{
"name": "Python",
"bytes": "3119836"
},
{
"name": "Shell",
"bytes": "19408"
}
],
"symlink_target": ""
} |
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.modules import rh_ip
import jinja2.exceptions
import os
# Globals
# Stub out the Salt dunder dicts the loader normally injects so that
# rh_ip's functions can run outside a real minion; the individual tests
# patch.dict() these as needed.
rh_ip.__grains__ = {}
rh_ip.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class RhipTestCase(TestCase):
    '''
    Test cases for salt.modules.rh_ip

    Every test mocks rh_ip's private helpers and Salt cross-calls; no
    real files or network interfaces are touched.
    '''
    def test_build_bond(self):
        '''
        Test to create a bond script in /etc/modprobe.d with the passed
        settings and load the bonding kernel module.
        '''
        with patch.dict(rh_ip.__grains__, {'osrelease': 'osrelease'}):
            with patch.object(rh_ip, '_parse_settings_bond', MagicMock()):
                # Missing jinja template -> build_bond returns ''.
                mock = jinja2.exceptions.TemplateNotFound('foo')
                with patch.object(jinja2.Environment, 'get_template',
                                  MagicMock(side_effect=mock)):
                    self.assertEqual(rh_ip.build_bond('iface'), '')
                # Template found: test=... returns the temp buffer,
                # test=None writes the file and returns its contents.
                with patch.dict(rh_ip.__salt__, {'kmod.load':
                                                 MagicMock(return_value=None)}):
                    with patch.object(rh_ip, '_write_file_iface',
                                      return_value=None):
                        with patch.object(rh_ip, '_read_temp', return_value='A'):
                            self.assertEqual(rh_ip.build_bond('iface', test='A'),
                                             'A')
                        with patch.object(rh_ip, '_read_file', return_value='A'):
                            self.assertEqual(rh_ip.build_bond('iface', test=None),
                                             'A')
    def test_build_interface(self):
        '''
        Test to build an interface script for a network interface.
        '''
        with patch.dict(rh_ip.__grains__, {'os': 'Fedora'}):
            # Invalid iface_type and conflicting netmask/prefix both raise.
            with patch.object(rh_ip, '_raise_error_iface', return_value=None):
                self.assertRaises(AttributeError,
                                  rh_ip.build_interface,
                                  'iface', 'slave', True)
                with patch.dict(rh_ip.__salt__, {'network.interfaces': lambda: {'eth': True}}):
                    self.assertRaises(AttributeError,
                                      rh_ip.build_interface,
                                      'iface', 'eth', True, netmask='255.255.255.255', prefix=32,
                                      test=True)
            with patch.object(rh_ip, '_parse_settings_bond', MagicMock()):
                # Missing template -> empty string.
                mock = jinja2.exceptions.TemplateNotFound('foo')
                with patch.object(jinja2.Environment,
                                  'get_template',
                                  MagicMock(side_effect=mock)):
                    self.assertEqual(rh_ip.build_interface('iface',
                                                           'vlan',
                                                           True), '')
                # Template found: test mode vs. real write.
                with patch.object(rh_ip, '_read_temp', return_value='A'):
                    with patch.object(jinja2.Environment,
                                      'get_template', MagicMock()):
                        self.assertEqual(rh_ip.build_interface('iface',
                                                               'vlan',
                                                               True,
                                                               test='A'),
                                         'A')
                        with patch.object(rh_ip, '_write_file_iface',
                                          return_value=None):
                            with patch.object(os.path, 'join',
                                              return_value='A'):
                                with patch.object(rh_ip, '_read_file',
                                                  return_value='A'):
                                    self.assertEqual(rh_ip.build_interface
                                                     ('iface', 'vlan',
                                                      True), 'A')
    def test_build_routes(self):
        '''
        Test to build a route script for a network interface.
        '''
        with patch.dict(rh_ip.__grains__, {'osrelease': '5.0'}):
            with patch.object(rh_ip, '_parse_routes', MagicMock()):
                # Missing template -> empty string.
                mock = jinja2.exceptions.TemplateNotFound('foo')
                with patch.object(jinja2.Environment,
                                  'get_template', MagicMock(side_effect=mock)):
                    self.assertEqual(rh_ip.build_routes('iface'), '')
                # On osrelease >= 6 both IPv4 and IPv6 route files are
                # produced, hence the doubled ['A', 'A'] result.
                with patch.object(jinja2.Environment,
                                  'get_template', MagicMock()):
                    with patch.object(rh_ip, '_read_temp', return_value=['A']):
                        self.assertEqual(rh_ip.build_routes('i', test='t'), ['A', 'A'])
                    with patch.object(rh_ip, '_read_file', return_value=['A']):
                        with patch.object(os.path, 'join', return_value='A'):
                            with patch.object(rh_ip, '_write_file_iface',
                                              return_value=None):
                                self.assertEqual(rh_ip.build_routes('i',
                                                                    test=None),
                                                 ['A', 'A'])
    def test_down(self):
        '''
        Test to shutdown a network interface
        '''
        with patch.dict(rh_ip.__salt__, {'cmd.run':
                                         MagicMock(return_value='A')}):
            # 'slave' interfaces are not brought down -> None.
            self.assertEqual(rh_ip.down('iface', 'iface_type'), 'A')
            self.assertEqual(rh_ip.down('iface', 'slave'), None)
    def test_get_bond(self):
        '''
        Test to return the content of a bond script
        '''
        with patch.object(os.path, 'join', return_value='A'):
            with patch.object(rh_ip, '_read_file', return_value='A'):
                self.assertEqual(rh_ip.get_bond('iface'), 'A')
    def test_get_interface(self):
        '''
        Test to return the contents of an interface script
        '''
        with patch.object(os.path, 'join', return_value='A'):
            with patch.object(rh_ip, '_read_file', return_value='A'):
                self.assertEqual(rh_ip.get_interface('iface'), 'A')
    def test_up(self):
        '''
        Test to start up a network interface
        '''
        with patch.dict(rh_ip.__salt__, {'cmd.run':
                                         MagicMock(return_value='A')}):
            # 'slave' interfaces are not brought up -> None.
            self.assertEqual(rh_ip.up('iface', 'iface_type'), 'A')
            self.assertEqual(rh_ip.up('iface', 'slave'), None)
    def test_get_routes(self):
        '''
        Test to return the contents of the interface routes script.
        '''
        with patch.object(os.path, 'join', return_value='A'):
            with patch.object(rh_ip, '_read_file', return_value=['A']):
                self.assertEqual(rh_ip.get_routes('iface'), ['A', 'A'])
    def test_get_network_settings(self):
        '''
        Test to return the contents of the global network script.
        '''
        with patch.object(rh_ip, '_read_file', return_value='A'):
            self.assertEqual(rh_ip.get_network_settings(), 'A')
    def test_apply_network_settings(self):
        '''
        Test to apply global network configuration.
        '''
        with patch.dict(rh_ip.__salt__, {'service.restart':
                                         MagicMock(return_value=True)}):
            self.assertTrue(rh_ip.apply_network_settings())
    def test_build_network_settings(self):
        '''
        Test to build the global network script.
        '''
        with patch.object(rh_ip, '_parse_rh_config', MagicMock()):
            with patch.object(rh_ip, '_parse_network_settings', MagicMock()):
                # Missing template -> empty string.
                mock = jinja2.exceptions.TemplateNotFound('foo')
                with patch.object(jinja2.Environment,
                                  'get_template', MagicMock(side_effect=mock)):
                    self.assertEqual(rh_ip.build_network_settings(), '')
                # Template found: test mode vs. real write.
                with patch.object(jinja2.Environment,
                                  'get_template', MagicMock()):
                    with patch.object(rh_ip, '_read_temp', return_value='A'):
                        self.assertEqual(rh_ip.build_network_settings
                                         (test='t'), 'A')
                        with patch.object(rh_ip, '_write_file_network',
                                          return_value=None):
                            with patch.object(rh_ip, '_read_file',
                                              return_value='A'):
                                self.assertEqual(rh_ip.build_network_settings
                                                 (test=None), 'A')
if __name__ == '__main__':
    # Allow running this test module standalone via Salt's test runner.
    from integration import run_tests
    run_tests(RhipTestCase, needs_daemon=False)
| {
"content_hash": "c4385dfeb2c461b2073eeb015af843c8",
"timestamp": "",
"source": "github",
"line_count": 214,
"max_line_length": 97,
"avg_line_length": 43.925233644859816,
"alnum_prop": 0.4553191489361702,
"repo_name": "stephane-martin/salt-debian-packaging",
"id": "3457ecb48f8edf530504a824fdb1b8015889f296",
"size": "9424",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "salt-2016.3.3/tests/unit/modules/rh_ip_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13798"
},
{
"name": "C",
"bytes": "986"
},
{
"name": "Groff",
"bytes": "13634346"
},
{
"name": "HTML",
"bytes": "39558"
},
{
"name": "Makefile",
"bytes": "20902"
},
{
"name": "NSIS",
"bytes": "22316"
},
{
"name": "PowerShell",
"bytes": "38719"
},
{
"name": "Python",
"bytes": "40857506"
},
{
"name": "SaltStack",
"bytes": "58278"
},
{
"name": "Scheme",
"bytes": "1790"
},
{
"name": "Shell",
"bytes": "829927"
},
{
"name": "Tcl",
"bytes": "6532"
},
{
"name": "TeX",
"bytes": "11632"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
from openpyxl.descriptors.serialisable import Serialisable
from openpyxl.descriptors import (
String,
)
from openpyxl.descriptors.excel import Relation
class Hyperlink(Serialisable):
    """A worksheet cell hyperlink, serialised as a ``<hyperlink>`` element.

    NOTE(review): ``target`` is deliberately absent from ``__attrs__`` --
    presumably it is resolved through the relationship (``id``) rather
    than serialised as an XML attribute; confirm before changing.
    """

    tagname = "hyperlink"

    ref = String()
    location = String(allow_none=True)
    tooltip = String(allow_none=True)
    display = String(allow_none=True)
    id = Relation()
    target = String(allow_none=True)

    __attrs__ = ("ref", "location", "tooltip", "display", "id")

    def __init__(self, ref=None, location=None, tooltip=None,
                 display=None, id=None, target=None):
        self.ref = ref
        self.location = location
        self.tooltip = tooltip
        self.display = display
        self.id = id
        self.target = target
| {
"content_hash": "90c27d034717cb1e02a329f437ff6eed",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 63,
"avg_line_length": 25.02777777777778,
"alnum_prop": 0.5715871254162043,
"repo_name": "aragos/tichu-tournament",
"id": "cdc3cfcbec9f9cf46989c9d3585c3f4be5a69b96",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/openpyxl/worksheet/hyperlink.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AMPL",
"bytes": "8008"
},
{
"name": "CSS",
"bytes": "1695"
},
{
"name": "HTML",
"bytes": "63890"
},
{
"name": "JavaScript",
"bytes": "320642"
},
{
"name": "Python",
"bytes": "3432940"
}
],
"symlink_target": ""
} |
from functools import partial
class Solver(object):
    """Abstract base class for solvers operating on a NEQSys instance.

    Subclasses override :meth:`run`, storing their numerical result on
    ``self.num_result`` and the solution mapping on ``self.solution``.
    """

    abstol = 1e-6
    reltol = 1e-6
    logger = None
    solve_args = {}  # special keyword arguments to run in subclasses

    def set_neqsys(self, neqsys):
        """Attach the system of equations this solver will work on."""
        self._neqsys = neqsys

    def run(self, x0, params, itermax=100, **kwargs):
        """
        Solves the neqsys
        store solution in self.solution with variable symbols as keys
        set success equal to True or False
        """
        pass

    def __getitem__(self, key):
        # Implicitly yields None when the last run was unsuccessful.
        if not self.num_result.success:
            return None
        try:
            return self.solution[key]
        except KeyError:
            # Fall back to resolving the key through the system's own
            # symbol mapping.
            return self.solution[self._neqsys[key]]
class SciPy_Solver(Solver):
    """Solver backed by :func:`scipy.optimize.root`."""

    # Least square sense (linearly dependent relations included).
    method = 'lm'

    @property
    def options(self):
        """Solver options forwarded to ``scipy.optimize.root``."""
        return {'xtol': self.abstol}

    def run(self, x0, params, itermax=100):
        # NOTE(review): itermax is currently unused -- confirm whether it
        # should be forwarded (e.g. as 'maxiter'/'maxfev') in options.
        import scipy.optimize
        residual = partial(self._neqsys.evaluate_residual, param_vals=params)
        jacobian = partial(self._neqsys.evaluate_jac, param_vals=params)
        self.num_result = scipy.optimize.root(
            fun=residual,
            x0=x0,
            method=self.method,
            jac=jacobian,
            options=self.options)
| {
"content_hash": "e61423e2d28d5e45d7cbc1cb78cb6071",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 69,
"avg_line_length": 25.615384615384617,
"alnum_prop": 0.5675675675675675,
"repo_name": "bjodah/symneqsys",
"id": "236f669ce6ec42851c1c551b7fc507fc4164e1f4",
"size": "1357",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symneqsys/solver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "10579"
},
{
"name": "Fortran",
"bytes": "6400"
},
{
"name": "Makefile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "29308"
},
{
"name": "Shell",
"bytes": "5142"
}
],
"symlink_target": ""
} |
"""Fichier contenant le module secondaire navigation."""
# Configuration des loggers
type(importeur).man_logs.creer_logger("navigation", "ordres", "ordres.log")
type(importeur).man_logs.creer_logger("navigation", "monstres", "monstres.log")
import os
from random import randint
from vector import *
from abstraits.module import *
from corps.fonctions import valider_cle
from primaires.format.fonctions import format_nb
from primaires.salle.chemin import Chemin
from primaires.salle.salle import Salle
from primaires.vehicule.vecteur import Vecteur
from secondaires.navigation.config import CFG_TEXTE
from .navire import Navire
from .navire_automatique import NavireAutomatique
from .elements import types as types_elements
from .elements.base import BaseElement
from .vent import Vent
from .visible import Visible
from . import cherchables
from . import commandes
from . import editeurs
from . import masques
from . import types
from .modele import ModeleNavire
from .constantes import *
from .equipage.equipage import Equipage
from .equipage.fiche import FicheMatelot
from .monstre.prototype import PrototypeMonstreMarin, types_monstres
from .chantier_naval import ChantierNaval
from .navires_vente import NaviresVente
from .matelots_vente import MatelotsVente
from .repere import Repere
from .trajet import Trajet
from .prompt import PromptNavigation
class Module(BaseModule):
"""Module secondaire définissant la navigation.
Ce module définit les navires, modèles de navires et objets liés.
"""
    def __init__(self, importeur):
        """Module constructor: declare all runtime registries empty."""
        BaseModule.__init__(self, importeur, "navigation", "secondaire")
        # This module must be prepared after the 'salle' (room) module.
        self.preparer_apres = ["salle"]
        self.commandes = []
        self.cfg = None
        self.fichier_suivi = None
        # Registries keyed by identifier; populated in init().
        self.modeles = {}
        self.nav_logger = type(self.importeur).man_logs.creer_logger(
                "navigation", "navires", "navires.log")
        self.navires = {}
        self.navires_automatiques = {}
        self.elements = {}
        self.types_elements = types_elements
        self.vents = {}
        self.vents_par_etendue = {}
        self.fiches = {}
        self.chantiers = {}
        self.trajets = {}
        self.reperes = {}
        self.matelots = {}
        self.types_monstres = types_monstres
        self.monstres = {}
        # NOTE(review): attribute name typo ("ovservables") -- kept as-is,
        # it is likely referenced elsewhere in the project.
        self.points_ovservables = {
            "cotes": Visible.trouver_cotes,
            "navires": Visible.trouver_navires,
            "reperes": Repere.trouver_reperes,
        }
    def config(self):
        """Module configuration.

        Loads the navigation config file, registers the character states
        used while sailing, adds the navigation level, the shop services
        (ship and sailor sales) and the 'navire:sombre' hook.
        """
        self.cfg = type(self.importeur).anaconf.get_config("navigation",
                "navigation/navigation.cfg", "modele navigationt", CFG_TEXTE)
        self.fichier_suivi = self.cfg.fichier_suivi
        self.importeur.scripting.a_charger.append(self)
        # Character states: each one blocks conflicting actions and shows
        # a room-visible message while the character is busy with it.
        his_voile = self.importeur.perso.ajouter_etat("hisser_voile")
        his_voile.msg_refus = "Vous êtes en train de hisser la voile"
        his_voile.msg_visible = "hisse une voile ici"
        his_voile.act_autorisees = ["regarder", "parler"]
        pli_voile = self.importeur.perso.ajouter_etat("plier_voile")
        pli_voile.msg_refus = "Vous êtes en train de replier la voile"
        pli_voile.msg_visible = "replie une voile ici"
        pli_voile.act_autorisees = ["regarder", "parler"]
        charger_canon = self.importeur.perso.ajouter_etat("charger_canon")
        charger_canon.msg_refus = "Vous êtes en train de charger le canon"
        charger_canon.msg_visible = "charge le canon ici"
        charger_canon.act_autorisees = ["parler"]
        ten_gouv = self.importeur.perso.ajouter_etat("tenir_gouvernail")
        ten_gouv.msg_refus = "Vous tenez actuellement le gouvernail"
        ten_gouv.msg_visible = "tient le gouvernail ici"
        ten_gouv.act_autorisees = ["regarder", "parler"]
        u_loch = self.importeur.perso.ajouter_etat("utiliser_loch")
        u_loch.msg_refus = "Vous êtes en train de manipuler le loch"
        u_loch.msg_visible = "manipule le loch ici"
        u_loch.act_autorisees = ["regarder", "parler"]
        point = self.importeur.perso.ajouter_etat("faire_point")
        point.msg_refus = "Vous êtes en train de faire votre point"
        point.msg_visible = "fait le point ici"
        point.act_autorisees = ["parler"]
        ten_rames = self.importeur.perso.ajouter_etat("tenir_rames")
        ten_rames.msg_refus = "Vous tenez actuellement les rames"
        ten_rames.msg_visible = "rame ici"
        ten_rames.act_autorisees = ["regarder", "parler"]
        # Add the level
        importeur.perso.ajouter_niveau("navigation", "navigation")
        # Add the shop services (ship and sailor sales) with their help texts.
        importeur.commerce.types_services["navire"] = NaviresVente()
        importeur.commerce.aides_types["navire"] = \
            "Ce service permet la vente de navires. Vous devez tout " \
            "simplement préciser la clé du modèle de navire. Attention " \
            "cependant : pour que la vente de navires dans ce magasin " \
            "puisse se faire, le magasin doit être relié à un chantier " \
            "naval."
        importeur.commerce.types_services["matelot"] = MatelotsVente()
        importeur.commerce.aides_types["matelot"] = \
            "Ce service permet la vente de matelots. Vous devez tout " \
            "simplement préciser la clé du matelot à mettre en vente " \
            "(sa clé de prototype de PNJ). La fiche du matelot " \
            "correspondant à ce prototype doit avoir été définie au " \
            "préalable."
        # Add hooks
        importeur.hook.ajouter_hook("navire:sombre",
                "Hook appelé quand un navire fait nauffrage.")
        BaseModule.config(self)
    def init(self):
        """Loading of ships and models.

        Registers scripting values, hooks, talents and prompts, then
        restores every persisted object group (models, ships, elements,
        winds, sailor sheets, routes, landmarks, shipyards), imports the
        sea-monster type modules and schedules the recurring actions.
        """
        self.importeur.scripting.valeurs["navire"] = self.navires
        # Add the prompt
        importeur.perso.ajouter_prompt(PromptNavigation)
        self.importeur.hook["salle:regarder"].ajouter_evenement(
                self.navire_amarre)
        self.importeur.hook["salle:regarder"].ajouter_evenement(
                self.navire_accoste)
        self.importeur.hook["objet:peut_boire"].ajouter_evenement(
                Navire.peut_boire)
        self.importeur.interpreteur.categories["navire"] = \
                "Commandes de navigation"
        self.importeur.hook["pnj:arrive"].ajouter_evenement(
                self.combat_matelot)
        self.importeur.hook["pnj:attaque"].ajouter_evenement(
                self.armer_matelot)
        self.importeur.hook["pnj:détruit"].ajouter_evenement(
                self.detruire_pnj)
        self.importeur.hook["pnj:meurt"].ajouter_evenement(
                self.meurt_PNJ)
        self.importeur.hook["pnj:nom"].ajouter_evenement(
                Equipage.get_nom_matelot)
        self.importeur.hook["salle:trouver_chemins_droits"].ajouter_evenement(
                self.trouver_chemins_droits)
        self.importeur.hook["stats:infos"].ajouter_evenement(
                self.stats_navigation)
        self.importeur.hook["personnage:deplacer"].ajouter_evenement(
                self.modifier_prompt)
        # Add the talents
        importeur.perso.ajouter_talent("calfeutrage", "calfeutrage",
                "navigation", 0.5)
        # Restore the ship models
        modeles = self.importeur.supenr.charger_groupe(ModeleNavire)
        for modele in modeles:
            self.modeles[modele.cle] = modele
        nb_modeles = len(modeles)
        self.nav_logger.info(format_nb(nb_modeles,
                "{nb} modèle{s} de navire récupéré{s}"))
        # Restore the ships
        navires = self.importeur.supenr.charger_groupe(Navire)
        for navire in navires:
            self.ajouter_navire(navire)
        nb_navires = len(navires)
        self.nav_logger.info(format_nb(nb_navires,
                "{nb} navire{s} récupéré{s}"))
        # Restore the automatic ships
        fiches = self.importeur.supenr.charger_groupe(NavireAutomatique)
        for fiche in fiches:
            self.ajouter_navire_automatique(fiche)
        nb_autos = len(fiches)
        self.nav_logger.info(format_nb(nb_autos,
                "{nb} fiche{s} de navire{s} automatique{s} " \
                "récupérée{s}", fem=True))
        # Restore the ship elements
        elements = self.importeur.supenr.charger_groupe(BaseElement)
        for element in elements:
            self.elements[element.cle] = element
        nb_elements = len(elements)
        self.nav_logger.info(format_nb(nb_elements,
                "{nb} élément{s} de navire récupéré{s}"))
        # Restore the winds
        vents = self.importeur.supenr.charger_groupe(Vent)
        for vent in vents:
            self.ajouter_vent(vent)
        nb_vents = len(self.vents)
        self.nav_logger.info(format_nb(nb_vents,
                "{nb} vent{s} récupéré{s}"))
        # Restore the sailor sheets
        fiches = self.importeur.supenr.charger_groupe(FicheMatelot)
        for fiche in fiches:
            self.ajouter_fiche_matelot(fiche)
        nb_mat = len(self.fiches)
        self.nav_logger.info(format_nb(nb_mat,
                "{nb} fiche{s} de matelot récupérée{s}", fem=True))
        # Restore the sea routes
        trajets = self.importeur.supenr.charger_groupe(Trajet)
        for trajet in trajets:
            self.ajouter_trajet(trajet)
        nb_trajets = len(self.trajets)
        self.nav_logger.info(format_nb(nb_trajets,
                "{nb} trajet{s} maritime{s} récupéré{s}"))
        # Restore the landmarks
        reperes = self.importeur.supenr.charger_groupe(Repere)
        for repere in reperes:
            self.ajouter_repere(repere)
        nb_reperes = len(self.reperes)
        self.nav_logger.info(format_nb(nb_reperes,
                "{nb} repère{s} récupéré{s}"))
        # Restore the shipyards
        chantiers = self.importeur.supenr.charger_groupe(ChantierNaval)
        for chantier in chantiers:
            self.ajouter_chantier_naval(chantier)
        nb_chantiers = len(chantiers)
        self.nav_logger.info(format_nb(nb_chantiers,
                "{nb} chantier{s} naval{s} récupéré{s}"))
        # Sea monsters:
        # import every prototype module found in monstre/types.
        chemin = os.path.join(self.chemin, "monstre", "types")
        pychemin = "secondaires.navigation.monstre.types"
        for nom_fichier in os.listdir(chemin):
            if nom_fichier.startswith("_") or not nom_fichier.endswith(".py"):
                continue
            nom_fichier = pychemin + "." + nom_fichier[:-3]
            __import__(nom_fichier)
        # Schedule the recurring (deferred) actions.
        self.importeur.diffact.ajouter_action("dep_navire", TPS_VIRT,
                self.avancer_navires)
        self.importeur.diffact.ajouter_action("vir_navire", 3,
                self.virer_navires)
        self.importeur.diffact.ajouter_action("nauffrages", 5,
                self.nauffrages)
        self.importeur.diffact.ajouter_action("tick_chantiers", 60,
                self.tick_chantiers)
        # Register ships with the room module's cartography.
        self.importeur.salle.salles_a_cartographier.append(
                self.get_navires_presents)
        # Add events
        importeur.evt.ajouter_evenement("sombre", "Un navire sombre",
                "Nauffrage de {navire.cle}.", "navire:sombre")
        BaseModule.init(self)
def ajouter_commandes(self):
"""Ajout des commandes dans l'interpréteur"""
self.commandes = [
commandes.allure.CmdAllure(),
commandes.amarre.CmdAmarre(),
commandes.ancre.CmdAncre(),
commandes.cale.CmdCale(),
commandes.calfeutrer.CmdCalfeutrer(),
commandes.canon.CmdCanon(),
commandes.cap.CmdCap(),
commandes.chantier.CmdChantier(),
commandes.debarquer.CmdDebarquer(),
commandes.detailler.CmdDetailler(),
commandes.ecoper.CmdEcoper(),
commandes.eltedit.CmdEltedit(),
commandes.embarquer.CmdEmbarquer(),
commandes.equipage.CmdEquipage(),
commandes.gouvernail.CmdGouvernail(),
commandes.loch.CmdLoch(),
commandes.matelot.CmdMatelot(),
commandes.navire.CmdNavire(),
commandes.navire_automatique.CmdNavireAutomatique(),
commandes.passerelle.CmdPasserelle(),
commandes.pavillon.CmdPavillon(),
commandes.point.CmdPoint(),
commandes.rames.CmdRames(),
commandes.saborder.CmdSaborder(),
commandes.vent.CmdVent(),
commandes.voile.CmdVoile(),
]
for cmd in self.commandes:
self.importeur.interpreteur.ajouter_commande(cmd)
# Ajout des éditeurs
self.importeur.interpreteur.ajouter_editeur(
editeurs.autonavire.EdtNaedit)
self.importeur.interpreteur.ajouter_editeur(
editeurs.matedit.EdtMatedit)
self.importeur.interpreteur.ajouter_editeur(
editeurs.eltedit.EdtEltedit)
self.importeur.interpreteur.ajouter_editeur(editeurs.shedit.EdtShedit)
    def preparer(self):
        """Prepare the module after loading.

        Actions performed:
        - systematic refresh of every ship element
        - re-registration of sailors and replay of their pending orders
        - registration of reef-like terrains as map obstacles
        """
        self.nav_logger.info("Mise à jour des navires...")
        for navire in self.navires.values():
            for salle in navire.salles.values():
                # Refresh every element of the room.
                for element in salle.elements:
                    element.mettre_a_jour_attributs()
                # Reset the oars to a safe, unheld state.
                rames = salle.rames
                if rames:
                    rames.vitesse = "immobile"
                    rames.centrer()
                    rames.tenu = None
            navire.construire_depuis_modele(False)
            # A complete graph over N rooms has N**2 - N directed pairs;
            # if the stored graph differs, it is stale and rebuilt.
            if (len(navire.salles) ** 2 - len(navire.salles)) != \
                    len(navire.modele.graph):
                self.nav_logger.info("Calcul du graph du modèle de " \
                        "navire {}.".format(navire.modele.cle))
                navire.modele.generer_graph()
        self.nav_logger.info("... mise à jour des navires terminée.")
        for navire in self.navires.values():
            for matelot in navire.equipage.matelots.values():
                # Re-index sailors by their PNJ identifier.
                if matelot.personnage:
                    self.matelots[matelot.personnage.identifiant] = matelot
                # Resume orders that were interrupted by the reboot.
                if matelot.ordres:
                    matelot.nettoyer_ordres()
                    matelot.executer_ordres()
            navire.equipage.points_max = navire.equipage.points_actuels
        # Register the reef-like terrains as navigation obstacles.
        Navire.obs_recif = (
            self.importeur.salle.obstacles["récif"],
            self.importeur.salle.obstacles["rapide"],
            self.importeur.salle.obstacles["banc de sable"],
            self.importeur.salle.obstacles["corail"],
        )
        for obstacle in Navire.obs_recif:
            obstacle.symbole = "!"
def creer_modele(self, cle):
"""Crée un modèle de navire et l'ajoute dans le dictionnaire.
Retourne le modèle créé.
Lève une exception KeyError si le modèle existe déjà.
"""
valider_cle(cle)
if cle in self.modeles:
raise KeyError("le modèle de navire {} existe déjà".format(cle))
modele = ModeleNavire(cle)
self.ajouter_modele(modele)
return modele
def ajouter_modele(self, modele):
"""Ajoute le modèle de navire dans le dictionnaire."""
self.modeles[modele.cle] = modele
def supprimer_modele(self, cle):
"""Supprime le modèle de navire portant la clé passée en paramètre."""
if cle not in self.modeles:
raise KeyError("le modèle de navire de clé {} est inconnue".format(
cle))
modele = self.modeles[cle]
del self.modeles[cle]
modele.detruire()
def creer_navire(self, modele):
"""Crée un navire sur le modèle.
Retourne le navire créé.
"""
navire = Navire(modele)
self.ajouter_navire(navire)
return navire
def ajouter_navire(self, navire):
"""Ajoute le navire à la liste."""
cle = navire.cle
self.navires[cle] = navire
# Créé les actions différées
self.importeur.diffact.ajouter_action("tick_equipages_{}".format(cle),
1, self.tick_equipages, navire)
self.importeur.diffact.ajouter_action("tick_vigies_{}".format(cle),
randint(0, 20), self.tick_vigies, navire)
self.importeur.diffact.ajouter_action("controle_equipages_{}".format(
cle), randint(0, 5), self.controle_equipages, navire)
self.importeur.diffact.ajouter_action("objectif_equipages_{}".format(
cle), randint(0, 15), self.objectif_equipages, navire)
def supprimer_navire(self, cle):
"""Supprime le navire dont la clé est passée en paramètre."""
if cle not in self.navires:
raise KeyError("le navire de clé {} est introuvable".format(cle))
navire = self.navires[cle]
# Destruction des action différées
self.importeur.diffact.retirer_action("tick_equipages_{}".format(cle),
False)
self.importeur.diffact.retirer_action("tick_vigies_{}".format(cle),
False)
self.importeur.diffact.retirer_action("controle_equipages_{}".format(
cle), False)
self.importeur.diffact.retirer_action("objectif_equipages_{}".format(
cle), False)
navire.detruire()
del self.navires[cle]
def creer_navire_automatique(self, cle):
"""Crée un navire automatique."""
fiche = NavireAutomatique(cle)
self.ajouter_navire_automatique(fiche)
return fiche
def ajouter_navire_automatique(self, fiche):
"""Ajoute le navire automatique à la liste."""
self.navires_automatiques[fiche.cle] = fiche
def supprimer_navire_automatique(self, cle):
"""Supprime le navire automatique dont la clé est précisée."""
self.navires_automatiques.pop(cle).detruire()
def creer_element(self, cle, type_elt):
"""Crée un élément du type indiqué.
Retourne l'élément créé.
"""
elt = type_elt(cle)
self.ajouter_element(elt)
return elt
def ajouter_element(self, element):
"""Ajoute l'élément au dictionnaire."""
self.elements[element.cle] = element
def supprimer_element(self, cle):
"""Supprime l'élément dont la clé est passée en paramètre."""
if cle not in self.elements:
raise KeyError("l'élément de clé {} est introuvable".format(cle))
element = self.elements[cle]
element.detruire()
del self.elements[cle]
def get_vents_etendue(self, cle):
"""Retourne une liste des vents de l'étendue."""
return self.vents_par_etendue.get(cle, [])
def creer_vent(self, etendue, x, y, z, vitesse=1, direction=0):
"""Crée un vent dans une étendue.
Pour les paramètres, se référez au constructeur de la classe Vent.
"""
vent = Vent(etendue, x, y, z, vitesse, direction)
self.ajouter_vent(vent)
return vent
def ajouter_vent(self, vent):
"""Ajoute le vent."""
self.vents[vent.cle] = vent
self.vents_par_etendue[vent.etendue.cle] = self.vents_par_etendue.get(
vent.etendue.cle, []) + [vent]
def supprimer_vent(self, cle):
"""Supprime le vent."""
vent = self.vents[cle]
self.vents_par_etendue[vent.etendue.cle].remove(vent)
del self.vents[cle]
vent.detruire()
def creer_fiche_matelot(self, prototype):
"""Crée une fiche de matelot sur un prototype de PNJ."""
fiche = FicheMatelot(prototype)
self.ajouter_fiche_matelot(fiche)
return fiche
def ajouter_fiche_matelot(self, fiche):
"""Ajoute le matelot."""
self.fiches[fiche.cle] = fiche
def supprimer_fiche_matelot(self, cle):
"""Supprime le matelot."""
self.fiches.pop(cle).detruire()
def creer_trajet(self, cle):
"""Crée un trajet maritime."""
if cle in self.trajets:
raise ValueError("la clé {} est déjà utilisée".format(cle))
trajet = Trajet(cle)
self.ajouter_trajet(trajet)
return trajet
def ajouter_trajet(self, trajet):
"""Ajoute le trajet."""
self.trajets[trajet.cle] = trajet
def supprimer_trajet(self, cle):
"""Supprime le trajet."""
self.trajets.pop(cle).detruire()
def creer_repere(self, x, y):
"""Crée un repère."""
if (x, y) in self.reperes:
raise ValueError("le repère en {}.{} existe déjà".format(x, y))
repere = Repere(x, y)
self.ajouter_repere(repere)
return repere
def ajouter_repere(self, repere):
"""Ajoute le repère."""
self.reperes[(repere.x, repere.y)] = repere
def supprimer_repere(self, x, y):
"""Supprime le repère."""
self.reperes.pop((x, y)).detruire()
def creer_chantier_naval(self, cle):
"""Crée un chantier naval."""
if cle in self.chantiers:
raise ValueError("la clé {} est déjà utilisée par un autre " \
"chantier".format(cle))
chantier = ChantierNaval(cle)
self.ajouter_chantier_naval(chantier)
return chantier
def ajouter_chantier_naval(self, chantier):
"""Ajoute un chantier naval."""
if chantier.cle in self.chantiers:
raise ValueError("la clé {} est déjà utilisée par un autre " \
"chantier".format(chantier.cle))
self.chantiers[chantier.cle] = chantier
def supprimer_chantier_naval(self, cle):
"""Suppression d'un chantier naval."""
if cle not in self.chantiers:
raise ValueError("la clé {} n'est utilisée par aucun " \
"chantier".format(cle))
self.chantiers.pop(cle).detruire()
def get_chantier_naval(self, salle):
"""Retourne, si trouvé, le chantier naval lié à cette salle."""
for chantier in self.chantiers.values():
if chantier.salle_magasin is salle:
return chantier
return None
def avancer_navires(self):
"""Fait avancer les navires."""
self.importeur.diffact.ajouter_action("dep_navire", TPS_VIRT,
self.avancer_navires)
for navire in list(self.navires.values()):
if navire.etendue:
navire.avancer(DIST_AVA)
def virer_navires(self):
"""Fait virer les navires."""
self.importeur.diffact.ajouter_action("vir_navire", 3,
self.virer_navires)
for navire in self.navires.values():
if not navire.immobilise:
orientation = navire.orientation
if orientation != 0:
navire.virer(orientation)
def nauffrages(self):
"""Gère les nauffrages."""
self.importeur.diffact.ajouter_action("nauffrages", 5,
self.nauffrages)
for navire in list(self.navires.values()):
for salle in navire.salles.values():
if salle.noyable and salle.voie_eau == COQUE_OUVERTE:
salle.poids_eau = int(salle.poids_eau * 1.1)
poids = navire.poids
poids_max = navire.poids_max
if poids >= poids_max:
navire.sombrer()
elif poids > poids_max / 3:
navire.envoyer("L'eau emplit le navire de plus en plus vite.")
def tick_chantiers(self):
"""Tick des chantiers navals."""
self.importeur.diffact.ajouter_action("tick_chantiers", 60,
self.tick_chantiers)
for chantier in self.chantiers.values():
chantier.executer_commandes()
def tick_equipages(self, navire):
"""Tick des équipages."""
cle = navire.cle
self.importeur.diffact.ajouter_action("tick_equipages_{}".format(cle),
1, self.tick_equipages, navire)
if not navire.equipage.matelots:
return
navire.equipage.tick()
def tick_vigies(self, navire):
"""Tick les vigies."""
cle = navire.cle
self.importeur.diffact.ajouter_action("tick_vigies_{}".format(
cle), 5, self.tick_vigies, navire)
if not navire.equipage.matelots:
return
if not navire.immobilise and navire.vitesse.norme > 0:
navire.equipage.verifier_vigie()
def controle_equipages(self, navire):
"""Contrôle des équipages."""
cle = navire.cle
self.importeur.diffact.ajouter_action("controle_equipages_{}".format(
cle), 3, self.controle_equipages, navire)
if not navire.equipage.matelots:
return
if navire.immobilise:
return
for controle in navire.equipage.controles.values():
controle.controler()
def objectif_equipages(self, navire):
"""Travail sur les objectifs des équipages."""
cle = navire.cle
self.importeur.diffact.ajouter_action("objectif_equipages_{}".format(
cle), 5, self.objectif_equipages, navire)
if not navire.equipage.matelots:
return
navire.equipage.objectifs = [o for o in navire.equipage.objectifs if \
o.actif]
prioritaire = True
for objectif in navire.equipage.objectifs:
objectif.verifier(prioritaire)
prioritaire = False
def navire_amarre(self, salle, liste_messages, flags):
"""Si un navire est amarré, on l'affiche."""
if salle.etendue is None or salle.nom_terrain not in TERRAINS_QUAI:
return
etendue = salle.etendue
navires = [n for n in self.navires.values() if n.etendue is etendue]
for navire in navires:
for t_salle in navire.salles.values():
if t_salle.amarre and t_salle.amarre.attachee is salle:
e = "" if navire.modele.masculin else "e"
liste_messages.insert(0, "{} est amarré{e} ici.".format(
navire.desc_survol.capitalize(), e=e))
return
def navire_accoste(self, salle, liste_messages, flags):
"""Si un navire est accosté, on l'affiche."""
if salle.etendue is None or salle.nom_terrain not in TERRAINS_QUAI:
return
try:
sortie = salle.sorties.get_sortie_par_nom("passerelle")
except KeyError:
return
if sortie and sortie.salle_dest and hasattr(sortie.salle_dest,
"navire"):
navire = sortie.salle_dest.navire
e = "" if navire.modele.masculin else "e"
liste_messages.insert(0, "{} a accosté{e} ici.".format(
navire.desc_survol.capitalize(), e=e))
def combat_matelot(self, pnj, arrive):
"""Méthode appelé quand un PNJ arrive dans la salle d'un autre.
On profite de cette méthode (reliée à un hook) pour demander
à deux matelots de différents équipages de s'attaquer.
"""
if pnj is not arrive and hasattr(pnj, "identifiant") and \
pnj.identifiant in self.matelots:
matelot = self.matelots[pnj.identifiant]
navire = matelot.navire
immobilise = getattr(navire, "immobilise", True)
equipage = matelot.equipage
if not immobilise and equipage and not equipage.est_matelot(
arrive):
matelot.armer()
pnj.attaquer(arrive)
def armer_matelot(self, pnj, adversaire):
"""Méthode appelé quand un PNJ est attaqué.
On profite de cette méthode (reliée à un hook) pour demander
au matelot derrière le PNJ (si existe) de s'armer si besoin.
"""
print(pnj, "est attaqué")
if hasattr(pnj, "identifiant") and pnj.identifiant in self.matelots:
matelot = self.matelots[pnj.identifiant]
navire = matelot.navire
immobilise = getattr(navire, "immobilise", True)
equipage = matelot.equipage
if not immobilise and equipage:
matelot.armer()
def meurt_PNJ(self, pnj, adversaire):
"""PNJ meurt, tué par personnage.
Si pnj est un matelot, affiche une tip si le statut du navire
passe en abordable.
"""
if adversaire and hasattr(pnj, "identifiant") and \
pnj.identifiant in self.matelots:
matelot = self.matelots[pnj.identifiant]
navire = matelot.navire
equipage = matelot.equipage
if equipage:
actuels = equipage.points_actuels
futurs = actuels - matelot.poste.points
if not est_capturable(navire, actuels) and est_capturable(
navire, futurs):
adversaire.envoyer_tip("Vous pouvez maintenant " \
"conquérir ce navire en utilisant %équipage% " \
"%équipage:conquérir%.")
def detruire_pnj(self, pnj):
"""Détruit le matelot spécifié."""
if pnj.identifiant in self.matelots:
matelot = self.matelots[pnj.identifiant]
if matelot.equipage:
matelot.equipage.supprimer_matelot(matelot.nom, False)
else:
self.matelots.pop(pnj.identifiant).detruire()
def get_symbole(self, point):
"""Retourne le symbole correspondant."""
if isinstance(point, Salle) and point.nom_terrain in \
TERRAINS_ACCOSTABLES:
return "#"
elif hasattr(point, "symbole"):
return point.symbole
elif isinstance(point, Navire):
return "*"
elif isinstance(point, Repere):
return "~"
return ""
def points_navires(self, navire):
"""Retourne un tuple de couples (cooreds, salle).
Le navire passé en paramètre est à la fois l'exception qui
n'apparaîtra pas dans le tuple retourné et celui sur lequel
on se base pour savoir quelle étendue tester.
"""
etendue = navire.etendue
navires = [n for n in self.navires.values() if \
n.etendue is etendue and n is not navire]
points = []
for navire in navires:
for salle in navire.salles.values():
x, y, z = salle.coords.tuple()
points.append(((x, y), salle))
return tuple(points)
def get_navires_presents(self):
"""Retourne les navires présents sous la forme d'une liste de tuples
(nom, False, (x, y)) (voir module salle, commande cartographier).
"""
l_navires = []
for navire in self.navires.values():
l_navires.append(("Navire", False, \
(round(navire.position.x), round(navire.position.y))))
return l_navires
def dernier_ID(self, cle):
"""Retourne la prochaine clé non utilisée d'un modèle.
Si par exemple on passe en paramètre de cette fonction la clé
"voilier" et qu'il existe déjà un "voilier_1" et "voilier_2" alors
on retourne "voilier_3".
"""
ids = [int(n.cle[len(cle) + 1:]) for n in self.navires.values() if \
n.cle.startswith(cle + "_")]
n_id = max(ids) + 1 if ids else 1
return "{}_{}".format(cle, n_id)
def distance_min_avec_navires(self, vecteur):
"""Retourne la distance minimum avec tous les navires présents."""
distances = []
for navire in self.navires.values():
if navire.etendue is None:
continue
for salle in navire.salles.values():
t_x, t_y, t_z = salle.coords.x, salle.coords.y, salle.coords.z
t_vecteur = Vector(t_x, t_y, t_z)
distances.append((t_vecteur - vecteur).mag)
if distances:
return min(distances)
return None
def ecrire_suivi(self, message):
"""Écrit le message dans le fichier de suivi si défin."""
try:
if self.fichier_suivi:
with open(self.fichier_suivi, "a") as fichier:
fichier.write(message + "\n")
except Exception as err:
print(err)
def get_navires_possedes(self, personnage):
"""Retour les navires proches et possédés par le personnage.
Deux cas sont à distinguer :
* La salle du personnage est une salle de la terre ferme.
Dans ce cas, les navires récupérés sont ceux accostés
(amarrés ou ancrés) dans la même zone. On part du principe
que, si ils sont dans la même zone, alors ils peuvent être
joints par sorties.
* La salle du personnage est une salle de navire : dans ce
cas, on retourne les autres navires autour, c'est-à-dire
qui ont une salle à moins de 10 brasses. Cela demande de
vérifier chaque salle de chaque navire et est surtout utile
pour recruter des matelots d'un autre navire en mer.
"""
salle = personnage.salle
navires = []
if getattr(salle, "navire", None) is None:
# Premier cas, salle de la terre ferme
for navire in importeur.navigation.navires.values():
if not navire.a_le_droit(personnage, "maître d'équipage"):
continue
if not navire.accoste:
continue
if navire.point_accostage.nom_zone == salle.nom_zone:
navires.append(navire)
else:
# Second cas, c'est une salle de navire
navire = salle.navire
if not navire.a_le_droit(personnage, "maître d'équipage"):
return
coords = [s.coords.tuple() for s in navire.salles.values()]
for autre in importeur.navigation.navires.values():
if navire is autre or not autre.a_le_droit(
personnage, "maître d'équipage") or \
autre.etendue is None:
continue
for autre_salle in autre.salles.values():
tup = salle.coords.tuple()
distance = min(mag(tup[0], tup[1], tup[2], *c) for c in \
coords)
if distance < 4:
navires.append(autre)
break
navires.sort(key=lambda n: n.cle)
return navires
def trouver_chemins_droits(self, salle, chemins, rayon):
"""Ajoute aux chemins droits."""
if salle.etendue is None and not hasattr(salle, "navire"):
return
o_x, o_y, o_z = salle.coords.tuple()
salles = []
if getattr(salle, "navire", None):
for etendue in importeur.salle.etendues.values():
salles.extend(list(etendue.cotes.values()))
if salle.etendue:
for navire in self.navires.values():
for t_salle in navire.salles.values():
salles.append(t_salle)
for t_salle in salles:
if salle is t_salle:
continue
d_x, d_y, d_z = t_salle.coords.tuple()
vecteur = Vecteur(d_x - o_x, d_y - o_y, d_z - o_z)
if vecteur.norme < rayon:
sortie = salle.get_sortie(vecteur, t_salle)
chemin = Chemin()
chemin.sorties.append(sortie)
chemins.chemins.append(chemin)
def stats_navigation(self, infos):
"""Ajoute les stats concernant la navigation."""
navires = len(self.navires)
modeles = len(self.modeles)
matelots = []
for navire in self.navires.values():
if navire.equipage:
matelots.extend(navire.equipage.matelots.values())
ordres = []
for matelot in matelots:
if matelot.ordres:
ordres.append((matelot, matelot.ordres))
ordres = sorted(ordres, key=lambda c: len(c[1]), reverse=True)
nb_ordres = sum(len(c[1]) for c in ordres)
msg = "|tit|Navigation :|ff|"
msg += "\n {} navires construits sur {} modèles".format(
navires, modeles)
msg += "\n {} matelots en jeu".format(len(matelots))
msg += "\n {} ordres en cours".format(nb_ordres)
i = 0
if ordres:
msg += "\n Matelots les plus solicités :"
for matelot, t_ordres in ordres:
if i > 2:
break
msg += "\n {} en {} : {} ordres".format(
matelot.personnage and matelot.personnage.identifiant \
or "inconnu", matelot.equipage and \
matelot.equipage.navire.cle or "inconnu", len(t_ordres))
i += 1
infos.append(msg)
def modifier_prompt(self, personnage, destination, sortie, endurance):
"""Modifie le prompt du personnage se déplaçant."""
if getattr(destination, "navire", None):
# On active le prompt de navigation
personnage.selectionner_prompt("navigation")
else:
# On désélectionne le prompt
personnage.deselectionner_prompt("navigation")
| {
"content_hash": "593c41b95543fc0825106700a107799b",
"timestamp": "",
"source": "github",
"line_count": 997,
"max_line_length": 80,
"avg_line_length": 37.945837512537615,
"alnum_prop": 0.5899503066187355,
"repo_name": "stormi/tsunami",
"id": "a168110aee0f3bb821984e40bf8d141a2586970e",
"size": "39662",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/navigation/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
import contextlib
import datetime
import uuid
import mock
import testscenarios
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_utils import importutils
from oslo_utils import timeutils
from sqlalchemy.orm import query
from neutron.common import constants
from neutron.common import topics
from neutron import context as q_context
from neutron.db import agents_db
from neutron.db import common_db_mixin
from neutron.db import db_base_plugin_v2 as db_v2
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_db
from neutron.db import l3_dvrscheduler_db
from neutron.db import l3_hamode_db
from neutron.db import l3_hascheduler_db
from neutron.extensions import l3agentscheduler as l3agent
from neutron import manager
from neutron.scheduler import l3_agent_scheduler
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit import testlib_api
# the below code is required for the following reason
# (as documented in testscenarios)
"""Multiply tests depending on their 'scenarios' attribute.
This can be assigned to 'load_tests' in any test module to make this
automatically work across tests in the module.
"""
# Hooked by the unittest loader; testscenarios multiplies every test
# carrying a 'scenarios' attribute into one test per scenario.
load_tests = testscenarios.load_tests_apply_scenarios
# Fake agent report payloads used to register L3 agents in the tests below.
HOST_DVR = 'my_l3_host_dvr'
DVR_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST_DVR,
    'topic': topics.L3_AGENT,
    'configurations': {'agent_mode': 'dvr'},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}
HOST_DVR_SNAT = 'my_l3_host_dvr_snat'
# Same agent payload, but in 'dvr_snat' mode (hosts centralized SNAT).
DVR_SNAT_L3_AGENT = {
    'binary': 'neutron-l3-agent',
    'host': HOST_DVR_SNAT,
    'topic': topics.L3_AGENT,
    'configurations': {'agent_mode': 'dvr_snat'},
    'agent_type': constants.AGENT_TYPE_L3,
    'start_flag': True
}
class FakeL3Scheduler(l3_agent_scheduler.L3Scheduler):
    """Minimal concrete L3Scheduler: stubs out the abstract hooks so the
    base-class plumbing can be tested in isolation."""
    def schedule(self):
        pass
    def _choose_router_agent(self):
        pass
    def _choose_router_agents_for_ha(self):
        pass
class FakePortDB(object):
    """In-memory stand-in for the port plugin used by scheduler tests.

    Stores plain port dicts and answers get_port/get_ports with the same
    filter and tenant-visibility semantics as the real plugin.
    """
    def __init__(self, port_list):
        self._port_list = port_list
    def _matches(self, port, filters):
        """Return True if *port* satisfies every (key, values) filter."""
        for key, search_values in filters.items():
            port_value = port.get(key, None)
            if not port_value:
                return False
            if isinstance(port_value, list):
                # A list attribute matches when at least one of its
                # entries satisfies the nested filter dict.
                if not self._get_query_answer(port_value, search_values):
                    return False
            elif port_value not in search_values:
                return False
        return True
    def _get_query_answer(self, port_list, filters):
        """Return the subset of *port_list* matching every filter."""
        return [port for port in port_list if self._matches(port, filters)]
    def get_port(self, context, port_id):
        """Return the port with *port_id* if visible to *context*, else None."""
        for port in self._port_list:
            if port['id'] != port_id:
                continue
            if context.is_admin or port['tenant_id'] == context.tenant_id:
                return port
            return None
        return None
    def get_ports(self, context, filters=None):
        """Return ports matching *filters*, restricted to the caller's
        tenant unless the context is admin."""
        query_filters = dict(filters) if filters else {}
        if not context.is_admin:
            query_filters['tenant_id'] = [context.tenant_id]
        return self._get_query_answer(self._port_list, query_filters)
class L3SchedulerBaseTestCase(base.BaseTestCase):
    """Unit tests for the L3Scheduler base-class helpers, fully mocked.

    NOTE(review): uses ``contextlib.nested``, which only exists on
    Python 2; porting to Python 3 requires a multi-manager ``with``.
    """
    def setUp(self):
        super(L3SchedulerBaseTestCase, self).setUp()
        self.scheduler = FakeL3Scheduler()
        self.plugin = mock.Mock()
    def test_auto_schedule_routers(self):
        # With agents and routers available, auto-scheduling succeeds.
        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
        with contextlib.nested(
            mock.patch.object(self.scheduler, '_get_routers_to_schedule'),
            mock.patch.object(self.scheduler, '_get_routers_can_schedule')
        ) as (gs, gr):
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
            self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
            self.assertTrue(result)
            self.assertTrue(gs.called)
            self.assertTrue(gr.called)
    def test_auto_schedule_routers_no_agents(self):
        # No enabled agent on the host: scheduling bails out early.
        self.plugin.get_enabled_agent_on_host.return_value = None
        result = self.scheduler.auto_schedule_routers(
            self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)
    def test_auto_schedule_routers_no_unscheduled_routers(self):
        # No unscheduled routers: nothing to do, result is falsy.
        type(self.plugin).supported_extension_aliases = (
            mock.PropertyMock(return_value=[]))
        with mock.patch.object(self.scheduler,
                               '_get_routers_to_schedule') as mock_routers:
            mock_routers.return_value = []
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)
    def test_auto_schedule_routers_no_target_routers(self):
        # Unscheduled routers exist, but none can go to this agent.
        self.plugin.get_enabled_agent_on_host.return_value = [mock.ANY]
        with contextlib.nested(
            mock.patch.object(self.scheduler, '_get_routers_to_schedule'),
            mock.patch.object(self.scheduler, '_get_routers_can_schedule')
        ) as (mock_unscheduled_routers, mock_target_routers):
            mock_unscheduled_routers.return_value = mock.ANY
            mock_target_routers.return_value = None
            result = self.scheduler.auto_schedule_routers(
                self.plugin, mock.ANY, mock.ANY, mock.ANY)
        self.assertTrue(self.plugin.get_enabled_agent_on_host.called)
        self.assertFalse(result)
    def test__get_routers_to_schedule_with_router_ids(self):
        # Explicit router ids: fetched then filtered for unscheduled ones.
        router_ids = ['foo_router_1', 'foo_router_2']
        expected_routers = [
            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
        ]
        self.plugin.get_routers.return_value = expected_routers
        with mock.patch.object(self.scheduler,
                               '_filter_unscheduled_routers') as mock_filter:
            mock_filter.return_value = expected_routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin, router_ids)
        mock_filter.assert_called_once_with(
            mock.ANY, self.plugin, expected_routers)
        self.assertEqual(expected_routers, unscheduled_routers)
    def test__get_routers_to_schedule_without_router_ids(self):
        # Without ids, the scheduler asks for all unscheduled routers.
        expected_routers = [
            {'id': 'foo_router1'}, {'id': 'foo_router_2'}
        ]
        with mock.patch.object(self.scheduler,
                               '_get_unscheduled_routers') as mock_get:
            mock_get.return_value = expected_routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin)
        mock_get.assert_called_once_with(mock.ANY, self.plugin)
        self.assertEqual(expected_routers, unscheduled_routers)
    def test__get_routers_to_schedule_exclude_distributed(self):
        # exclude_distributed=True drops DVR routers from the result.
        routers = [
            {'id': 'foo_router1', 'distributed': True}, {'id': 'foo_router_2'}
        ]
        expected_routers = [{'id': 'foo_router_2'}]
        with mock.patch.object(self.scheduler,
                               '_get_unscheduled_routers') as mock_get:
            mock_get.return_value = routers
            unscheduled_routers = self.scheduler._get_routers_to_schedule(
                mock.ANY, self.plugin,
                router_ids=None, exclude_distributed=True)
        mock_get.assert_called_once_with(mock.ANY, self.plugin)
        self.assertEqual(expected_routers, unscheduled_routers)
    def _test__get_routers_can_schedule(self, routers, agent, target_routers):
        # Helper: candidate lookup returns *agent*; expect *target_routers*.
        self.plugin.get_l3_agent_candidates.return_value = agent
        result = self.scheduler._get_routers_can_schedule(
            mock.ANY, self.plugin, routers, mock.ANY)
        self.assertEqual(target_routers, result)
    def _test__filter_unscheduled_routers(self, routers, agents, expected):
        # Helper: hosting-agents lookup returns *agents*; expect *expected*.
        self.plugin.get_l3_agents_hosting_routers.return_value = agents
        unscheduled_routers = self.scheduler._filter_unscheduled_routers(
            mock.ANY, self.plugin, routers)
        self.assertEqual(expected, unscheduled_routers)
    def test__filter_unscheduled_routers_already_scheduled(self):
        self._test__filter_unscheduled_routers(
            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
            [{'id': 'foo_agent_id'}], [])
    def test__filter_unscheduled_routers_non_scheduled(self):
        self._test__filter_unscheduled_routers(
            [{'id': 'foo_router1'}, {'id': 'foo_router_2'}],
            None, [{'id': 'foo_router1'}, {'id': 'foo_router_2'}])
    def test__get_routers_can_schedule_with_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, mock.ANY, routers)
    def test__get_routers_can_schedule_with_no_compat_agent(self):
        routers = [{'id': 'foo_router'}]
        self._test__get_routers_can_schedule(routers, None, [])
    def test__bind_routers_centralized(self):
        # Non-HA routers are bound through bind_router directly.
        routers = [{'id': 'foo_router'}]
        with mock.patch.object(self.scheduler, 'bind_router') as mock_bind:
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, mock.ANY)
        mock_bind.assert_called_once_with(mock.ANY, 'foo_router', mock.ANY)
    def _test__bind_routers_ha(self, has_binding):
        # HA routers get a new HA binding only when none exists yet.
        routers = [{'id': 'foo_router', 'ha': True, 'tenant_id': '42'}]
        agent = agents_db.Agent(id='foo_agent')
        with contextlib.nested(
            mock.patch.object(self.scheduler, '_router_has_binding',
                              return_value=has_binding),
            mock.patch.object(self.scheduler, '_create_ha_router_binding')
        ) as (
            mock_has_binding, mock_bind):
            self.scheduler._bind_routers(mock.ANY, mock.ANY, routers, agent)
            mock_has_binding.assert_called_once_with(mock.ANY, 'foo_router',
                                                     'foo_agent')
            self.assertEqual(not has_binding, mock_bind.called)
    def test__bind_routers_ha_has_binding(self):
        self._test__bind_routers_ha(has_binding=True)
    def test__bind_routers_ha_no_binding(self):
        self._test__bind_routers_ha(has_binding=False)
class L3SchedulerBaseMixin(object):
    """Shared helpers to register (fake) L3 agents and build routers
    for the scheduler test cases."""
    def _register_l3_agent(self, host, agent_mode='legacy', plugin=None):
        # Register an agent via the report_state RPC path and return
        # its DB record.
        if not plugin:
            plugin = self.plugin
        agent = {
            'binary': 'neutron-l3-agent',
            'host': host,
            'topic': topics.L3_AGENT,
            'configurations': {'agent_mode': agent_mode},
            'agent_type': constants.AGENT_TYPE_L3,
            'start_flag': True
        }
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': agent},
                              time=timeutils.strtime())
        agent_db = plugin.get_agents_db(self.adminContext,
                                        filters={'host': [agent['host']]})
        return agent_db[0]
    def _register_l3_agents(self, plugin=None):
        # Two legacy agents on distinct hosts, kept on self for the tests.
        self.agent1 = self._register_l3_agent('host_1', plugin=plugin)
        self.agent_id1 = self.agent1.id
        self.agent2 = self._register_l3_agent('host_2', plugin=plugin)
        self.agent_id2 = self.agent2.id
    def _register_l3_dvr_agents(self):
        # One 'dvr' agent and one 'dvr_snat' agent (see module constants).
        callback = agents_db.AgentExtRpcCallback()
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': DVR_L3_AGENT},
                              time=timeutils.strtime())
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST_DVR]})
        self.l3_dvr_agent = agent_db[0]
        self.l3_dvr_agent_id = agent_db[0].id
        callback.report_state(self.adminContext,
                              agent_state={'agent_state': DVR_SNAT_L3_AGENT},
                              time=timeutils.strtime())
        agent_db = self.plugin.get_agents_db(self.adminContext,
                                             filters={'host': [HOST_DVR_SNAT]})
        self.l3_dvr_snat_id = agent_db[0].id
        self.l3_dvr_snat_agent = agent_db[0]
    def _set_l3_agent_admin_state(self, context, agent_id, state=True):
        update = {'agent': {'admin_state_up': state}}
        self.plugin.update_agent(context, agent_id, update)
    def _set_l3_agent_dead(self, agent_id):
        # Backdate the heartbeat so the agent is considered dead.
        update = {
            'agent': {
                'heartbeat_timestamp':
                timeutils.utcnow() - datetime.timedelta(hours=1)}}
        self.plugin.update_agent(self.adminContext, agent_id, update)
    @contextlib.contextmanager
    def router_with_ext_gw(self, name='router1', admin_state_up=True,
                           fmt=None, tenant_id=str(uuid.uuid4()),
                           external_gateway_info=None,
                           subnet=None, set_context=False,
                           **kwargs):
        # NOTE(review): the tenant_id default is evaluated ONCE at class
        # definition time, so every call that omits tenant_id shares the
        # same uuid — confirm this is intended before relying on it.
        router = self._make_router(fmt or self.fmt, tenant_id, name,
                                   admin_state_up, external_gateway_info,
                                   set_context, **kwargs)
        self._add_external_gateway_to_router(
            router['router']['id'],
            subnet['subnet']['network_id'])
        yield router
        # Cleanup runs only on normal exit (no try/finally here): the
        # gateway is detached and the router deleted.
        self._remove_external_gateway_from_router(
            router['router']['id'], subnet['subnet']['network_id'])
        self._delete('routers', router['router']['id'])
class L3SchedulerTestBaseMixin(object):
    """Shared tests for L3 scheduler behaviour (legacy and DVR routers).

    Mixed into concrete test cases that also provide the scheduler db
    mixins, so ``self`` exposes plugin methods such as
    ``add_router_to_l3_agent`` and ``get_l3_agent_candidates`` directly.
    """
    def _test_add_router_to_l3_agent(self,
                                     distributed=False,
                                     already_scheduled=False,
                                     external_gw=None):
        # Template: schedule a (possibly distributed) router onto an agent
        # and verify the binding is only created when not already scheduled.
        agent_id = self.agent_id1
        agent = self.agent1
        if distributed:
            self._register_l3_dvr_agents()
            agent_id = self.l3_dvr_snat_id
            agent = self.l3_dvr_snat_agent
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        if already_scheduled:
            self._test_schedule_bind_router(agent, router)
        with contextlib.nested(
            mock.patch.object(self, "validate_agent_router_combination"),
            mock.patch.object(self, "create_router_to_agent_binding"),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=router['router'])
        ) as (valid, auto_s, gr):
            self.add_router_to_l3_agent(self.adminContext, agent_id,
                                        router['router']['id'])
            # A new binding is created only when the router was not
            # already scheduled to the agent.
            self.assertNotEqual(already_scheduled, auto_s.called)
    def test__unbind_router_removes_binding(self):
        agent_id = self.agent_id1
        agent = self.agent1
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        self._test_schedule_bind_router(agent, router)
        self._unbind_router(self.adminContext,
                            router['router']['id'],
                            agent_id)
        bindings = self._get_l3_bindings_hosting_routers(
            self.adminContext, [router['router']['id']])
        self.assertEqual(0, len(bindings))
    def _create_router_for_l3_agent_dvr_test(self,
                                             distributed=False,
                                             external_gw=None):
        """Create a plain router dict tweaked for the DVR agent tests."""
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        router['router']['distributed'] = distributed
        router['router']['external_gateway_info'] = external_gw
        return router
    def _prepare_l3_agent_dvr_move_exceptions(self,
                                              distributed=False,
                                              external_gw=None,
                                              agent_id=None,
                                              expected_exception=None):
        """Assert scheduling the router to *agent_id* raises the exception."""
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=distributed, external_gw=external_gw)
        with contextlib.nested(
            mock.patch.object(self, "create_router_to_agent_binding"),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=router['router'])):
            self.assertRaises(expected_exception,
                              self.add_router_to_l3_agent,
                              self.adminContext, agent_id,
                              router['router']['id'])
    def test_add_router_to_l3_agent_mismatch_error_dvr_to_legacy(self):
        self._register_l3_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.agent_id1,
            expected_exception=l3agent.RouterL3AgentMismatch)
    def test_add_router_to_l3_agent_mismatch_error_legacy_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.RouterL3AgentMismatch)
    def test_add_router_to_l3_agent_mismatch_error_dvr_to_dvr(self):
        self._register_l3_dvr_agents()
        self._prepare_l3_agent_dvr_move_exceptions(
            distributed=True,
            agent_id=self.l3_dvr_agent_id,
            expected_exception=l3agent.DVRL3CannotAssignToDvrAgent)
    def test_add_router_to_l3_agent_dvr_to_snat(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._register_l3_dvr_agents()
        agent_id = self.l3_dvr_snat_id
        agent = self.l3_dvr_snat_agent
        router = self._create_router_for_l3_agent_dvr_test(
            distributed=True,
            external_gw=external_gw_info)
        with contextlib.nested(
            mock.patch.object(self, "validate_agent_router_combination"),
            mock.patch.object(self, "create_router_to_agent_binding"),
            mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
                       return_value=router['router'])
        ) as (valid_agent_rtr, rtr_agent_binding, get_rtr):
            self.add_router_to_l3_agent(self.adminContext, agent_id,
                                        router['router']['id'])
            rtr_agent_binding.assert_called_once_with(
                self.adminContext, agent, router['router'])
    def test_add_router_to_l3_agent(self):
        self._test_add_router_to_l3_agent()
    def test_add_distributed_router_to_l3_agent(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          external_gw=external_gw_info)
    def test_add_router_to_l3_agent_already_scheduled(self):
        self._test_add_router_to_l3_agent(already_scheduled=True)
    def test_add_distributed_router_to_l3_agent_already_scheduled(self):
        external_gw_info = {
            "network_id": str(uuid.uuid4()),
            "enable_snat": True
        }
        self._test_add_router_to_l3_agent(distributed=True,
                                          already_scheduled=True,
                                          external_gw=external_gw_info)
    def _prepare_schedule_dvr_tests(self):
        """Build a ChanceScheduler, a live fake agent and a mock plugin."""
        scheduler = l3_agent_scheduler.ChanceScheduler()
        agent = agents_db.Agent()
        agent.admin_state_up = True
        agent.heartbeat_timestamp = timeutils.utcnow()
        plugin = mock.Mock()
        plugin.get_l3_agents_hosting_routers.return_value = []
        plugin.get_l3_agents.return_value = [agent]
        plugin.get_l3_agent_candidates.return_value = [agent]
        return scheduler, agent, plugin
    def test_schedule_dvr_router_without_snatbinding_and_no_gw(self):
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True
        }
        plugin.get_router.return_value = sync_router
        with contextlib.nested(
            mock.patch.object(scheduler, 'bind_router'),
            mock.patch.object(
                plugin, 'get_snat_bindings', return_value=False)
        ):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        # The scheduler must consult the plugin in exactly this order.
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.get_l3_agents_hosting_routers(
                mock.ANY, ['foo_router_id'], admin_state_up=True),
            mock.call.get_l3_agents(mock.ANY, active=True),
            mock.call.get_l3_agent_candidates(mock.ANY, sync_router, [agent]),
        ]
        plugin.assert_has_calls(expected_calls)
    def test_schedule_dvr_router_with_snatbinding_no_gw(self):
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {'id': 'foo_router_id',
                       'distributed': True}
        plugin.get_router.return_value = sync_router
        with mock.patch.object(plugin, 'get_snat_bindings', return_value=True):
            scheduler._schedule_router(
                plugin, self.adminContext, 'foo_router_id', None)
        # With an existing SNAT binding but no gateway, the binding must
        # be removed.
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.unbind_snat_servicenode(mock.ANY, 'foo_router_id'),
        ]
        plugin.assert_has_calls(expected_calls)
    def test_schedule_router_distributed(self):
        scheduler, agent, plugin = self._prepare_schedule_dvr_tests()
        sync_router = {
            'id': 'foo_router_id',
            'distributed': True,
            'external_gateway_info': {
                'network_id': str(uuid.uuid4()),
                'enable_snat': True
            }
        }
        plugin.get_router.return_value = sync_router
        with mock.patch.object(
            plugin, 'get_snat_bindings', return_value=False):
                scheduler._schedule_router(
                    plugin, self.adminContext, 'foo_router_id', None)
        expected_calls = [
            mock.call.get_router(mock.ANY, 'foo_router_id'),
            mock.call.schedule_snat_router(
                mock.ANY, 'foo_router_id', sync_router),
        ]
        plugin.assert_has_calls(expected_calls)
    def _test_schedule_bind_router(self, agent, router):
        """Bind *router* to *agent* and verify the binding row exists."""
        ctx = self.adminContext
        session = ctx.session
        db = l3_agentschedulers_db.RouterL3AgentBinding
        scheduler = l3_agent_scheduler.ChanceScheduler()
        rid = router['router']['id']
        scheduler.bind_router(ctx, rid, agent)
        results = (session.query(db).filter_by(router_id=rid).all())
        self.assertTrue(len(results) > 0)
        self.assertIn(agent.id, [bind.l3_agent_id for bind in results])
    def test_bind_new_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('is scheduled', args[0])
    def test_bind_absent_router(self):
        scheduler = l3_agent_scheduler.ChanceScheduler()
        # checking that bind_router() is not throwing
        # when supplied with router_id of non-existing router
        scheduler.bind_router(self.adminContext, "dummyID", self.agent1)
    def test_bind_existing_router(self):
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        self._test_schedule_bind_router(self.agent1, router)
        with mock.patch.object(l3_agent_scheduler.LOG, 'debug') as flog:
            self._test_schedule_bind_router(self.agent1, router)
            self.assertEqual(1, flog.call_count)
            args, kwargs = flog.call_args
            self.assertIn('has already been scheduled', args[0])
    def _check_get_l3_agent_candidates(
            self, router, agent_list, exp_host, count=1):
        """Assert candidate selection returns *count* agents on *exp_host*."""
        candidates = self.get_l3_agent_candidates(self.adminContext,
                                                  router, agent_list)
        self.assertEqual(len(candidates), count)
        if count:
            self.assertEqual(candidates[0]['host'], exp_host)
    def test_get_l3_agent_candidates_legacy(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]
        # test legacy agent_mode case: only legacy agent should be candidate
        router['distributed'] = False
        exp_host = 'host_1'
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
    def test_get_l3_agent_candidates_dvr(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]
        # test dvr agent_mode case only dvr agent should be candidate
        router['distributed'] = True
        exp_host = DVR_L3_AGENT.get('host')
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
    def test_get_l3_agent_candidates_dvr_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        agent_list = [self.agent1, self.l3_dvr_agent]
        exp_host = DVR_L3_AGENT.get('host')
        router['distributed'] = True
        # Test no VMs present case
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
        self._check_get_l3_agent_candidates(
            router, agent_list, exp_host, count=0)
    def test_get_l3_agent_candidates_dvr_snat(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        router['distributed'] = True
        agent_list = [self.l3_dvr_snat_agent]
        exp_host = DVR_SNAT_L3_AGENT.get('host')
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=True)
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
    def test_get_l3_agent_candidates_dvr_snat_no_vms(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        router['distributed'] = True
        agent_list = [self.l3_dvr_snat_agent]
        exp_host = DVR_SNAT_L3_AGENT.get('host')
        self.check_ports_exist_on_l3agent = mock.Mock(return_value=False)
        # Test no VMs present case
        # NOTE(review): the next line is redundant — the mock above was
        # already created with return_value=False.
        self.check_ports_exist_on_l3agent.return_value = False
        self._check_get_l3_agent_candidates(
            router, agent_list, exp_host, count=0)
    def test_get_l3_agent_candidates_centralized(self):
        self._register_l3_dvr_agents()
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        # check centralized test case
        router['distributed'] = False
        exp_host = DVR_SNAT_L3_AGENT.get('host')
        agent_list = [self.l3_dvr_snat_agent]
        self._check_get_l3_agent_candidates(router, agent_list, exp_host)
    def _prepare_check_ports_exist_tests(self):
        """Return a fake live agent and a router; stub port/subnet lookups."""
        l3_agent = agents_db.Agent()
        l3_agent.admin_state_up = True
        l3_agent.host = 'host_1'
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r2')
        router['external_gateway_info'] = None
        router['id'] = str(uuid.uuid4())
        self.plugin.get_ports = mock.Mock(return_value=[])
        self.get_subnet_ids_on_router = mock.Mock(return_value=[])
        return l3_agent, router
    def test_check_ports_exist_on_l3agent_no_subnets(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        # no subnets
        val = self.check_ports_exist_on_l3agent(self.adminContext,
                                                l3_agent, router['id'])
        self.assertFalse(val)
    def test_check_ports_exist_on_l3agent_if_no_subnets_then_return(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        with mock.patch.object(manager.NeutronManager,
                               'get_plugin') as getp:
            getp.return_value = self.plugin
            # no subnets and operation is remove_router_interface,
            # so return immediately without calling get_ports
            self.check_ports_exist_on_l3agent(self.adminContext,
                                              l3_agent, router['id'])
        self.assertFalse(self.plugin.get_ports.called)
    def test_check_ports_exist_on_l3agent_no_subnet_match(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        # no matching subnet
        self.plugin.get_subnet_ids_on_router = mock.Mock(
            return_value=[str(uuid.uuid4())])
        val = self.check_ports_exist_on_l3agent(self.adminContext,
                                                l3_agent, router['id'])
        self.assertFalse(val)
    def test_check_ports_exist_on_l3agent_subnet_match(self):
        l3_agent, router = self._prepare_check_ports_exist_tests()
        # matching subnet
        port = {'subnet_id': str(uuid.uuid4()),
                'binding:host_id': 'host_1',
                'device_owner': 'compute:',
                'id': 1234}
        self.plugin.get_ports.return_value = [port]
        self.get_subnet_ids_on_router = mock.Mock(
            return_value=[port['subnet_id']])
        val = self.check_ports_exist_on_l3agent(self.adminContext,
                                                l3_agent, router['id'])
        self.assertTrue(val)
    def test_get_l3_agents_hosting_routers(self):
        agent = self._register_l3_agent('host_6')
        router = self._make_router(self.fmt,
                                   tenant_id=str(uuid.uuid4()),
                                   name='r1')
        ctx = self.adminContext
        router_id = router['router']['id']
        self.plugin.router_scheduler.bind_router(ctx, router_id, agent)
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id])
        self.assertEqual([agent.id], [agt.id for agt in agents])
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id],
                                                    admin_state_up=True)
        self.assertEqual([agent.id], [agt.id for agt in agents])
        self._set_l3_agent_admin_state(ctx, agent.id, False)
        # Without the admin_state_up filter the down agent is still listed.
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id])
        self.assertEqual([agent.id], [agt.id for agt in agents])
        agents = self.get_l3_agents_hosting_routers(ctx,
                                                    [router_id],
                                                    admin_state_up=True)
        self.assertEqual([], agents)
class L3SchedulerTestCase(l3_agentschedulers_db.L3AgentSchedulerDbMixin,
                          l3_db.L3_NAT_db_mixin,
                          common_db_mixin.CommonDbMixin,
                          test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
                          test_l3.L3NatTestCaseMixin,
                          L3SchedulerBaseMixin,
                          L3SchedulerTestBaseMixin):
    """Concrete L3 scheduler test case wiring the db and test mixins."""
    def setUp(self):
        self.mock_rescheduling = False
        ext_mgr = test_l3.L3TestExtensionManager()
        plugin_str = ('neutron.tests.unit.extensions.test_l3.'
                      'TestL3NatIntAgentSchedulingPlugin')
        super(L3SchedulerTestCase, self).setUp(plugin=plugin_str,
                                               ext_mgr=ext_mgr)
        self.adminContext = q_context.get_admin_context()
        self.plugin = manager.NeutronManager.get_plugin()
        # Default to the chance scheduler; subclasses may swap it out.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
        )
        self._register_l3_agents()
class L3AgentChanceSchedulerTestCase(L3SchedulerTestCase):
    """End-to-end tests for the ChanceScheduler."""

    def test_random_scheduling(self):
        """Routers are placed via random.choice over candidate agents."""
        def side_effect(seq):
            return seq[0]

        # Patch via a context manager so the patch is always undone, even
        # when an assertion fails.  The previous start()/stop() pair leaked
        # the random.choice patch on test failure, which could poison
        # unrelated tests in the same process.
        with mock.patch('random.choice',
                        side_effect=side_effect) as random_mock:
            with self.subnet() as subnet:
                self._set_net_external(subnet['subnet']['network_id'])
                with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r1['router']['id']],
                        admin_state_up=True)

                    self.assertEqual(len(agents), 1)
                    self.assertEqual(random_mock.call_count, 1)

                    with self.router_with_ext_gw(name='r2',
                                                 subnet=subnet) as r2:
                        agents = self.get_l3_agents_hosting_routers(
                            self.adminContext, [r2['router']['id']],
                            admin_state_up=True)

                        self.assertEqual(len(agents), 1)
                        self.assertEqual(random_mock.call_count, 2)

    def test_scheduler_auto_schedule_when_agent_added(self):
        """A router left unscheduled is picked up once an agent comes up."""
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id1, False)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)

        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                # No agents are up, so nothing can host the router yet.
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(0, len(agents))

                self._set_l3_agent_admin_state(self.adminContext,
                                               self.agent_id1, True)
                self.plugin.auto_schedule_routers(self.adminContext,
                                                  'host_1',
                                                  [r1['router']['id']])

                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual('host_1', agents[0]['host'])
class L3AgentLeastRoutersSchedulerTestCase(L3SchedulerTestCase):
    """End-to-end tests for the LeastRoutersScheduler."""
    def setUp(self):
        super(L3AgentLeastRoutersSchedulerTestCase, self).setUp()
        # Replace the default chance scheduler installed by the base class.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
        )
    def test_scheduler(self):
        # disable one agent to force the scheduling to the only one.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)
        with self.subnet() as subnet:
            self._set_net_external(subnet['subnet']['network_id'])
            with self.router_with_ext_gw(name='r1', subnet=subnet) as r1:
                agents = self.get_l3_agents_hosting_routers(
                    self.adminContext, [r1['router']['id']],
                    admin_state_up=True)
                self.assertEqual(len(agents), 1)
                agent_id1 = agents[0]['id']
                with self.router_with_ext_gw(name='r2', subnet=subnet) as r2:
                    agents = self.get_l3_agents_hosting_routers(
                        self.adminContext, [r2['router']['id']],
                        admin_state_up=True)
                    self.assertEqual(len(agents), 1)
                    agent_id2 = agents[0]['id']
                    # Only one agent is up, so both routers land on it.
                    self.assertEqual(agent_id1, agent_id2)
                    # re-enable the second agent to see whether the next router
                    # spawned will be on this one.
                    self._set_l3_agent_admin_state(self.adminContext,
                                                   self.agent_id2, True)
                    with self.router_with_ext_gw(name='r3',
                                                 subnet=subnet) as r3:
                        agents = self.get_l3_agents_hosting_routers(
                            self.adminContext, [r3['router']['id']],
                            admin_state_up=True)
                        self.assertEqual(len(agents), 1)
                        agent_id3 = agents[0]['id']
                        # The least-loaded (freshly enabled) agent wins.
                        self.assertNotEqual(agent_id1, agent_id3)
class L3DvrScheduler(l3_db.L3_NAT_db_mixin,
                     l3_dvrscheduler_db.L3_DVRsch_db_mixin):
    """Minimal device-under-test combining the NAT db and DVR scheduler mixins."""
    pass
class L3DvrSchedulerTestCase(testlib_api.SqlTestCase):
def setUp(self):
plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
self.setup_coreplugin(plugin)
super(L3DvrSchedulerTestCase, self).setUp()
self.adminContext = q_context.get_admin_context()
self.dut = L3DvrScheduler()
def test__notify_port_delete(self):
plugin = manager.NeutronManager.get_plugin()
l3plugin = mock.Mock()
l3plugin.supported_extension_aliases = [
'router', constants.L3_AGENT_SCHEDULER_EXT_ALIAS,
constants.L3_DISTRIBUTED_EXT_ALIAS
]
with mock.patch.object(manager.NeutronManager,
'get_service_plugins',
return_value={'L3_ROUTER_NAT': l3plugin}):
kwargs = {
'context': self.adminContext,
'port': mock.ANY,
'removed_routers': [
{'agent_id': 'foo_agent', 'router_id': 'foo_id'},
],
}
l3_dvrscheduler_db._notify_port_delete(
'port', 'after_delete', plugin, **kwargs)
l3plugin.dvr_vmarp_table_update.assert_called_once_with(
self.adminContext, mock.ANY, 'del')
l3plugin.remove_router_from_l3_agent.assert_called_once_with(
self.adminContext, 'foo_agent', 'foo_id')
def test_dvr_update_router_addvm(self):
port = {
'device_id': 'abcd',
'device_owner': 'compute:nova',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.3'
}
]
}
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]),
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=mock.Mock()),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=r1),
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI')):
self.dut.dvr_update_router_addvm(self.adminContext, port)
def test_get_dvr_routers_by_portid(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_port', return_value=dvr_port),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port])):
router_id = self.dut.get_dvr_routers_by_portid(self.adminContext,
dvr_port['id'])
self.assertEqual(router_id.pop(), r1['id'])
def test_get_subnet_ids_on_router(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'device_owner': 'network:router_interface_distributed',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port])):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
self.assertEqual(sub_ids.pop(),
dvr_port.get('fixed_ips').pop(0).get('subnet_id'))
def test_check_ports_active_on_host_and_subnet(self):
dvr_port = {
'id': 'dvr_port1',
'device_id': 'r1',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': 'compute:nova',
'fixed_ips': [
{
'subnet_id': '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
'ip_address': '10.10.10.1'
}
]
}
r1 = {
'id': 'r1',
'distributed': True,
}
with contextlib.nested(
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2'
'.get_ports', return_value=[dvr_port]),
mock.patch('neutron.manager.NeutronManager.get_service_plugins',
return_value=mock.Mock()),
mock.patch('neutron.db.l3_db.L3_NAT_db_mixin.get_router',
return_value=r1),
mock.patch('neutron.api.rpc.agentnotifiers.l3_rpc_agent_api'
'.L3AgentNotifyAPI')):
sub_ids = self.dut.get_subnet_ids_on_router(self.adminContext,
r1['id'])
result = self.dut.check_ports_active_on_host_and_subnet(
self.adminContext,
'thisHost', 'dvr_port1',
sub_ids)
self.assertFalse(result)
def _test_dvr_serviced_port_exists_on_subnet(self, port):
with mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', return_value=[port]):
result = self.dut.check_ports_active_on_host_and_subnet(
self.adminContext,
'thisHost',
'dvr1-intf-id',
'my-subnet-id')
self.assertTrue(result)
def test_dvr_serviced_vip_port_exists_on_subnet(self):
vip_port = {
'id': 'lbaas-vip-port1',
'device_id': 'vip-pool-id',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': constants.DEVICE_OWNER_LOADBALANCER,
'fixed_ips': [
{
'subnet_id': 'my-subnet-id',
'ip_address': '10.10.10.1'
}
]
}
self._test_dvr_serviced_port_exists_on_subnet(port=vip_port)
def _create_port(self, port_name, tenant_id, host, subnet_id, ip_address,
status='ACTIVE',
device_owner='compute:nova'):
return {
'id': port_name + '-port-id',
'tenant_id': tenant_id,
'device_id': port_name,
'device_owner': device_owner,
'status': status,
'binding:host_id': host,
'fixed_ips': [
{
'subnet_id': subnet_id,
'ip_address': ip_address
}
]
}
def test_dvr_deletens_if_no_port_no_routers(self):
# Delete a vm port, the port subnet has no router interface.
vm_tenant_id = 'tenant-1'
my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False)
vm_port_host = 'compute-node-1'
vm_port = self._create_port(
'deleted-vm', vm_tenant_id, vm_port_host,
'shared-subnet', '10.10.10.3',
status='INACTIVE')
vm_port_id = vm_port['id']
fakePortDB = FakePortDB([vm_port])
with contextlib.nested(
mock.patch.object(my_context, 'elevated',
return_value=self.adminContext),
mock.patch('neutron.plugins.ml2.db.'
'get_port_binding_host', return_value=vm_port_host),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', side_effect=fakePortDB.get_ports),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_port', return_value=vm_port)) as (
_, mock_get_port_binding_host, _, _):
routers = self.dut.dvr_deletens_if_no_port(my_context, vm_port_id)
self.assertEqual([], routers)
mock_get_port_binding_host.assert_called_once_with(
self.adminContext.session, vm_port_id)
def test_dvr_deletens_if_no_ports_no_removeable_routers(self):
# A VM port is deleted, but the router can't be unscheduled from the
# compute node because there is another VM port present.
vm_tenant_id = 'tenant-1'
my_context = q_context.Context('user-1', vm_tenant_id, is_admin=False)
shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
vm_port_host = 'compute-node-1'
dvr_port = self._create_port(
'dvr-router', 'admin-tenant', vm_port_host,
shared_subnet_id, '10.10.10.1',
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
deleted_vm_port = self._create_port(
'deleted-vm', vm_tenant_id, vm_port_host,
shared_subnet_id, '10.10.10.3',
status='INACTIVE')
deleted_vm_port_id = deleted_vm_port['id']
running_vm_port = self._create_port(
'running-vn', 'tenant-2', vm_port_host,
shared_subnet_id, '10.10.10.33')
fakePortDB = FakePortDB([running_vm_port, deleted_vm_port, dvr_port])
vm_port_binding = {
'port_id': deleted_vm_port_id,
'host': vm_port_host
}
with contextlib.nested(
mock.patch.object(my_context, 'elevated',
return_value=self.adminContext),
mock.patch('neutron.plugins.ml2.db.get_port_binding_host',
return_value=vm_port_host),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_port', side_effect=fakePortDB.get_port),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', side_effect=fakePortDB.get_ports),
mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host',
return_value=vm_port_binding)) as (_,
mock_get_port_binding_host, _,
mock_get_ports,
mock_get_dvr_port_binding_by_host):
routers = self.dut.dvr_deletens_if_no_port(
my_context, deleted_vm_port_id)
self.assertEqual([], routers)
mock_get_port_binding_host.assert_called_once_with(
self.adminContext.session, deleted_vm_port_id)
self.assertTrue(mock_get_ports.called)
self.assertFalse(mock_get_dvr_port_binding_by_host.called)
def _test_dvr_deletens_if_no_ports_delete_routers(self,
vm_tenant,
router_tenant):
class FakeAgent(object):
def __init__(self, id, host, agent_type):
self.id = id
self.host = host
self.agent_type = agent_type
my_context = q_context.Context('user-1', vm_tenant, is_admin=False)
shared_subnet_id = '80947d4a-fbc8-484b-9f92-623a6bfcf3e0',
vm_port_host = 'compute-node-1'
router_id = 'dvr-router'
dvr_port = self._create_port(
router_id, router_tenant, vm_port_host,
shared_subnet_id, '10.10.10.1',
device_owner=constants.DEVICE_OWNER_DVR_INTERFACE)
dvr_port_id = dvr_port['id']
deleted_vm_port = self._create_port(
'deleted-vm', vm_tenant, vm_port_host,
shared_subnet_id, '10.10.10.3',
status='INACTIVE')
deleted_vm_port_id = deleted_vm_port['id']
running_vm_port = self._create_port(
'running-vn', vm_tenant, 'compute-node-2',
shared_subnet_id, '10.10.10.33')
fakePortDB = FakePortDB([running_vm_port, dvr_port, deleted_vm_port])
dvr_port_binding = {
'port_id': dvr_port_id, 'host': vm_port_host
}
agent_id = 'l3-agent-on-compute-node-1'
l3_agent_on_vm_host = FakeAgent(agent_id,
vm_port_host,
constants.AGENT_TYPE_L3)
with contextlib.nested(
mock.patch.object(my_context, 'elevated',
return_value=self.adminContext),
mock.patch('neutron.plugins.ml2.db.get_port_binding_host',
return_value=vm_port_host),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_port', side_effect=fakePortDB.get_port),
mock.patch('neutron.db.db_base_plugin_v2.NeutronDbPluginV2.'
'get_ports', side_effect=fakePortDB.get_ports),
mock.patch('neutron.plugins.ml2.db.get_dvr_port_binding_by_host',
return_value=dvr_port_binding),
mock.patch('neutron.db.agents_db.AgentDbMixin.'
'_get_agent_by_type_and_host',
return_value=l3_agent_on_vm_host)) as (_,
mock_get_port_binding_host, _,
mock_get_ports,
mock_get_dvr_port_binding_by_host,
mock__get_agent_by_type_and_host):
routers = self.dut.dvr_deletens_if_no_port(
my_context, deleted_vm_port_id)
expected_router = {
'router_id': router_id,
'host': vm_port_host,
'agent_id': agent_id
}
self.assertEqual([expected_router], routers)
mock_get_port_binding_host.assert_called_once_with(
self.adminContext.session, deleted_vm_port_id)
self.assertTrue(mock_get_ports.called)
mock_get_dvr_port_binding_by_host.assert_called_once_with(
my_context.session, dvr_port_id, vm_port_host)
def test_dvr_deletens_if_no_ports_delete_admin_routers(self):
# test to see whether the last VM using a router created
# by the admin will be unscheduled on the compute node
self._test_dvr_deletens_if_no_ports_delete_routers(
'tenant-1', 'admin-tenant')
def test_dvr_deletens_if_no_ports_delete_tenant_routers(self):
# test to see whether the last VM using a tenant's private
# router will be unscheduled on the compute node
self._test_dvr_deletens_if_no_ports_delete_routers(
'tenant-1', 'tenant-1')
def test_dvr_serviced_dhcp_port_exists_on_subnet(self):
dhcp_port = {
'id': 'dhcp-port1',
'device_id': 'dhcp-net-id',
'status': 'ACTIVE',
'binding:host_id': 'thisHost',
'device_owner': constants.DEVICE_OWNER_DHCP,
'fixed_ips': [
{
'subnet_id': 'my-subnet-id',
'ip_address': '10.10.10.2'
}
]
}
self._test_dvr_serviced_port_exists_on_subnet(port=dhcp_port)
def _prepare_schedule_snat_tests(self):
agent = agents_db.Agent()
agent.admin_state_up = True
agent.heartbeat_timestamp = timeutils.utcnow()
router = {
'id': 'foo_router_id',
'distributed': True,
'external_gateway_info': {
'network_id': str(uuid.uuid4()),
'enable_snat': True
}
}
return agent, router
def test_schedule_snat_router_duplicate_entry(self):
self._prepare_schedule_snat_tests()
with contextlib.nested(
mock.patch.object(self.dut, 'get_l3_agents'),
mock.patch.object(self.dut, 'get_snat_candidates'),
mock.patch.object(self.dut, 'bind_snat_servicenode',
side_effect=db_exc.DBDuplicateEntry()),
mock.patch.object(self.dut, 'bind_dvr_router_servicenode')
) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr):
self.dut.schedule_snat_router(self.adminContext, 'foo', 'bar')
self.assertTrue(mock_bind_snat.called)
self.assertFalse(mock_bind_dvr.called)
def test_schedule_snat_router_return_value(self):
agent, router = self._prepare_schedule_snat_tests()
with contextlib.nested(
mock.patch.object(self.dut, 'get_l3_agents'),
mock.patch.object(self.dut, 'get_snat_candidates'),
mock.patch.object(self.dut, 'bind_snat_servicenode'),
mock.patch.object(self.dut, 'bind_dvr_router_servicenode')
) as (mock_gl3, mock_snat_canidates, mock_bind_snat, mock_bind_dvr):
mock_snat_canidates.return_value = [agent]
mock_bind_snat.return_value = [agent]
mock_bind_dvr.return_value = [agent]
chosen_agent = self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', router)
self.assertEqual(chosen_agent, [agent])
def test_schedule_router_unbind_snat_servicenode_negativetest(self):
router = {
'id': 'foo_router_id',
'distributed': True
}
with contextlib.nested(
mock.patch.object(self.dut, 'get_router'),
mock.patch.object(self.dut, 'get_snat_bindings'),
mock.patch.object(self.dut, 'unbind_snat_servicenode')
) as (mock_rd, mock_snat_bind, mock_unbind):
mock_rd.return_value = router
mock_snat_bind.return_value = False
self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', router)
self.assertFalse(mock_unbind.called)
def test_schedule_snat_router_with_snat_candidates(self):
agent, router = self._prepare_schedule_snat_tests()
with contextlib.nested(
mock.patch.object(query.Query, 'first'),
mock.patch.object(self.dut, 'get_l3_agents'),
mock.patch.object(self.dut, 'get_snat_candidates'),
mock.patch.object(self.dut, 'get_router'),
mock.patch.object(self.dut, 'bind_dvr_router_servicenode'),
mock.patch.object(self.dut, 'bind_snat_servicenode')) as (
mock_query, mock_agents,
mock_candidates, mock_rd, mock_dvr, mock_bind):
mock_rd.return_value = router
mock_query.return_value = []
mock_agents.return_value = [agent]
mock_candidates.return_value = [agent]
self.dut.schedule_snat_router(
self.adminContext, 'foo_router_id', mock.ANY)
mock_bind.assert_called_once_with(
self.adminContext, 'foo_router_id', [agent])
def test_unbind_snat_servicenode(self):
    """Unbinding a SNAT service node drops the DB binding and touches
    both the core plugin and the L3 RPC notifier."""
    router_id = 'foo_router_id'
    # Replace the lazily-resolved core-plugin property on the class so
    # the host port lookup below reports no service ports left.
    core_plugin = mock.PropertyMock()
    type(self.dut)._core_plugin = core_plugin
    (self.dut._core_plugin.get_ports_on_host_by_subnet.
     return_value) = []
    # Forget the access made while configuring the return value above.
    core_plugin.reset_mock()
    l3_notifier = mock.PropertyMock()
    type(self.dut).l3_rpc_notifier = l3_notifier
    binding = l3_dvrscheduler_db.CentralizedSnatL3AgentBinding(
        router_id=router_id, l3_agent_id='foo_l3_agent_id',
        l3_agent=agents_db.Agent())
    with contextlib.nested(
        mock.patch.object(query.Query, 'one'),
        mock.patch.object(self.adminContext.session, 'delete'),
        mock.patch.object(query.Query, 'delete'),
        mock.patch.object(self.dut, 'get_subnet_ids_on_router')) as (
            mock_query, mock_session, mock_delete, mock_get_subnets):
        mock_query.return_value = binding
        mock_get_subnets.return_value = ['foo_subnet_id']
        self.dut.unbind_snat_servicenode(self.adminContext, router_id)
    mock_get_subnets.assert_called_with(self.adminContext, router_id)
    # Both the session delete and the query delete paths must have run.
    self.assertTrue(mock_session.call_count)
    self.assertTrue(mock_delete.call_count)
    # Each patched property is read exactly once during the unbind.
    core_plugin.assert_called_once_with()
    l3_notifier.assert_called_once_with()
class L3HAPlugin(db_v2.NeutronDbPluginV2,
                 l3_hamode_db.L3_HA_NAT_db_mixin,
                 l3_hascheduler_db.L3_HA_scheduler_db_mixin):
    """Minimal plugin combining core DB support with the HA L3 mixins,
    used as the system under test for HA scheduling cases."""

    supported_extension_aliases = ["l3-ha"]
class L3HATestCaseMixin(testlib_api.SqlTestCase,
                        L3SchedulerBaseMixin):
    """Shared fixture for HA router scheduling tests: sets up an admin
    context, an :class:`L3HAPlugin` with the ChanceScheduler, and two
    registered L3 agents."""

    def setUp(self):
        super(L3HATestCaseMixin, self).setUp()

        self.adminContext = q_context.get_admin_context()
        self.plugin = L3HAPlugin()

        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        # HA interface notifications would hit RPC; silence them.
        mock.patch.object(l3_hamode_db.L3_HA_NAT_db_mixin,
                          '_notify_ha_interfaces_updated').start()
        # 0 means "no cap": schedule replicas to every available agent.
        cfg.CONF.set_override('max_l3_agents_per_router', 0)
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.ChanceScheduler'
        )

        self._register_l3_agents()

    def _create_ha_router(self, ha=True, tenant_id='tenant1'):
        # ha=None omits the attribute so the server-side default applies.
        self.adminContext.tenant_id = tenant_id
        router = {'name': 'router1', 'admin_state_up': True}
        if ha is not None:
            router['ha'] = ha
        return self.plugin.create_router(self.adminContext,
                                         {'router': router})
class L3_HA_scheduler_db_mixinTestCase(L3HATestCaseMixin):
    """Tests for the HA-aware scheduler DB queries."""

    def _register_l3_agents(self, plugin=None):
        # Two extra agents on top of the two from the base mixin, so HA
        # routers can spread across four hosts.
        super(L3_HA_scheduler_db_mixinTestCase,
              self)._register_l3_agents(plugin=plugin)

        self.agent3 = self._register_l3_agent('host_3', plugin=plugin)
        self.agent_id3 = self.agent3.id

        self.agent4 = self._register_l3_agent('host_4', plugin=plugin)
        self.agent_id4 = self.agent4.id

    def test_get_ha_routers_l3_agents_count(self):
        router1 = self._create_ha_router()
        router2 = self._create_ha_router()
        router3 = self._create_ha_router(ha=False)
        self.plugin.schedule_router(self.adminContext, router1['id'])
        self.plugin.schedule_router(self.adminContext, router2['id'])
        self.plugin.schedule_router(self.adminContext, router3['id'])

        result = self.plugin.get_ha_routers_l3_agents_count(
            self.adminContext).all()

        # Only the two HA routers are reported; each is hosted by all
        # four agents because max_l3_agents_per_router is 0 (no cap).
        self.assertEqual(2, len(result))
        self.assertIn((router1['id'], router1['tenant_id'], 4), result)
        self.assertIn((router2['id'], router2['tenant_id'], 4), result)
        self.assertNotIn((router3['id'], router3['tenant_id'], mock.ANY),
                         result)

    def test_get_ordered_l3_agents_by_num_routers(self):
        router1 = self._create_ha_router()
        router2 = self._create_ha_router()
        router3 = self._create_ha_router(ha=False)
        router4 = self._create_ha_router(ha=False)

        # Agent 1 will host 0 routers, agent 2 will host 1, agent 3 will
        # host 2, and agent 4 will host 3.
        self.plugin.schedule_router(self.adminContext, router1['id'],
                                    candidates=[self.agent2, self.agent4])
        self.plugin.schedule_router(self.adminContext, router2['id'],
                                    candidates=[self.agent3, self.agent4])
        self.plugin.schedule_router(self.adminContext, router3['id'],
                                    candidates=[self.agent3])
        self.plugin.schedule_router(self.adminContext, router4['id'],
                                    candidates=[self.agent4])

        agent_ids = [self.agent_id1, self.agent_id2, self.agent_id3,
                     self.agent_id4]
        result = self.plugin.get_l3_agents_ordered_by_num_routers(
            self.adminContext, agent_ids)

        # Result is ordered ascending by the number of hosted routers,
        # which by construction matches the registration order above.
        self.assertEqual(agent_ids, [record['id'] for record in result])
class L3AgentSchedulerDbMixinTestCase(L3HATestCaseMixin):
    """Tests around rescheduling and agent listing for HA routers."""

    def test_reschedule_ha_routers_from_down_agents(self):
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        self._set_l3_agent_dead(self.agent_id1)
        with mock.patch.object(self.plugin, 'reschedule_router') as reschedule:
            self.plugin.reschedule_routers_from_down_agents()
            # HA routers are not rescheduled away from a dead agent: the
            # surviving replica keeps serving traffic.
            self.assertFalse(reschedule.called)

    def test_list_l3_agents_hosting_ha_router(self):
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])

        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        # Before any state report, every replica is 'standby'.
        for agent in agents:
            self.assertEqual('standby', agent['ha_state'])

        self.plugin.update_routers_states(
            self.adminContext, {router['id']: 'active'}, self.agent1.host)

        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        for agent in agents:
            expected_state = ('active' if agent['host'] == self.agent1.host
                              else 'standby')
            self.assertEqual(expected_state, agent['ha_state'])

    def test_list_l3_agents_hosting_legacy_router(self):
        router = self._create_ha_router(ha=False)
        self.plugin.schedule_router(self.adminContext, router['id'])

        agents = self.plugin.list_l3_agents_hosting_router(
            self.adminContext, router['id'])['agents']
        # Legacy (non-HA) routers expose no HA state at all.
        for agent in agents:
            self.assertIsNone(agent['ha_state'])

    def test_get_agents_dict_for_router_unscheduled_returns_empty_list(self):
        self.assertEqual({'agents': []},
                         self.plugin._get_agents_dict_for_router([]))
class L3HAChanceSchedulerTestCase(L3HATestCaseMixin):
    """HA scheduling behaviour with the ChanceScheduler (random pick)."""

    def test_scheduler_with_ha_enabled(self):
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))

        # Each hosting agent must see exactly one router in its sync
        # data, carrying the HA interface.
        for agent in agents:
            sync_data = self.plugin.get_ha_sync_data_for_host(
                self.adminContext, router_ids=[router['id']],
                host=agent.host)
            self.assertEqual(1, len(sync_data))
            interface = sync_data[0][constants.HA_INTERFACE_KEY]
            self.assertIsNotNone(interface)

    def test_auto_schedule(self):
        router = self._create_ha_router()
        # Auto-scheduling per host binds one replica to each agent.
        self.plugin.auto_schedule_routers(
            self.adminContext, self.agent1.host, None)
        self.plugin.auto_schedule_routers(
            self.adminContext, self.agent2.host, None)
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']])
        self.assertEqual(2, len(agents))

    def test_auto_schedule_specific_router_when_agent_added(self):
        self._auto_schedule_when_agent_added(True)

    def test_auto_schedule_all_routers_when_agent_added(self):
        self._auto_schedule_when_agent_added(False)

    def _auto_schedule_when_agent_added(self, specific_router):
        # An HA router scheduled on two agents gains a third replica as
        # soon as a new agent appears, whether auto-scheduling targets
        # that specific router or all routers.
        router = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, router['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id1, agent_ids)
        self.assertIn(self.agent_id2, agent_ids)

        agent = self._register_l3_agent('host_3')
        self.agent_id3 = agent.id
        routers_to_auto_schedule = [router['id']] if specific_router else []
        self.plugin.auto_schedule_routers(self.adminContext,
                                          'host_3',
                                          routers_to_auto_schedule)

        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [router['id']],
            admin_state_up=True)
        self.assertEqual(3, len(agents))

        # Simulate agent restart to make sure we don't try to re-bind
        self.plugin.auto_schedule_routers(self.adminContext,
                                          'host_3',
                                          routers_to_auto_schedule)

    def test_scheduler_with_ha_enabled_not_enough_agent(self):
        r1 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))

        # With only one admin-up agent left, an HA router (which needs
        # at least two agents) cannot be scheduled at all.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, False)

        r2 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r2['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r2['id']],
            admin_state_up=True)
        self.assertEqual(0, len(agents))

        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id2, True)
class L3HALeastRoutersSchedulerTestCase(L3HATestCaseMixin):
    """HA scheduling behaviour with the LeastRoutersScheduler."""

    def _register_l3_agents(self, plugin=None):
        # Four agents in total; the base mixin registers the first two.
        super(L3HALeastRoutersSchedulerTestCase,
              self)._register_l3_agents(plugin=plugin)

        agent = self._register_l3_agent('host_3', plugin=plugin)
        self.agent_id3 = agent.id

        agent = self._register_l3_agent('host_4', plugin=plugin)
        self.agent_id4 = agent.id

    def setUp(self):
        super(L3HALeastRoutersSchedulerTestCase, self).setUp()
        # Override the mixin's ChanceScheduler with the load-based one.
        self.plugin.router_scheduler = importutils.import_object(
            'neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler'
        )

    def test_scheduler(self):
        cfg.CONF.set_override('max_l3_agents_per_router', 2)

        # Disable the third and fourth agents to be sure that the router
        # will be scheduled on the first two.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id3, False)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id4, False)

        r1 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r1['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r1['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id1, agent_ids)
        self.assertIn(self.agent_id2, agent_ids)

        # Re-enable agents 3 and 4: they now host fewer routers, so the
        # least-routers scheduler must prefer them for the next router.
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id3, True)
        self._set_l3_agent_admin_state(self.adminContext,
                                       self.agent_id4, True)

        r2 = self._create_ha_router()
        self.plugin.schedule_router(self.adminContext, r2['id'])
        agents = self.plugin.get_l3_agents_hosting_routers(
            self.adminContext, [r2['id']],
            admin_state_up=True)
        self.assertEqual(2, len(agents))
        agent_ids = [agent['id'] for agent in agents]
        self.assertIn(self.agent_id3, agent_ids)
        self.assertIn(self.agent_id4, agent_ids)
class TestGetL3AgentsWithAgentModeFilter(testlib_api.SqlTestCase,
                                         L3SchedulerBaseMixin):
    """Test cases to test get_l3_agents.

    This class tests the L3AgentSchedulerDbMixin.get_l3_agents()
    for the 'agent_mode' filter with various values.

    5 l3 agents are registered in the order - legacy, dvr_snat, dvr,
    fake_mode and legacy.
    """

    # testscenarios matrix: each entry provides the filter value and the
    # agent modes expected to survive filtering, in registration order.
    scenarios = [
        ('no filter',
         dict(agent_modes=[],
              expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
                                    'fake_mode', 'legacy'])),

        ('legacy',
         dict(agent_modes=['legacy'],
              expected_agent_modes=['legacy', 'legacy'])),

        ('dvr_snat',
         dict(agent_modes=['dvr_snat'],
              expected_agent_modes=['dvr_snat'])),

        ('dvr ',
         dict(agent_modes=['dvr'],
              expected_agent_modes=['dvr'])),

        ('legacy and dvr snat',
         dict(agent_modes=['legacy', 'dvr_snat', 'legacy'],
              expected_agent_modes=['legacy', 'dvr_snat', 'legacy'])),

        ('legacy and dvr',
         dict(agent_modes=['legacy', 'dvr'],
              expected_agent_modes=['legacy', 'dvr', 'legacy'])),

        ('dvr_snat and dvr',
         dict(agent_modes=['dvr_snat', 'dvr'],
              expected_agent_modes=['dvr_snat', 'dvr'])),

        ('legacy, dvr_snat and dvr',
         dict(agent_modes=['legacy', 'dvr_snat', 'dvr'],
              expected_agent_modes=['legacy', 'dvr_snat', 'dvr',
                                    'legacy'])),

        ('invalid',
         dict(agent_modes=['invalid'],
              expected_agent_modes=[])),
    ]

    def setUp(self):
        super(TestGetL3AgentsWithAgentModeFilter, self).setUp()
        self.plugin = L3HAPlugin()
        self.setup_coreplugin('neutron.plugins.ml2.plugin.Ml2Plugin')
        self.adminContext = q_context.get_admin_context()
        hosts = ['host_1', 'host_2', 'host_3', 'host_4', 'host_5']
        agent_modes = ['legacy', 'dvr_snat', 'dvr', 'fake_mode', 'legacy']
        for host, agent_mode in zip(hosts, agent_modes):
            self._register_l3_agent(host, agent_mode, self.plugin)

    def _get_agent_mode(self, agent):
        # agent_mode lives inside the agent's serialized configuration.
        agent_conf = self.plugin.get_configuration_dict(agent)
        return agent_conf.get('agent_mode', 'None')

    def test_get_l3_agents(self):
        l3_agents = self.plugin.get_l3_agents(
            self.adminContext, filters={'agent_modes': self.agent_modes})
        self.assertEqual(len(self.expected_agent_modes), len(l3_agents))
        returned_agent_modes = [self._get_agent_mode(agent)
                                for agent in l3_agents]
        self.assertEqual(self.expected_agent_modes, returned_agent_modes)
| {
"content_hash": "63bc09558bce77e32f2be6b9d36e9777",
"timestamp": "",
"source": "github",
"line_count": 1742,
"max_line_length": 79,
"avg_line_length": 42.8329506314581,
"alnum_prop": 0.554915231521812,
"repo_name": "Stavitsky/neutron",
"id": "c08991aafe29989ca4a10f9d43d4c29a9683ec70",
"size": "75255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/scheduler/test_l3_agent_scheduler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7134099"
},
{
"name": "Shell",
"bytes": "12319"
}
],
"symlink_target": ""
} |
"""Univariate features selection."""
# Authors: V. Michel, B. Thirion, G. Varoquaux, A. Gramfort, E. Duchesnay.
# L. Buitinck, A. Joly
# License: BSD 3 clause
import numpy as np
import warnings
from scipy import special, stats
from scipy.sparse import issparse
from ..base import BaseEstimator
from ..preprocessing import LabelBinarizer
from ..utils import (as_float_array, check_array, check_X_y, safe_sqr,
safe_mask)
from ..utils.extmath import norm, safe_sparse_dot, row_norms
from ..utils.validation import check_is_fitted
from .base import SelectorMixin
def _clean_nans(scores):
    """Replace NaN scores by the smallest value of the scores' dtype.

    NaNs cannot be compared reliably when ranking features (issue #1240),
    so they are mapped to the dtype minimum; -inf proved unreliable.
    """
    # XXX where should this function be called? fit? scoring functions
    # themselves?
    cleaned = as_float_array(scores, copy=True)
    nan_mask = np.isnan(cleaned)
    cleaned[nan_mask] = np.finfo(cleaned.dtype).min
    return cleaned
######################################################################
# Scoring functions
# The following function is a rewriting of scipy.stats.f_oneway
# Contrary to the scipy.stats.f_oneway implementation it does not
# copy the data while keeping the inputs unchanged.
def f_oneway(*args):
    """Performs a 1-way ANOVA.

    The one-way ANOVA tests the null hypothesis that 2 or more groups have
    the same population mean. The test is applied to samples from two or
    more groups, possibly with differing sizes.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    sample1, sample2, ... : array_like, sparse matrices
        The sample measurements should be given as arguments.

    Returns
    -------
    F-value : float
        The computed F-value of the test.
    p-value : float
        The associated p-value from the F-distribution.

    Notes
    -----
    The ANOVA test has important assumptions that must be satisfied in order
    for the associated p-value to be valid.

    1. The samples are independent
    2. Each sample is from a normally distributed population
    3. The population standard deviations of the groups are all equal. This
       property is known as homoscedasticity.

    If these assumptions are not true for a given set of data, it may still be
    possible to use the Kruskal-Wallis H-test (`scipy.stats.kruskal`_) although
    with some loss of power.

    The algorithm is from Heiman[2], pp.394-7.

    See ``scipy.stats.f_oneway`` that should give the same results while
    being less efficient.

    References
    ----------
    .. [1] Lowry, Richard.  "Concepts and Applications of Inferential
           Statistics". Chapter 14.
           http://faculty.vassar.edu/lowry/ch14pt1.html

    .. [2] Heiman, G.W.  Research Methods in Statistics. 2002.
    """
    n_classes = len(args)
    args = [as_float_array(a) for a in args]
    n_samples_per_class = np.array([a.shape[0] for a in args])
    n_samples = np.sum(n_samples_per_class)
    # Per-feature total sum of squares over all samples of all groups.
    ss_alldata = sum(safe_sqr(a).sum(axis=0) for a in args)
    sums_args = [np.asarray(a.sum(axis=0)) for a in args]
    square_of_sums_alldata = sum(sums_args) ** 2
    square_of_sums_args = [s ** 2 for s in sums_args]
    # sstot: total variation; ssbn: between-group; sswn: within-group.
    sstot = ss_alldata - square_of_sums_alldata / float(n_samples)
    ssbn = 0.
    for k, _ in enumerate(args):
        ssbn += square_of_sums_args[k] / n_samples_per_class[k]
    ssbn -= square_of_sums_alldata / float(n_samples)
    sswn = sstot - ssbn
    dfbn = n_classes - 1
    dfwn = n_samples - n_classes
    msb = ssbn / float(dfbn)
    msw = sswn / float(dfwn)
    # Features with zero within-group variance are constant; warn when
    # some (but not all) F numerators are non-zero, since for those
    # features the division below yields inf/nan.
    constant_features_idx = np.where(msw == 0.)[0]
    if (np.nonzero(msb)[0].size != msb.size and constant_features_idx.size):
        warnings.warn("Features %s are constant." % constant_features_idx,
                      UserWarning)
    f = msb / msw
    # flatten matrix to vector in sparse case
    f = np.asarray(f).ravel()
    prob = special.fdtrc(dfbn, dfwn, f)
    return f, prob
def f_classif(X, y):
    """Compute the ANOVA F-value for the provided sample.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} shape = [n_samples, n_features]
        The set of regressors that will be tested sequentially.

    y : array of shape(n_samples)
        The target vector.

    Returns
    -------
    F : array, shape = [n_features,]
        The set of F values.

    pval : array, shape = [n_features,]
        The set of p-values.

    See also
    --------
    chi2: Chi-squared stats of non-negative features for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    """
    X, y = check_X_y(X, y, ['csr', 'csc', 'coo'])
    # Split the rows of X into one group per class label, then run the
    # one-way ANOVA across those groups.
    groups = []
    for label in np.unique(y):
        groups.append(X[safe_mask(X, y == label)])
    return f_oneway(*groups)
def _chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
k = len(f_obs)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq, special.chdtrc(k - 1, chisq)
def chi2(X, y):
    """Compute chi-squared stats between each non-negative feature and class.

    This score can be used to select the n_features features with the
    highest values for the test chi-squared statistic from X, which must
    contain only non-negative features such as booleans or frequencies
    (e.g., term counts in document classification), relative to the classes.

    Recall that the chi-square test measures dependence between stochastic
    variables, so using this function "weeds out" the features that are the
    most likely to be independent of class and therefore irrelevant for
    classification.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape = (n_samples, n_features_in)
        Sample vectors.

    y : array-like, shape = (n_samples,)
        Target vector (class labels).

    Returns
    -------
    chi2 : array, shape = (n_features,)
        chi2 statistics of each feature.

    pval : array, shape = (n_features,)
        p-values of each feature.

    Notes
    -----
    Complexity of this algorithm is O(n_classes * n_features).

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    """
    # XXX: we might want to do some of the following in logspace instead for
    # numerical stability.
    X = check_array(X, accept_sparse='csr')
    data = X.data if issparse(X) else X
    if np.any(data < 0):
        raise ValueError("Input X must be non-negative.")

    # One-hot encode the labels; binary targets need an explicit column
    # for the negative class.
    Y = LabelBinarizer().fit_transform(y)
    if Y.shape[1] == 1:
        Y = np.append(1 - Y, Y, axis=1)

    observed = safe_sparse_dot(Y.T, X)          # n_classes * n_features

    # Expected counts under independence: class probability times the
    # total count of each feature.
    feature_count = X.sum(axis=0).reshape(1, -1)
    class_prob = Y.mean(axis=0).reshape(1, -1)
    expected = np.dot(class_prob.T, feature_count)

    return _chisquare(observed, expected)
def f_regression(X, y, center=True):
    """Univariate linear regression tests.

    Quick linear model for testing the effect of a single regressor,
    sequentially for many regressors.

    This is done in 3 steps:

    1. The regressor of interest and the data are orthogonalized
       wrt constant regressors.
    2. The cross correlation between data and regressors is computed.
    3. It is converted to an F score then to a p-value.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    X : {array-like, sparse matrix} shape = (n_samples, n_features)
        The set of regressors that will be tested sequentially.

    y : array of shape(n_samples).
        The target vector.

    center : True, bool,
        If true, X and y will be centered.

    Returns
    -------
    F : array, shape=(n_features,)
        F values of features.

    pval : array, shape=(n_features,)
        p-values of F-scores.

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    chi2: Chi-squared stats of non-negative features for classification tasks.
    """
    if issparse(X) and center:
        raise ValueError("center=True only allowed for dense data")
    X, y = check_X_y(X, y, ['csr', 'csc', 'coo'], dtype=np.float64)

    if center:
        # Orthogonalize y and each column of X against the constant
        # regressor by removing the means.
        y = y - np.mean(y)
        X = X.copy('F')  # faster in fortran
        X -= X.mean(axis=0)

    # Per-feature correlation between y and the columns of X.
    correlations = safe_sparse_dot(y, X)
    correlations /= row_norms(X.T)
    correlations /= norm(y)

    # Convert each correlation into an F statistic and its p-value.
    degrees_of_freedom = y.size - (2 if center else 1)
    corr_sq = correlations ** 2
    F = corr_sq / (1 - corr_sq) * degrees_of_freedom
    pv = stats.f.sf(F, 1, degrees_of_freedom)
    return F, pv
######################################################################
# Base classes
class _BaseFilter(BaseEstimator, SelectorMixin):
    """Base class for univariate feature selectors.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).
    """

    def __init__(self, score_func):
        self.score_func = score_func

    def fit(self, X, y):
        """Evaluate the score function on (X, y) and store the results.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            The training input samples.

        y : array-like, shape = [n_samples]
            The target values (class labels in classification, real numbers
            in regression).

        Returns
        -------
        self : object
            Returns self.
        """
        X, y = check_X_y(X, y, ['csr', 'csc'])

        if not callable(self.score_func):
            raise TypeError("The score function should be a callable, %s (%s) "
                            "was passed."
                            % (self.score_func, type(self.score_func)))

        self._check_params(X, y)
        scores, pvalues = self.score_func(X, y)
        self.scores_ = np.asarray(scores)
        self.pvalues_ = np.asarray(pvalues)
        return self

    def _check_params(self, X, y):
        # Hook for subclasses to validate their own hyper-parameters.
        pass
######################################################################
# Specific filters
######################################################################
class SelectPercentile(_BaseFilter):
    """Select features according to a percentile of the highest scores.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    percentile : int, optional, default=10
        Percent of features to keep.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    Notes
    -----
    Ties between features with equal scores will be broken in an unspecified
    way.

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    chi2: Chi-squared stats of non-negative features for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    SelectKBest: Select features based on the k highest scores.
    SelectFpr: Select features based on a false positive rate test.
    SelectFdr: Select features based on an estimated false discovery rate.
    SelectFwe: Select features based on family-wise error rate.
    GenericUnivariateSelect: Univariate feature selector with configurable mode.
    """

    def __init__(self, score_func=f_classif, percentile=10):
        super(SelectPercentile, self).__init__(score_func)
        self.percentile = percentile

    def _check_params(self, X, y):
        if not 0 <= self.percentile <= 100:
            raise ValueError("percentile should be >=0, <=100; got %r"
                             % self.percentile)

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        # Trivial percentiles short-circuit the threshold computation.
        # `bool` is used instead of the deprecated `np.bool` alias, which
        # was removed in NumPy 1.24 and made this code crash.
        if self.percentile == 100:
            return np.ones(len(self.scores_), dtype=bool)
        elif self.percentile == 0:
            return np.zeros(len(self.scores_), dtype=bool)

        # Cater for NaNs: map them to the smallest representable score so
        # they never pass the threshold.
        scores = _clean_nans(self.scores_)
        threshold = stats.scoreatpercentile(scores,
                                            100 - self.percentile)
        mask = scores > threshold
        ties = np.where(scores == threshold)[0]
        if len(ties):
            # Keep only enough tied features to honour the percentile.
            max_feats = len(scores) * self.percentile // 100
            kept_ties = ties[:max_feats - mask.sum()]
            mask[kept_ties] = True
        return mask
class SelectKBest(_BaseFilter):
    """Select features according to the k highest scores.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    k : int or "all", optional, default=10
        Number of top features to select.
        The "all" option bypasses selection, for use in a parameter search.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    Notes
    -----
    Ties between features with equal scores will be broken in an unspecified
    way.

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    chi2: Chi-squared stats of non-negative features for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    SelectPercentile: Select features based on percentile of the highest scores.
    SelectFpr: Select features based on a false positive rate test.
    SelectFdr: Select features based on an estimated false discovery rate.
    SelectFwe: Select features based on family-wise error rate.
    GenericUnivariateSelect: Univariate feature selector with configurable mode.
    """

    def __init__(self, score_func=f_classif, k=10):
        super(SelectKBest, self).__init__(score_func)
        self.k = k

    def _check_params(self, X, y):
        if not (self.k == "all" or 0 <= self.k <= X.shape[1]):
            # The two literals are concatenated; the trailing space after
            # '%r. ' keeps the sentences from running together.
            raise ValueError("k should be >=0, <= n_features; got %r. "
                             "Use k='all' to return all features."
                             % self.k)

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        if self.k == 'all':
            return np.ones(self.scores_.shape, dtype=bool)
        elif self.k == 0:
            return np.zeros(self.scores_.shape, dtype=bool)
        else:
            scores = _clean_nans(self.scores_)
            mask = np.zeros(scores.shape, dtype=bool)

            # Request a stable sort. Mergesort takes more memory (~40MB per
            # megafeature on x86-64).
            mask[np.argsort(scores, kind="mergesort")[-self.k:]] = True
            return mask
class SelectFpr(_BaseFilter):
    """Filter: Select the pvalues below alpha based on a FPR test.

    FPR test stands for False Positive Rate test. It controls the total
    amount of false detections.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest p-value for features to be kept.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    chi2: Chi-squared stats of non-negative features for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    SelectPercentile: Select features based on percentile of the highest scores.
    SelectKBest: Select features based on the k highest scores.
    SelectFdr: Select features based on an estimated false discovery rate.
    SelectFwe: Select features based on family-wise error rate.
    GenericUnivariateSelect: Univariate feature selector with configurable mode.
    """

    def __init__(self, score_func=f_classif, alpha=5e-2):
        super(SelectFpr, self).__init__(score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        # Keep every feature whose (uncorrected) p-value beats alpha.
        return self.pvalues_ < self.alpha
class SelectFdr(_BaseFilter):
    """Filter: Select the p-values for an estimated false discovery rate

    This uses the Benjamini-Hochberg procedure. ``alpha`` is an upper bound
    on the expected false discovery rate.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest uncorrected p-value for features to keep.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    References
    ----------
    http://en.wikipedia.org/wiki/False_discovery_rate

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    chi2: Chi-squared stats of non-negative features for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    SelectPercentile: Select features based on percentile of the highest scores.
    SelectKBest: Select features based on the k highest scores.
    SelectFpr: Select features based on a false positive rate test.
    SelectFwe: Select features based on family-wise error rate.
    GenericUnivariateSelect: Univariate feature selector with configurable mode.
    """

    def __init__(self, score_func=f_classif, alpha=5e-2):
        super(SelectFdr, self).__init__(score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        n_features = len(self.pvalues_)
        sv = np.sort(self.pvalues_)
        # Benjamini-Hochberg: compare the i-th smallest p-value against
        # alpha * i / n for i = 1..n. The ranks must be 1-based:
        # np.arange(n_features) started at 0, which made the first
        # threshold 0 and silently dropped the smallest p-value's slot.
        selected = sv[sv <= float(self.alpha) / n_features *
                      np.arange(1, n_features + 1)]
        if selected.size == 0:
            return np.zeros_like(self.pvalues_, dtype=bool)
        return self.pvalues_ <= selected.max()
class SelectFwe(_BaseFilter):
    """Filter: Select the p-values corresponding to Family-wise error rate

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    alpha : float, optional
        The highest uncorrected p-value for features to keep.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    chi2: Chi-squared stats of non-negative features for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    SelectPercentile: Select features based on percentile of the highest scores.
    SelectKBest: Select features based on the k highest scores.
    SelectFpr: Select features based on a false positive rate test.
    SelectFdr: Select features based on an estimated false discovery rate.
    GenericUnivariateSelect: Univariate feature selector with configurable mode.
    """

    def __init__(self, score_func=f_classif, alpha=5e-2):
        super(SelectFwe, self).__init__(score_func)
        self.alpha = alpha

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        # Bonferroni correction: divide alpha by the number of tests.
        corrected_alpha = self.alpha / len(self.pvalues_)
        return self.pvalues_ < corrected_alpha
######################################################################
# Generic filter
######################################################################
# TODO this class should fit on either p-values or scores,
# depending on the mode.
class GenericUnivariateSelect(_BaseFilter):
    """Univariate feature selector with configurable strategy.

    Read more in the :ref:`User Guide <univariate_feature_selection>`.

    Parameters
    ----------
    score_func : callable
        Function taking two arrays X and y, and returning a pair of arrays
        (scores, pvalues).

    mode : {'percentile', 'k_best', 'fpr', 'fdr', 'fwe'}
        Feature selection mode.

    param : float or int depending on the feature selection mode
        Parameter of the corresponding mode.

    Attributes
    ----------
    scores_ : array-like, shape=(n_features,)
        Scores of features.

    pvalues_ : array-like, shape=(n_features,)
        p-values of feature scores.

    See also
    --------
    f_classif: ANOVA F-value between label/feature for classification tasks.
    chi2: Chi-squared stats of non-negative features for classification tasks.
    f_regression: F-value between label/feature for regression tasks.
    SelectPercentile: Select features based on percentile of the highest scores.
    SelectKBest: Select features based on the k highest scores.
    SelectFpr: Select features based on a false positive rate test.
    SelectFdr: Select features based on an estimated false discovery rate.
    SelectFwe: Select features based on family-wise error rate.
    """

    _selection_modes = {'percentile': SelectPercentile,
                        'k_best': SelectKBest,
                        'fpr': SelectFpr,
                        'fdr': SelectFdr,
                        'fwe': SelectFwe}

    def __init__(self, score_func=f_classif, mode='percentile', param=1e-5):
        super(GenericUnivariateSelect, self).__init__(score_func)
        self.mode = mode
        self.param = param

    def _make_selector(self):
        selector_cls = self._selection_modes[self.mode]
        selector = selector_cls(score_func=self.score_func)

        # Every concrete selector exposes exactly one tuning parameter
        # besides ``score_func``; forward ``param`` to it by name.
        possible_params = selector._get_param_names()
        possible_params.remove('score_func')
        selector.set_params(**{possible_params[0]: self.param})

        return selector

    def _check_params(self, X, y):
        if self.mode not in self._selection_modes:
            raise ValueError("The mode passed should be one of %s, %r,"
                             " (type %s) was passed."
                             % (self._selection_modes.keys(), self.mode,
                                type(self.mode)))

        self._make_selector()._check_params(X, y)

    def _get_support_mask(self):
        check_is_fitted(self, 'scores_')

        # Delegate masking to a freshly built concrete selector, primed
        # with the scores/p-values computed by this estimator's fit().
        selector = self._make_selector()
        selector.pvalues_ = self.pvalues_
        selector.scores_ = self.scores_
        return selector._get_support_mask()
| {
"content_hash": "cca72530447b685f77c205bbe6ac916f",
"timestamp": "",
"source": "github",
"line_count": 707,
"max_line_length": 80,
"avg_line_length": 33.408769448373405,
"alnum_prop": 0.624428450465707,
"repo_name": "nmayorov/scikit-learn",
"id": "9bd8ca273a8dc4a0a9b611938650cd8c4324e84c",
"size": "23620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/feature_selection/univariate_selection.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "C",
"bytes": "385829"
},
{
"name": "C++",
"bytes": "139482"
},
{
"name": "Makefile",
"bytes": "1388"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5344495"
},
{
"name": "Shell",
"bytes": "4031"
}
],
"symlink_target": ""
} |
from __future__ import division
import argparse
import functools
import multiprocessing
import numpy as np
import random
import six
import chainer
from chainer.dataset.convert import _concat_arrays
from chainer.dataset.convert import to_device
import chainer.links as L
from chainer.training import extensions
from chainercv.chainer_experimental.datasets.sliceable \
import TransformDataset
from chainercv.chainer_experimental.training.extensions import make_shift
from chainercv.datasets import coco_bbox_label_names
from chainercv.datasets import COCOBboxDataset
from chainercv.links.model.light_head_rcnn import LightHeadRCNNResNet101
from chainercv.links.model.light_head_rcnn import LightHeadRCNNTrainChain
from chainercv.links.model.ssd import GradientScaling
from chainercv import transforms
import chainermn
# https://docs.chainer.org/en/stable/tips.html#my-training-process-gets-stuck-when-using-multiprocessiterator
try:
    # Disable OpenCV's internal threading: per the Chainer tip linked above,
    # it can deadlock the training process when combined with
    # multiprocessing-based data loading.  cv2 is optional, hence the guard.
    import cv2

    cv2.setNumThreads(0)
except ImportError:
    pass
def concat_examples(batch, device=None, padding=None,
                    indices_concat=None, indices_to_device=None):
    """Collate a batch of example tuples, field by field.

    For each field index, the per-example values are gathered into a list;
    fields listed in ``indices_concat`` are concatenated into a single array
    (padded with ``padding``), and fields listed in ``indices_to_device``
    are transferred to ``device``.  By default every field is both
    concatenated and transferred.
    """
    if len(batch) == 0:
        raise ValueError('batch is empty')

    num_fields = len(batch[0])
    if indices_concat is None:
        indices_concat = range(num_fields)
    if indices_to_device is None:
        indices_to_device = range(num_fields)

    # A tuple of paddings is interpreted as one padding value per field;
    # anything else is broadcast to all fields.
    if not isinstance(padding, tuple):
        padding = [padding] * num_fields

    collated = []
    for field in six.moves.range(num_fields):
        column = [example[field] for example in batch]
        concatenated = field in indices_concat
        if concatenated:
            column = _concat_arrays(column, padding[field])
        if field in indices_to_device:
            if concatenated:
                column = to_device(device, column)
            else:
                # Not concatenated: move each element individually.
                column = [to_device(device, item) for item in column]
        collated.append(column)
    return tuple(collated)
class Transform(object):
    """Training-time preprocessing for (img, bbox, label) triples.

    Resizes the image via the model's own ``prepare`` step, rescales the
    bounding boxes accordingly, applies a random horizontal flip to both,
    and appends the resize scale so the loss can account for it.
    """

    def __init__(self, light_head_rcnn):
        self.light_head_rcnn = light_head_rcnn

    def __call__(self, in_data):
        img, bbox, label = in_data
        _, orig_H, orig_W = img.shape
        img = self.light_head_rcnn.prepare(img)
        _, new_H, new_W = img.shape
        scale = new_H / orig_H
        bbox = transforms.resize_bbox(bbox, (orig_H, orig_W), (new_H, new_W))
        # Random horizontal flip, applied consistently to image and boxes.
        img, flip_params = transforms.random_flip(
            img, x_random=True, return_param=True)
        bbox = transforms.flip_bbox(
            bbox, (new_H, new_W), x_flip=flip_params['x_flip'])
        return img, bbox, label, scale
def main():
    """Multi-process training entry point: LightHeadRCNN (ResNet-101) on COCO.

    Uses ChainerMN for data-parallel training; the dataset is scattered
    across workers and only rank 0 attaches logging/snapshot extensions.
    Command-line flags control the output directory, random seed, total
    batch size (split evenly across workers) and the epoch schedule.
    """
    parser = argparse.ArgumentParser(
        description='ChainerCV training example: LightHeadRCNN')
    parser.add_argument('--out', '-o', default='result',
                        help='Output directory')
    parser.add_argument('--seed', '-s', type=int, default=1234)
    parser.add_argument('--batchsize', '-b', type=int, default=8)
    parser.add_argument('--epoch', type=int, default=30)
    parser.add_argument('--step-epoch', type=int, nargs='*', default=[19, 25])
    args = parser.parse_args()

    # https://docs.chainer.org/en/stable/chainermn/tutorial/tips_faqs.html#using-multiprocessiterator
    # Switch to 'forkserver' and spawn a dummy process early so later forks
    # do not inherit CUDA/NCCL state.
    if hasattr(multiprocessing, 'set_start_method'):
        multiprocessing.set_start_method('forkserver')
        p = multiprocessing.Process()
        p.start()
        p.join()

    # chainermn: one GPU per process, chosen by the intra-node rank.
    comm = chainermn.create_communicator('pure_nccl')
    device = comm.intra_rank

    np.random.seed(args.seed)
    random.seed(args.seed)

    # model
    light_head_rcnn = LightHeadRCNNResNet101(
        pretrained_model='imagenet',
        n_fg_class=len(coco_bbox_label_names))
    light_head_rcnn.use_preset('evaluate')
    model = LightHeadRCNNTrainChain(light_head_rcnn)
    chainer.cuda.get_device_from_id(device).use()
    model.to_gpu()

    # train dataset
    train_dataset = COCOBboxDataset(
        year='2017', split='train')

    # filter non-annotated data: images without any box contribute nothing
    # to the detection losses.
    train_indices = np.array(
        [i for i, label in enumerate(train_dataset.slice[:, ['label']])
         if len(label[0]) > 0],
        dtype=np.int32)
    train_dataset = train_dataset.slice[train_indices]
    train_dataset = TransformDataset(
        train_dataset, ('img', 'bbox', 'label', 'scale'),
        Transform(model.light_head_rcnn))

    # Rank 0 decides the (shuffled) ordering; scatter_dataset broadcasts a
    # disjoint shard of indices to every worker.
    if comm.rank == 0:
        indices = np.arange(len(train_dataset))
    else:
        indices = None
    indices = chainermn.scatter_dataset(indices, comm, shuffle=True)
    train_dataset = train_dataset.slice[indices]
    train_iter = chainer.iterators.SerialIterator(
        train_dataset, batch_size=args.batchsize // comm.size)

    optimizer = chainermn.create_multi_node_optimizer(
        chainer.optimizers.MomentumSGD(momentum=0.9), comm)
    optimizer.setup(model)

    # The global context module trains with a 3x larger effective learning
    # rate, implemented as a per-parameter gradient scale.
    global_context_module = model.light_head_rcnn.head.global_context_module
    global_context_module.col_max.W.update_rule.add_hook(GradientScaling(3.0))
    global_context_module.col_max.b.update_rule.add_hook(GradientScaling(3.0))
    global_context_module.col.W.update_rule.add_hook(GradientScaling(3.0))
    global_context_module.col.b.update_rule.add_hook(GradientScaling(3.0))
    global_context_module.row_max.W.update_rule.add_hook(GradientScaling(3.0))
    global_context_module.row_max.b.update_rule.add_hook(GradientScaling(3.0))
    global_context_module.row.W.update_rule.add_hook(GradientScaling(3.0))
    global_context_module.row.b.update_rule.add_hook(GradientScaling(3.0))
    optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0001))

    # Freeze the stem and the first residual stage, plus every
    # BatchNormalization layer (statistics stay at their pretrained values).
    model.light_head_rcnn.extractor.conv1.disable_update()
    model.light_head_rcnn.extractor.res2.disable_update()
    for link in model.links():
        if isinstance(link, L.BatchNormalization):
            link.disable_update()

    converter = functools.partial(
        concat_examples, padding=0,
        # img, bboxes, labels, scales
        indices_concat=[0, 2, 3],  # img, _, labels, scales
        indices_to_device=[0],  # img
    )

    updater = chainer.training.updater.StandardUpdater(
        train_iter, optimizer, converter=converter,
        device=device)
    trainer = chainer.training.Trainer(
        updater, (args.epoch, 'epoch'), out=args.out)

    @make_shift('lr')
    def lr_scheduler(trainer):
        """Linear warm-up for the first 500 iterations, then step decay
        (x0.1) after each epoch listed in --step-epoch."""
        base_lr = 0.0005 * 1.25 * args.batchsize
        warm_up_duration = 500
        warm_up_rate = 1 / 3

        iteration = trainer.updater.iteration
        epoch = trainer.updater.epoch
        if iteration < warm_up_duration:
            rate = warm_up_rate \
                + (1 - warm_up_rate) * iteration / warm_up_duration
        else:
            # BUG FIX: `rate` must be initialized here; previously it was
            # read (`rate *= 0.1`) before any assignment once warm-up
            # finished, raising UnboundLocalError.
            rate = 1
            for step in args.step_epoch:
                if epoch > step:
                    rate *= 0.1
        return rate * base_lr

    trainer.extend(lr_scheduler)

    # Reporting/snapshot extensions only on rank 0 to avoid duplicate output.
    if comm.rank == 0:
        # interval
        log_interval = 100, 'iteration'
        plot_interval = 3000, 'iteration'
        print_interval = 20, 'iteration'

        # training extensions
        model_name = model.light_head_rcnn.__class__.__name__
        trainer.extend(
            chainer.training.extensions.snapshot_object(
                model.light_head_rcnn,
                filename='%s_model_iter_{.updater.iteration}.npz'
                         % model_name),
            trigger=(1, 'epoch'))
        trainer.extend(
            extensions.observe_lr(),
            trigger=log_interval)
        trainer.extend(
            extensions.LogReport(log_name='log.json', trigger=log_interval))
        report_items = [
            'iteration', 'epoch', 'elapsed_time', 'lr',
            'main/loss',
            'main/rpn_loc_loss',
            'main/rpn_cls_loss',
            'main/roi_loc_loss',
            'main/roi_cls_loss',
            'validation/main/map/iou=0.50:0.95/area=all/max_dets=100',
        ]
        trainer.extend(
            extensions.PrintReport(report_items), trigger=print_interval)
        trainer.extend(
            extensions.ProgressBar(update_interval=10))

        if extensions.PlotReport.available():
            trainer.extend(
                extensions.PlotReport(
                    ['main/loss'],
                    file_name='loss.png', trigger=plot_interval),
                trigger=plot_interval)

        trainer.extend(extensions.dump_graph('main/loss'))

    trainer.run()
if __name__ == '__main__':
main()
| {
"content_hash": "c69414ce3157075f9bf7f10b918c4483",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 109,
"avg_line_length": 34.23673469387755,
"alnum_prop": 0.6329279923700525,
"repo_name": "chainer/chainercv",
"id": "f5b126c6e8072cf5f6af9613f1d78ddc18fbfdf1",
"size": "8388",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/light_head_rcnn/train_coco_multi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3132"
},
{
"name": "Python",
"bytes": "1288391"
},
{
"name": "Shell",
"bytes": "11424"
}
],
"symlink_target": ""
} |
"""website_1 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
# URL routing table: most-specific prefixes first; Django stops at the
# first matching pattern.
urlpatterns = [
    url(r'^polls/', include('polls.urls')),
    # NOTE(review): 'polls.urls' is included twice (here and at ^polls/
    # above).  The empty pattern makes polls the site root as well, but it
    # duplicates every reversed URL name — confirm this is intentional.
    url(r'', include('polls.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^todo/', include('todo.urls')),
]
| {
"content_hash": "4ef690eb0240bb525f1c075523be324c",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 79,
"avg_line_length": 37.375,
"alnum_prop": 0.6800445930880713,
"repo_name": "indoorConstructionMan/Django",
"id": "e763a009e80e8e87fdccc967fb584404a179e4e6",
"size": "897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website_1/website_1/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1296"
},
{
"name": "HTML",
"bytes": "2847"
},
{
"name": "Python",
"bytes": "16657"
}
],
"symlink_target": ""
} |
from doorman import permissions
def register(name_or_test_func=None):
    """Register a permission test function with the global registry.

    Supports two decorator spellings:

    * ``@register`` — the function itself is passed in; its ``__name__``
      becomes the permission name.
    * ``@register('name')`` — an explicit name is given, and a decorator
      is returned that registers the wrapped function under it.
    """
    if callable(name_or_test_func):
        # Bare-decorator form: register under the function's own name.
        test_func = name_or_test_func
        permissions.register(test_func.__name__, test_func)
        return test_func

    # Parameterized form: build a decorator bound to the given name.
    def decorator(test_func):
        permissions.register(name_or_test_func, test_func)
        return test_func
    return decorator
| {
"content_hash": "d69887e672af6cfac32f5b40d17c435f",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 75,
"avg_line_length": 31.5,
"alnum_prop": 0.6507936507936508,
"repo_name": "seanbrant/django-doorman",
"id": "a4f8db5ac4050617b043491d6861ef4b4687600d",
"size": "378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doorman/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "18118"
}
],
"symlink_target": ""
} |
# pyximport compiles Cython modules on import, so install() must run before
# the `import packet_counter` below — do not reorder these statements.
import pyximport; pyximport.install()
import packet_counter
import sys

# Forward the first CLI argument to the Cython entry point (presumably a
# capture source such as an interface or pipe path — confirm against
# packet_counter.main).
packet_counter.main(sys.argv[1])
| {
"content_hash": "f418e92fc86926bafdccb2cfd9b909c3",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 37,
"avg_line_length": 15.285714285714286,
"alnum_prop": 0.7850467289719626,
"repo_name": "High-Hill/bachelor_dap_gw",
"id": "a492e583b9fabedeb4f7f9df73b904a4aafbc8f8",
"size": "107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "program/realtime/packet_counter_pipe.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30753"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
"""Common code for converting proto to other formats, such as JSON."""
# pylint:disable=wildcard-import
from apitools.base.py.encoding_helper import *
import apitools.base.py.extra_types # pylint:disable=unused-import
# pylint:disable=undefined-all-variable
# The names below are brought in by the wildcard import above; __all__ pins
# the public surface this module re-exports.
__all__ = [
    'CopyProtoMessage',
    'JsonToMessage',
    'MessageToJson',
    'DictToMessage',
    'MessageToDict',
    'PyValueToMessage',
    'MessageToPyValue',
    'MessageToRepr',
    'GetCustomJsonFieldMapping',
    'AddCustomJsonFieldMapping',
    'GetCustomJsonEnumMapping',
    'AddCustomJsonEnumMapping',
]
| {
"content_hash": "61f70743fda92d197e0a8dedaa9b4af0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 70,
"avg_line_length": 26.5,
"alnum_prop": 0.7186963979416809,
"repo_name": "endlessm/chromium-browser",
"id": "25b02d3bfc05a3a0bc9d48828815a5b7a5bf3365",
"size": "1183",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "third_party/catapult/third_party/gsutil/third_party/apitools/apitools/base/py/encoding.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from micropython import const
import time, machine, bluetooth
# How long each wait_for_event() call blocks before giving up.
TIMEOUT_MS = 5000

# BLE IRQ event codes (match the values documented for bluetooth.BLE.irq).
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)
_IRQ_PERIPHERAL_CONNECT = const(7)
_IRQ_PERIPHERAL_DISCONNECT = const(8)
_IRQ_GATTC_SERVICE_RESULT = const(9)
_IRQ_GATTC_SERVICE_DONE = const(10)

# One 16-bit UUID and one 128-bit vendor UUID, so discovery is exercised
# for both UUID widths.
UUID_A = bluetooth.UUID(0x180D)
UUID_B = bluetooth.UUID("A5A5A5A5-FFFF-9999-1111-5A5A5A5A5A5A")

# Services with no characteristics: only service discovery is tested.
SERVICE_A = (
    UUID_A,
    (),
)
SERVICE_B = (
    UUID_B,
    (),
)
SERVICES = (SERVICE_A, SERVICE_B)

# Shared state between the IRQ handler and wait_for_event().
waiting_event = None  # int event code or predicate currently awaited
waiting_data = None  # data tuple captured when the awaited event fired
num_service_result = 0  # count of discovery results matching our UUIDs
def irq(event, data):
    """BLE event handler.

    Prints connect/disconnect events, counts service-discovery results for
    the two registered UUIDs, and — when wait_for_event() is pending —
    captures the data of the awaited event into the module globals.
    Runs in IRQ context, so it only mutates globals and prints.
    """
    global waiting_event, waiting_data, num_service_result
    if event == _IRQ_CENTRAL_CONNECT:
        print("_IRQ_CENTRAL_CONNECT")
    elif event == _IRQ_CENTRAL_DISCONNECT:
        print("_IRQ_CENTRAL_DISCONNECT")
    elif event == _IRQ_PERIPHERAL_CONNECT:
        print("_IRQ_PERIPHERAL_CONNECT")
    elif event == _IRQ_PERIPHERAL_DISCONNECT:
        print("_IRQ_PERIPHERAL_DISCONNECT")
    elif event == _IRQ_GATTC_SERVICE_RESULT:
        # data[3] is the discovered service UUID; only count the two
        # services registered by the peripheral side.
        if data[3] == UUID_A or data[3] == UUID_B:
            print("_IRQ_GATTC_SERVICE_RESULT", data[3])
            num_service_result += 1
    if waiting_event is not None:
        # waiting_event is either an int event code or a
        # predicate(event, data) returning True when satisfied.
        if (isinstance(waiting_event, int) and event == waiting_event) or (
            not isinstance(waiting_event, int) and waiting_event(event, data)
        ):
            waiting_event = None
            waiting_data = data
def wait_for_event(event, timeout_ms):
    """Busy-wait until `irq` captures `event`, or `timeout_ms` elapses.

    `event` is an int event code or a predicate(event, data).  Returns
    True when the event's data was captured, False on timeout.
    """
    global waiting_event, waiting_data
    waiting_event = event
    waiting_data = None

    start = time.ticks_ms()
    # Poll until the IRQ handler fills waiting_data or the deadline passes.
    while time.ticks_diff(time.ticks_ms(), start) < timeout_ms:
        if waiting_data:
            return True
        machine.idle()
    return False
# Acting in peripheral role.
def instance0():
multitest.globals(BDADDR=ble.config("mac"))
ble.gatts_register_services(SERVICES)
print("gap_advertise")
ble.gap_advertise(20_000, b"\x02\x01\x06\x04\xffMPY")
multitest.next()
try:
wait_for_event(_IRQ_CENTRAL_CONNECT, TIMEOUT_MS)
wait_for_event(_IRQ_CENTRAL_DISCONNECT, TIMEOUT_MS)
finally:
ble.active(0)
# Acting in central role.
def instance1():
multitest.next()
try:
# Connect to peripheral and then disconnect.
print("gap_connect")
ble.gap_connect(0, BDADDR)
if not wait_for_event(_IRQ_PERIPHERAL_CONNECT, TIMEOUT_MS):
return
conn_handle, _, _ = waiting_data
# Discover services.
ble.gattc_discover_services(conn_handle)
wait_for_event(lambda event, data: num_service_result == 2, TIMEOUT_MS)
# Disconnect from peripheral.
print("gap_disconnect:", ble.gap_disconnect(conn_handle))
wait_for_event(_IRQ_PERIPHERAL_DISCONNECT, TIMEOUT_MS)
finally:
ble.active(0)
# Shared BLE instance for whichever role this process plays; the IRQ
# handler must be registered before any events can be observed.
ble = bluetooth.BLE()
ble.active(1)
ble.irq(irq)
| {
"content_hash": "097d369cc0997199ed7bfe5f44ba7a4a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 79,
"avg_line_length": 27.66346153846154,
"alnum_prop": 0.6388599235314564,
"repo_name": "kerneltask/micropython",
"id": "e746b87458e1b59aaf3f941a80cc6b633bd3966f",
"size": "2912",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/multi_bluetooth/ble_gattc_discover_services.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "14351"
},
{
"name": "C",
"bytes": "14815227"
},
{
"name": "C++",
"bytes": "609755"
},
{
"name": "CMake",
"bytes": "876"
},
{
"name": "JavaScript",
"bytes": "5792"
},
{
"name": "Makefile",
"bytes": "177272"
},
{
"name": "Objective-C",
"bytes": "7598"
},
{
"name": "Python",
"bytes": "1147168"
},
{
"name": "Shell",
"bytes": "16221"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.