| repo_name (string, 5–100 chars) | path (string, 4–231 chars) | language (1 class) | license (15 classes) | size (int64, 6–947k) | score (float64, 0–0.34) | prefix (string, 0–8.16k chars) | middle (string, 3–512 chars) | suffix (string, 0–8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| melrief/Hadoop-Log-Tools | hadoop/log/convert/libjobevent.py | Python | apache-2.0 | 1,665 | 0.032432 |
def parse_event(raw_event,preserve_backslash=False,preserve_dot=False):
in_string = False
words = []
d = {}
key = None
curr = []
for c in raw_event:
if c == '\\' and not preserve_backslash:
continue
elif c == '"':
in_string = not in_string
elif c == ' ':
if in_string:
curr.append(c)
else:
if key:
val = ''.join(curr)
d[key] = decodeCounters(val) if key == 'COUNTERS' else val
key = None
                else:
word = ''.join(curr)
if preserve_dot or word != '.':
words.append( ''.join(curr) )
curr = []
elif c == '=':
key = ''.join(curr)
curr = []
else:
curr.append(c)
if in_string:
curr.append(c)
else:
if key:
d[key] = ''.join(curr)
key = None
else:
word = ''.join(curr)
if preserve_dot or word != '.':
words.append( ''.join(curr) )
curr = []
return words,d
def decodeCounters(counters):
raw_counter_families = counters[1:-1].split('}{')
counter_families = {}
for raw_family in raw_counter_families:
splitted = raw_family.split('[')
name,desc = decodeCounterKey( splitted[0] )
raw_counters = [s[:-1] if s[-1] == ']' else s for s in splitted[1:]]
counters = {}
for raw_counter in raw_counters:
cname,fdesc,val = decodeCounterKey(raw_counter)
#counters[cname] = Counter(cname,fdesc,val)
counters[cname] = (fdesc,val)
#counter_families[name] = CounterFamily(name,desc,counters)
counter_families[name] = (name,desc,counters)
return counter_families
def decodeCounterKey(s):
return s[1:-1].split(')(')
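# Added usage sketch (not part of the original file): the event line below is a made-up
# Hadoop job-history record, used only to show what parse_event returns.
if __name__ == '__main__':
    words, fields = parse_event('Job JOBID="job_1" JOBNAME="wc" .')
    print(words)   # ['Job']
    print(fields)  # {'JOBID': 'job_1', 'JOBNAME': 'wc'}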
|
| kiwifb/numpy | numpy/core/tests/test_multiarray.py | Python | bsd-3-clause | 244,600 | 0.000773 |
from __future__ import division, absolute_import, print_function
import collections
import tempfile
import sys
import shutil
import warnings
import operator
import io
import itertools
import ctypes
import os
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
from decimal import Decimal
import numpy as np
from numpy.compat import asbytes, getexception, strchar, unicode, sixu
from test_print import in_foreign_locale
from numpy.core.multiarray_tests import (
test_neighborhood_iterator, test_neighborhood_iterator_oob,
test_pydatamem_seteventhook_start, test_pydatamem_seteventhook_end,
test_inplace_increment, get_buffer_info, test_as_c_array,
)
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_raises,
assert_equal, assert_almost_equal, assert_array_equal,
assert_array_almost_equal, assert_allclose, IS_PYPY, HAS_REFCOUNT,
assert_array_less, runstring, dec, SkipTest, temppath
)
# Need to test an object that does not fully implement math interface
from datetime import timedelta
if sys.version_info[:2] > (3, 2):
# In Python 3.3 the representation of empty shape, strides and sub-offsets
# is an empty tuple instead of None.
# http://docs.python.org/dev/whatsnew/3.3.html#api-changes
EMPTY = ()
else:
EMPTY = None
class TestFlags(TestCase):
def setUp(self):
self.a = np.arange(10)
def test_writeable(self):
mydict = locals()
self.a.flags.writeable = False
self.assertRaises(ValueError, runstring, 'self.a[0] = 3', mydict)
self.assertRaises(ValueError, runstring, 'self.a[0:1].itemset(3)', mydict)
self.a.flags.writeable = True
self.a[0] = 5
self.a[0] = 0
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags.farray, False)
assert_equal(self.a.flags.behaved, True)
assert_equal(self.a.flags.fnc, False)
assert_equal(self.a.flags.forc, True)
assert_equal(self.a.flags.owndata, True)
assert_equal(self.a.flags.writeable, True)
assert_equal(self.a.flags.aligned, True)
assert_equal(self.a.flags.updateifcopy, False)
def test_string_align(self):
a = np.zeros(4, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
        # sizes that are not a power of two are accessed byte-wise and thus considered aligned
a = np.zeros(5, dtype=np.dtype('|S4'))
assert_(a.flags.aligned)
def test_void_align(self):
a = np.zeros(4, dtype=np.dtype([("a", "i4"), ("b", "i4")]))
assert_(a.flags.aligned)
class TestHash(TestCase):
# see #3793
def test_int(self):
for st, ut, s in [(np.int8, np.uint8, 8),
(np.int16, np.uint16, 16),
(np.int32, np.uint32, 32),
(np.int64, np.uint64, 64)]:
for i in range(1, s):
assert_equal(hash(st(-2**i)), hash(-2**i),
err_msg="%r: -2**%d" % (st, i))
assert_equal(hash(st(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (st, i - 1))
assert_equal(hash(st(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (st, i))
i = max(i - 1, 1)
assert_equal(hash(ut(2**(i - 1))), hash(2**(i - 1)),
err_msg="%r: 2**%d" % (ut, i - 1))
assert_equal(hash(ut(2**i - 1)), hash(2**i - 1),
err_msg="%r: 2**%d - 1" % (ut, i))
class TestAttributes(TestCase):
def setUp(self):
self.one = np.arange(10)
self.two = np.arange(20).reshape(4, 5)
self.three = np.arange(60, dtype=np.float64).reshape(2, 5, 6)
def test_attributes(self):
assert_equal(self.one.shape, (10,))
assert_equal(self.two.shape, (4, 5))
assert_equal(self.three.shape, (2, 5, 6))
self.three.shape = (10, 3, 2)
assert_equal(self.three.shape, (10, 3, 2))
self.three.shape = (2, 5, 6)
assert_equal(self.one.strides, (self.one.itemsize,))
num = self.two.itemsize
assert_equal(self.two.strides, (5*num, num))
num = self.three.itemsize
assert_equal(self.three.strides, (30*num, 6*num, num))
assert_equal(self.one.ndim, 1)
assert_equal(self.two.ndim, 2)
assert_equal(self.three.ndim, 3)
num = self.two.itemsize
assert_equal(self.two.size, 20)
assert_equal(self.two.nbytes, 20*num)
assert_equal(self.two.itemsize, self.two.dtype.itemsize)
assert_equal(self.two.base, np.arange(20))
def test_dtypeattr(self):
        assert_equal(self.one.dtype, np.dtype(np.int_))
assert_equal(self.three.dtype, np.dtype(np.float_))
assert_equal(self.one.dtype.char, 'l')
assert_equal(self.three.dtype.char, 'd')
self.assertTrue(self.three.dtype.str[0] in '<>')
assert_equal(self.one.dtype.str[1], 'i')
assert_equal(self.three.dtype.str[1], 'f')
    def test_int_subclassing(self):
# Regression test for https://github.com/numpy/numpy/pull/3526
numpy_int = np.int_(0)
if sys.version_info[0] >= 3:
# On Py3k int_ should not inherit from int, because it's not
# fixed-width anymore
assert_equal(isinstance(numpy_int, int), False)
else:
# Otherwise, it should inherit from int...
assert_equal(isinstance(numpy_int, int), True)
# ... and fast-path checks on C-API level should also work
from numpy.core.multiarray_tests import test_int_subclass
assert_equal(test_int_subclass(numpy_int), True)
def test_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
return np.ndarray(size, buffer=x, dtype=int,
offset=offset*x.itemsize,
strides=strides*x.itemsize)
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(ValueError, make_array, 8, 3, 1)
assert_equal(make_array(8, 3, 0), np.array([3]*8))
# Check behavior reported in gh-2503:
self.assertRaises(ValueError, make_array, (2, 3), 5, np.array([-2, -3]))
make_array(0, 0, 10)
def test_set_stridesattr(self):
x = self.one
def make_array(size, offset, strides):
try:
r = np.ndarray([size], dtype=int, buffer=x,
offset=offset*x.itemsize)
except:
raise RuntimeError(getexception())
r.strides = strides = strides*x.itemsize
return r
assert_equal(make_array(4, 4, -1), np.array([4, 3, 2, 1]))
assert_equal(make_array(7, 3, 1), np.array([3, 4, 5, 6, 7, 8, 9]))
self.assertRaises(ValueError, make_array, 4, 4, -2)
self.assertRaises(ValueError, make_array, 4, 2, -1)
self.assertRaises(RuntimeError, make_array, 8, 3, 1)
# Check that the true extent of the array is used.
# Test relies on as_strided base not exposing a buffer.
x = np.lib.stride_tricks.as_strided(np.arange(1), (10, 10), (0, 0))
def set_strides(arr, strides):
arr.strides = strides
self.assertRaises(ValueError, set_strides, x, (10*x.itemsize, x.itemsize))
# Test for offset calculations:
x = np.lib.stride_tricks.as_strided(np.arange(10, dtype=np.int8)[-1],
shape=(10,), strides=(-1,))
self.assertRaises(ValueError, set_strides, x[::-1], -1)
a = x[::-1]
a.strides = 1
a[::2].strides = 2
def test_fill(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = np.empty((3, 2, 1), t)
y = np.empty((3, 2, 1), t)
x.fill(1)
y[...] = 1
assert_equal(x, y)
def test_fill_max_
|
| shanot/imp | modules/core/test/test_surface_mover.py | Python | gpl-3.0 | 2,528 | 0.000396 |
import IMP
import IMP.algebra
import IMP.core
import IMP.atom
import IMP.test
class Tests(IMP.test.TestCase):
"""Tests for SurfaceMover."""
def test_init(self):
"""Test creation of surface mover."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
        mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
mv.set_was_used(True)
def test_propose_move(self):
"""Test proposing move alters center and normal."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
n = surf.get_normal()
c = surf.get_coordinates()
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
mv.propose()
self.assertNotAlmostEqual((n - surf.get_normal()).get_magnitude(), 0)
self.assertNotAlmostEqual((c - surf.get_coordinates()).get_magnitude(), 0)
def test_propose_reflect(self):
"""Test reflect correctly flips normal."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
n = surf.get_normal()
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 0, 0, 1.)
mv.propose()
self.assertAlmostEqual((n + surf.get_normal()).get_magnitude(), 0)
def test_reject_restores_initial_state(self):
"""Test rejecting a move returns the surface to previous state."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
n = surf.get_normal()
c = surf.get_coordinates()
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
mv.propose()
mv.reject()
self.assertAlmostEqual((n - surf.get_normal()).get_magnitude(), 0)
self.assertAlmostEqual((c - surf.get_coordinates()).get_magnitude(), 0)
def test_inputs(self):
"""Test only input is Surface."""
m = IMP.Model()
surf = IMP.core.Surface.setup_particle(IMP.Particle(m))
surf.set_coordinates_are_optimized(True)
surf.set_normal_is_optimized(True)
mv = IMP.core.SurfaceMover(surf, 1, .1, 1.)
self.assertSetEqual(set([surf.get_particle()]), set(mv.get_inputs()))
mv.set_was_used(True)
if __name__ == '__main__':
IMP.test.main()
|
| opendata-swiss/ckanext-geocat | ckanext/geocat/utils/search_utils.py | Python | agpl-3.0 | 5,592 | 0 |
from collections import namedtuple
import ckan.plugins.toolkit as tk
from ckan import model
from ckan.model import Session
import json
OgdchDatasetInfo = namedtuple('OgdchDatasetInfo',
['name', 'belongs_to_harvester', 'package_id'])
def get_organization_slug_for_harvest_source(harvest_source_id):
context = get_default_context()
try:
source_dataset = \
tk.get_action('package_show')(context, {'id': harvest_source_id})
return source_dataset.get('organization').get('name')
except (KeyError, IndexError, TypeError):
raise tk.ObjectNotFound
def get_packages_to_delete(existing_dataset_infos,
gathered_ogdch_identifiers):
return [
(identifier, info)
for identifier, info
in existing_dataset_infos.items()
if info.belongs_to_harvester and identifier not in gathered_ogdch_identifiers # noqa
]
def get_double_packages(existing_dataset_infos, gathered_ogdch_identifiers): # noqa
return [
(identifier, info)
for identifier, info
in existing_dataset_infos.items()
if not info.belongs_to_harvester and identifier in gathered_ogdch_identifiers # noqa
]
def find_package_for_identifier(identifier):
context = get_default_context()
fq = "identifier:({})".format(identifier)
try:
result = tk.get_action('package_search')(context,
{'fq': fq,
'include_private': True})
if result.get('count') > 0:
pkg = result['results'][0]
return OgdchDatasetInfo(name=pkg['name'],
package_id=pkg['id'],
belongs_to_harvester=True)
else:
return None
except Exception as e:
        print("Error occurred while searching for packages with fq: {}, error: {}"  # noqa
.format(fq, e))
def get_dataset_infos_for_organization(organization_name, harvest_source_id):
context = get_default_context()
rows = 500
page = 0
result_count = 0
fq = "organization:({})".format(organization_name)
processed_count = 0
ogdch_dataset_infos = {}
while page == 0 or processed_count < result_count:
try:
page = page + 1
start = (page - 1) * rows
result = tk.get_action('package_search')(context,
{'fq': fq,
'rows': rows,
'start': start,
'include_private': True})
if not result_count:
result_count = result['count']
datasets_in_result = result.get('results')
if datasets_in_result:
for dataset in datasets_in_result:
extras = dataset.get('extras')
dataset_harvest_source_id = \
get_value_from_dataset_extras(extras,
'harvest_source_id')
if dataset_harvest_source_id and dataset_harvest_source_id == harvest_source_id: # noqa
belongs_to_harvester = True
else:
belongs_to_harvester = False
ogdch_dataset_infos[dataset['identifier']] = \
OgdchDatasetInfo(
name=dataset['name'],
package_id=dataset['id'],
belongs_to_harvester=belongs_to_harvester)
processed_count += len(datasets_in_result)
except Exception as e:
            print("Error occurred while searching for packages with fq: {}, error: {}"  # noqa
.format(fq, e))
break
return ogdch_dataset_infos
def get_default_context():
return {
'model': model,
'session': Session,
'ignore_auth': True
}
def get_value_from_dataset_extras(extras, key):
if extras:
extras_reduced_to_key = [item.get('value')
for item in extras
if item.get('key') == key]
if extras_reduced_to_key:
return extras_reduced_to_key[0]
return None
def get_value_from_object_extra(harvest_object_extras, key):
for extra in harvest_object_extras:
if extra.key == key:
return extra.value
return None
def map_resources_to_ids(pkg_dict, pkg_info):
existing_package = \
tk.get_action('package_show')({}, {'id': pkg_info.package_id})
existing_resources = existing_package.get('resources')
existing_resources_mapping = \
{r['id']: _get_resource_id_string(r) for r in existing_resources}
for resource in pkg_dict.get('resources'):
resource_id_dict = _get_resource_id_string(resource)
id_to_reuse = [k for k, v in existing_resources_mapping.items()
if v == resource_id_dict]
if id_to_reuse:
id_to_reuse = id_to_reuse[0]
resource['id'] = id_to_reuse
del existing_resources_mapping[id_to_reuse]
def _get_resource_id_string(resource):
resource_id_dict = {'url': resource.get('url'),
'title': resource.get('title'),
'description': resource.get('description')}
return json.dumps(resource_id_dict)
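# Added illustration (hypothetical resource dicts, not from the original module): two
# resources that share url/title/description serialize to the same key, which is how
# map_resources_to_ids decides that an incoming resource may reuse an existing id.
if __name__ == '__main__':
    old = {'id': 'abc', 'url': 'https://example.org/data.csv', 'title': 't', 'description': 'd'}
    new = {'url': 'https://example.org/data.csv', 'title': 't', 'description': 'd'}
    assert _get_resource_id_string(old) == _get_resource_id_string(new)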
|
| evernym/zeno | plenum/test/common/test_transactions.py | Python | apache-2.0 | 450 | 0 |
from plenum.common.constants import NODE, NYM
from plenum.common.transactions import PlenumTransactions
def testTransactionsAreEncoded():
assert NODE == "0"
assert NYM == "1"
def testTransactionEnumDecoded():
assert PlenumTransactions.NODE.name == "NODE"
assert PlenumTransactions.NYM.name == "NYM"
def testTransactionEnumEncoded():
assert PlenumTransactions.NODE.value == "0"
assert PlenumTransactions.NYM.value == "1"
|
| amurzeau/streamlink-debian | src/streamlink/plugins/webtv.py | Python | bsd-2-clause | 2,658 | 0.001129 |
import base64
import binascii
import logging
import re
from Crypto.Cipher import AES
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream.hls import HLSStream
from streamlink.utils.crypto import unpad_pkcs5
from streamlink.utils.parse import parse_json
from streamlink.utils.url import update_scheme
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r"https?://(\w+)\.web\.tv/?"
))
class WebTV(Plugin):
_sources_re = re.compile(r'"sources": (\[.*?\]),', re.DOTALL)
_sources_schema = validate.Schema([
{
"src": validate.any(
validate.contains("m3u8"),
validate.all(
validate.text,
validate.transform(lambda x: WebTV.decrypt_stream_url(x)),
validate.contains("m3u8")
)
),
"type": validate.text,
"label": validate.text
}
])
@staticmethod
def decrypt_stream_url(encoded_url):
data = base64.b64decode(encoded_url)
cipher_text = binascii.unhexlify(data[96:])
decryptor = AES.new(binascii.unhexlify(data[32:96]),
AES.MODE_CBC,
binascii.unhexlify(data[:32]))
return unpad_pkcs5(decryptor.decrypt(cipher_text)).decode("utf8")
def _get_streams(self):
"""
Find the streams for web.tv
:return:
"""
headers = {}
res = self.session.http.get(self.url, headers=headers)
headers["Referer"] = self.url
sources = self._sources_re.findall(res.text)
if len(sources):
sdata = parse_json(sources[0], schema=self._sources_schema)
for source in sdata:
log.debug(f"Found stream of type: {source['type']}")
if source["type"] == "application/vnd.apple.mpegurl":
url = update_scheme("https://", source["src"], force=False)
try:
# try to parse the stream as a variant playlist
variant = HLSStream.parse_variant_playlist(self.session, url, headers=headers)
if variant:
yield from variant.items()
else:
# and if that fails, try it as a plain HLS stream
yield 'live', HLSStream(self.session, url, headers=headers)
except OSError:
log.warning("Could not open the stream, perhaps the channel is offline")
__plugin__ = WebTV
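# Added round-trip sketch (an assumption, not part of the plugin): decrypt_stream_url expects
# a base64 blob laid out as hex(IV, 16 bytes) + hex(AES key, 32 bytes) + hex(ciphertext).
# The key, IV and URL below are invented; pycryptodome's pad() stands in for the site's padding.
if __name__ == '__main__':
    from Crypto.Util.Padding import pad
    iv, key = b'0' * 16, b'k' * 32
    ct = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(b"https://example.com/stream.m3u8", 16))
    blob = base64.b64encode(binascii.hexlify(iv) + binascii.hexlify(key) + binascii.hexlify(ct))
    print(WebTV.decrypt_stream_url(blob))  # -> https://example.com/stream.m3u8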
|
| lincolnnascimento/crawler | crawler/wsgi.py | Python | apache-2.0 | 389 | 0.002571 |
"""
WSGI config for crawler project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "crawler.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
| Seairth/Orochi | assembler/__init__.py | Python | gpl-3.0 | 11,327 | 0.005562 |
import re
from .state import State
from .expression import ConstantExpression
__all__ = ["assemble"]
_const_parser = None
def _evaluate_d(expression : str, state : State) -> int:
value = _get_register(expression) or state.GetLabelAddress(expression)
if not value:
value = _const_parser.Evaluate(expression)
return "{:0>9b}".format(value)
def _evaluate_s(expression : str, state : State) -> str:
value = _get_register(expression) or state.GetLabelAddress(expression)
if not value:
value = _const_parser.Evaluate(expression)
if value > 0x1FF:
raise AssemblerError(state.LineNumber, "s-field expression evaluated to a value greater than $1FF.")
return "{:0>9b}".format(value)
def _get_register(name : str) -> int:
    return None if name not in lang.registers else lang.registers[name]
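# Added worked example (not in the original source): both field evaluators above pack a
# register number or constant into the zero-padded 9-bit binary field format used by the
# instruction encoder.
assert "{:0>9b}".format(5) == "000000101"
assert "{:0>9b}".format(0x1FF) == "111111111"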
def assemble(source, binary_format="binary", hub_offset=0, syntax_version=1):
global _const_parser
state = State()
pending = []
output = []
if binary_format != "raw":
state.HubAddress = 0x10
else:
        state.HubAddress = int(hub_offset)
_const_parser = ConstantExpression(state)
# PASS 1
for line in source:
state.LineNumber += 1
if "'" in line:
line = line[:line.index("'")] # remove comments
if line == "" or str.isspace(line): # ignore empty lines
continue
line = line.upper()
parts = line.split(maxsplit=1)
label = ""
directive = ""
cond = ""
opcode = ""
parameters = ""
try:
if parts[0] not in lang.reserved_words:
label = parts[0]
parts = parts[1].split(maxsplit=1) if len(parts) == 2 else []
if parts and parts[0] in lang.directives:
directive = parts[0]
parameters = parts[1] if len(parts) == 2 else ""
parts = []
if parts and parts[0] in lang.conditions:
cond = parts[0]
parts = parts[1].split(maxsplit=1) if len(parts) == 2 else []
if parts and parts[0] in lang.instructions:
opcode = parts[0]
parameters = parts[1] if len(parts) == 2 else ""
parts = []
if parts and parts[0] in lang.datatypes:
opcode = parts[0]
parameters = parts[1] if len(parts) == 2 else ""
parts = []
if label != "":
if directive in ("ORG", "FIT"):
raise AssemblerError(state.LineNumber, "Labels are not allowed for ORG or FIT.")
if not state.AddLabel(label):
raise AssemblerError(state.LineNumber, "Could not add label '{}'".format(label))
if directive != "":
if directive == "ORG":
if parameters == "":
state.ORG()
else:
                        state.ORG(_const_parser.Evaluate(parameters))
elif directive == "FIT":
fit = (parameters == "") and state.FIT() or state.FIT(_const_parser.Evaluate(parameters))
if not fit:
raise AssemblerError(state.LineNumber, "It doesn't FIT!")
elif directive == "RES":
state.FixLabelAddresses()
if parameters == "":
state.RES()
else:
state.RES(_const_parser.Evaluate(parameters))
else:
raise AssemblerError(state.LineNumber, "Unrecognized directive!")
if opcode != "":
state.FixLabelAddresses()
pending.append((cond, opcode, parameters.strip(), state.LineNumber, state.CogAddress, state.HubAddress, line))
state.CogAddress += 1
state.HubAddress += 1
if directive == "" and opcode == "" and label == "":
raise AssemblerError(state.LineNumber, "unrecognized text: {}".format(line))
# print("> {0}".format(line.rstrip()))
except AssemblerError as e:
state.AddError(e)
# print("Pass 2...")
# PASS 2
for line in pending:
state.SetLineNumber(line[3])
parameters = line[2]
try:
if line[1] in lang.datatypes:
value = _const_parser.Evaluate(parameters)
if line[1] == "BYTE":
if isinstance(value, list):
temp = value[0]
count = 8
for b in (value + [0,0,0])[1:]:
if b < 0: b += 0x100
temp += (b << count)
count += 8
if count == 32: break
value = temp
elif value < 0:
value += 0x100
elif line[1] == "WORD":
if isinstance(value, list):
temp = value[0]
count = 16
for b in (value + [0])[1:]:
if b < 0: b += 0x10000
temp += (b << count)
count += 16
if count == 32: break
value = temp
elif value < 0:
value += 0x10000
else:
if isinstance(value, list):
value = value[0]
if value < 0:
value += 0x100000000
bits = "{:0>32b}".format(value)
else:
rules = lang.instructions[line[1]]
bits = rules[0]
if rules[5] and line[0]:
cond = lang.conditions[line[0]]
bits = bits[:10] + cond + bits[14:]
if parameters:
wr_nr = False
effect = re.split("[\s\t\n,]+", parameters)[-1]
while effect in lang.effects:
if effect == "WZ":
if not line[1]:
raise AssemblerError(state.LineNumber, "WZ Not allowed!")
bits = bits[:6] + "1" + bits[7:]
elif effect == "WC":
if not line[2]:
raise AssemblerError(state.LineNumber, "WC Not allowed!")
bits = bits[:7] + "1" + bits[8:]
elif effect in ("WR", "NR"):
if not line[3]:
raise AssemblerError(state.LineNumber, "WR Not allowed!")
if wr_nr:
raise AssemblerError(state.LineNumber, "Cannot use NR and WR at the same time.")
bits = bits[:8] + ("1" if effect == "WR" else "0") + bits[9:]
wr_nr = True
parameters = parameters[:-3]
effect = parameters and re.split("[\s\t\n,]+", parameters)[-1] or ""
if parameters:
if "d" in bits and "s" in bits:
(d, s) = parameters.split(",")
elif "d" in bits:
d = parameters
elif "s" in bits:
s = parameters
else:
raise AssemblerError(state.LineNumber, "Unrecognized parameters: {}".format(parameters))
if "d" in bits:
d = d.strip()
d = _evaluate_d(d, state)
d_start = bits.index("d")
d_stop = bits.rindex("d")
bits = bit
|
| cheral/orange3 | Orange/canvas/application/tests/test_schemeinfo.py | Python | bsd-2-clause | 680 | 0.002941 |
from ...scheme import Scheme
from ..schemeinfo import SchemeInfoDialog
from ...gui import test
class TestSchemeInfo(test.QAppTestCase):
def test_scheme_info(self):
scheme = Scheme(title="A Scheme", description="A String\n")
dialog = SchemeInfoDialog()
dialog.setScheme(scheme)
status = dialog.exec_()
        if status == dialog.Accepted:
self.assertEqual(scheme.title.strip(),
                             str(dialog.editor.name_edit.text()).strip())
self.assertEqual(scheme.description,
str(dialog.editor.desc_edit \
.toPlainText()).strip())
|
| Onager/artifacts | tests/test_lib.py | Python | apache-2.0 | 1,770 | 0.00791 |
# -*- coding: utf-8 -*-
"""Shared functions and classes for testing."""
from __future__ import unicode_literals
import os
import shutil
import tempfile
import unittest
class BaseTestCase(unittest.TestCase):
"""The base test case."""
_DATA_PATH = os.path.join(os.getcwd(), 'data')
_TEST_DATA_PATH = os.path.join(os.getcwd(), 'test_data')
# Show full diff results, part of TestCase so does not follow our naming
# conventions.
maxDiff = None
def _GetTestFilePath(self, path_segments):
"""Retrieves the path of a test file in the test data directory.
Args:
path_segments (list[str]): path segments inside the test data directory.
Returns:
str: path of the test file.
"""
# Note that we need to pass the individual path segments to os.path.join
# and not a list.
return os.path.join(self._TEST_DATA_PATH, *path_segments)
def _SkipIfPathNotExists(self, path):
"""Skips the test if the path does not exist.
Args:
path (str): path of a test file.
Raises:
      SkipTest: if the path does not exist and the test should be skipped.
"""
if not os.path.exists(path):
filename = os.path.basename(path)
raise unittest.SkipTest('missing test file: {0:s}'.format(filename))
class TempDirectory(object):
"""Class that implements a temporary directory."""
def __init__(self):
"""Initializes a temporary directory."""
super(TempDirectory, self).__init__()
self.name = ''
def __enter__(self):
"""Make this work with the 'with' statement."""
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, unused_type, unused_value, unused_traceback):
"""Make this work with the 'with' st
|
atement."""
shutil.rmtree(self.name, True)
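# Added usage sketch (illustrative only): TempDirectory is meant to be used as a context
# manager; the directory exists inside the block and is removed again on exit.
if __name__ == '__main__':
  with TempDirectory() as temp_dir:
    print(os.path.isdir(temp_dir))  # True while inside the block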
|
| stefanv/aandete | app/lib/paste/script/templates.py | Python | bsd-3-clause | 10,088 | 0.001685 |
from __future__ import print_function
# (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
import sys
import os
import inspect
from . import copydir
from . import command
from paste.util.template import paste_script_template_renderer
import six
class Template(object):
# Subclasses must define:
# _template_dir (or template_dir())
# summary
# Variables this template uses (mostly for documentation now)
# a list of instances of var()
vars = []
# Eggs that should be added as plugins:
egg_plugins = []
# Templates that must be applied first:
required_templates = []
# Use Cheetah for substituting templates:
use_cheetah = False
# If true, then read all the templates to find the variables:
read_vars_from_templates = False
# You can also give this function/method to use something other
# than Cheetah or string.Template. The function should be of the
# signature template_renderer(content, vars, filename=filename).
# Careful you don't turn this into a method by putting a function
# here (without staticmethod)!
template_renderer = None
def __init__(self, name):
self.name = name
self._read_vars = None
def module_dir(self):
"""Returns the module directory of this template."""
mod = sys.modules[self.__class__.__module__]
return os.path.dirname(mod.__file__)
def template_dir(self):
assert self._template_dir is not None, (
"Template %r didn't set _template_dir" % self)
if isinstance( self._template_dir, tuple):
return self._template_dir
else:
return os.path.join(self.module_dir(), self._template_dir)
def run(self, command, output_dir, vars):
self.pre(command, output_dir, vars)
self.write_files(command, output_dir, vars)
self.post(command, output_dir, vars)
def check_vars(self, vars, cmd):
expect_vars = self.read_vars(cmd)
if not expect_vars:
# Assume that variables aren't defined
return vars
converted_vars = {}
unused_vars = vars.copy()
errors = []
for var in expect_vars:
if var.name not in unused_vars:
if cmd.interactive:
prompt = 'Enter %s' % var.full_description()
response = cmd.challenge(prompt, var.default, var.should_echo)
converted_vars[var.name] = response
elif var.default is command.NoDefault:
errors.append('Required variable missing: %s'
% var.full_description())
else:
converted_vars[var.name] = var.default
else:
converted_vars[var.name] = unused_vars.pop(var.name)
if errors:
raise command.BadCommand(
'Errors in variables:\n%s' % '\n'.join(errors))
converted_vars.update(unused_vars)
vars.update(converted_vars)
        return converted_vars
def read_vars(self, command=None):
if self._read_vars is not None:
return self._read_vars
assert (not self.read_vars_from_templates
or self.use_cheetah), (
"You can only read variables from templates if using Cheetah")
if not self.read_vars_from_templates:
            self._read_vars = self.vars
return self.vars
vars = self.vars[:]
var_names = [var.name for var in self.vars]
read_vars = find_args_in_dir(
self.template_dir(),
verbose=command and command.verbose > 1).items()
read_vars.sort()
for var_name, var in read_vars:
if var_name not in var_names:
vars.append(var)
self._read_vars = vars
return vars
def write_files(self, command, output_dir, vars):
template_dir = self.template_dir()
if not os.path.exists(output_dir):
print("Creating directory %s" % output_dir)
if not command.simulate:
# Don't let copydir create this top-level directory,
# since copydir will svn add it sometimes:
os.makedirs(output_dir)
copydir.copy_dir(template_dir, output_dir,
vars,
verbosity=command.verbose,
simulate=command.options.simulate,
interactive=command.interactive,
overwrite=command.options.overwrite,
indent=1,
use_cheetah=self.use_cheetah,
template_renderer=self.template_renderer)
def print_vars(self, indent=0):
vars = self.read_vars()
var.print_vars(vars)
def pre(self, command, output_dir, vars):
"""
Called before template is applied.
"""
pass
def post(self, command, output_dir, vars):
"""
Called after template is applied.
"""
pass
NoDefault = command.NoDefault
class var(object):
def __init__(self, name, description,
default='', should_echo=True):
self.name = name
self.description = description
self.default = default
self.should_echo = should_echo
def __repr__(self):
return '<%s %s default=%r should_echo=%s>' % (
self.__class__.__name__,
self.name, self.default, self.should_echo)
def full_description(self):
if self.description:
return '%s (%s)' % (self.name, self.description)
else:
return self.name
def print_vars(cls, vars, indent=0):
max_name = max([len(v.name) for v in vars])
for var in vars:
if var.description:
print('%s%s%s %s' % (
' '*indent,
var.name,
' '*(max_name-len(var.name)),
var.description))
else:
print(' %s' % var.name)
if var.default is not command.NoDefault:
print(' default: %r' % var.default)
if var.should_echo is True:
print(' should_echo: %s' % var.should_echo)
print()
print_vars = classmethod(print_vars)
class BasicPackage(Template):
_template_dir = 'paster-templates/basic_package'
summary = "A basic setuptools-enabled package"
vars = [
var('version', 'Version (like 0.1)'),
var('description', 'One-line description of the package'),
var('long_description', 'Multi-line description (in reST)'),
var('keywords', 'Space-separated keywords/tags'),
var('author', 'Author name'),
var('author_email', 'Author email'),
var('url', 'URL of homepage'),
var('license_name', 'License name'),
var('zip_safe', 'True/False: if the package can be distributed as a .zip file', default=False),
]
template_renderer = staticmethod(paste_script_template_renderer)
_skip_variables = ['VFN', 'currentTime', 'self', 'VFFSL', 'dummyTrans',
'getmtime', 'trans']
def find_args_in_template(template):
if isinstance(template, six.string_types):
# Treat as filename:
import Cheetah.Template
template = Cheetah.Template.Template(file=template)
if not hasattr(template, 'body'):
# Don't know...
return None
method = template.body
args, varargs, varkw, defaults = inspect.getargspec(method)
defaults=list(defaults or [])
vars = []
while args:
if len(args) == len(defaults):
default = defaults.pop(0)
else:
default = command.NoDefault
arg = args.pop(0)
if arg in _skip_variables:
continue
# @@: No way to get description yet
vars.append(
var(arg, description=None,
default=default))
return vars
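# Added illustration (not part of the original module): full_description() is the text that
# check_vars() shows when prompting interactively for a missing template variable.
assert var('version', 'Version (like 0.1)').full_description() == 'version (Version (like 0.1))'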
def find_args_in_dir(dir, verbo
|
| amlyj/pythonStudy | 2.7/crawlers/jkxy/jk_utils.py | Python | mit | 4,771 | 0.002144 |
# -*- coding=utf-8 -*-
import requests
import os
import json
import sys
import time
reload(sys)
sys.setdefaultencoding('utf8')
download_base_url = 'http://www.jikexueyuan.com/course/video_download'
cookie_map = 'gr_user_id=eb91fa90-1980-4500-a114-6fea026da447; _uab_collina=148758210602708013401536; connect.sid=s%3AsRUeQ8XeWpWREBnEnaAWt31xIIpwZHj0.otjTDyQcivT1X65RfMF%2B2mpSfgjAoC3%2BBog9Z8C9NCo; _gat=1; _umdata=2FB0BDB3C12E491D192D688906F3F911DBC9CBDAAC2337399CD12353C2D45B7A1BAFC8FE8A49D872CD43AD3E795C914C7A1B39D73E9DDB85B7E1FAADEEA5709A; uname=king_aric; uid=3034284; code=MNla69; authcode=29e9YmwFSjDxwSHA4AZN%2B3s%2B7%2BcEm6ZdlmeMMoEKxP5an1nNvLlH96ke%2FL34Br0NXXoQ%2FcPNkhbXSOUOF2ZM5RPSw%2F0sjlravys3aCucZ1C12Fn2UxWA8V8J%2FPSV; avatar=https%3A%2F%2Fassets.jikexueyuan.com%2Fuser%2Favtar%2Fdefault.gif; ca_status=0; vip_status=1; level_id=1; is_expire=0; domain=0JjajqVPq; _ga=GA1.2.1312183079.1487582095; gr_session_id_aacd01fff9535e79=39cf51ef-683e-4338-b251-e487baed02bc; gr_cs1_39cf51ef-683e-4338-b251-e487baed02bc=uid%3A3034284; QINGCLOUDELB=84b10773c6746376c2c7ad1fac354ddfd562b81daa2a899c46d3a1e304c7eb2b|WK6ZY|WK6YR Host:www.jikexueyuan.com'
def download_execute(root_dir, result_list, sort=False):
"""
result_list [{'href':'','title':'','course_id':''},]
"""
number = 0
for doc in result_list:
number += 1
if sort:
doc_path = u'%s/%d.%s' % (root_dir, number, doc.get('title'))
else:
doc_path = u'%s/%s' % (root_dir, doc.get('title'))
print doc_path
create_dir(doc_path)
seq, keep_running = 0, True
while keep_running:
seq += 1
download_url = '%s?seq=%d&course_id=%d' % (download_base_url, seq, doc.get('course_id'))
for i in range(10): # retry 10 times
result_code = request_data(download_url, doc_path, seq)
if result_code == 404:
keep_running = False
break
elif result_code == 500:
print u'重试%d : %s' % (i + 1, download_url)
continue
break
def request_data(download_url, doc_path, seq):
"""
:return 200,404,500
"""
try:
if not os.path.exists(doc_path):return 404
response = requests.get(url=download_url, headers={'Cookie': cookie_map})
if response.status_code == 200:
download_data = response.content
download_data = json.loads(download_data)
# print download_data, download_data.get('data').get('title')
if download_data.get('code') != 200:
if download_data.get('code') == 10101:
return 404
print u'request error: %s' % download_data.get('msg').decode('utf-8')
return 500
file_path = u'%s/%d.%s.mp4' % (
doc_path, seq, download_data.get('data').get('title'))
if os.path.exists(file_path):
print u'%s 已经存在' % file_path
return 200
begin_time = time.time()
r = requests.get(download_data.get('data').get('urls'))
with open(file_path, "wb") as code:
try:
code.write(r.content)
print u'下载:%s %d秒' % (file_path, int(time.time() - begin_time))
return 200
except Exception:
                    print u'下载:%s 失败' % file_path
return 500
else:
print u'%s 请求失败,状态%d' % (download_url, response.status_code)
return 500
except Exception, e:
        print u'%s 请求失败,\n异常信息:%s' % (download_url, e)
return 500
def create_dir(path):
if not os.path.exists(path):
try:
os.makedirs(path)
except Exception, e:
print u'文件夹%s 创建失败;\n %s' % (path, e)
else:
print u'文件夹%s 已经存在' % path
def parent_dir(path):
if path[-1] == '/': path = path[0:-1]
return '/'.join(path.split('/')[0:-1])
def del_dir(path):
assert os.path.exists(path) and os.path.isdir(path)
for root, dirs, files in os.walk(path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(path)
def create_file(name, mode='r', data=""):
try:
parent_path = parent_dir(name)
if parent_path and not os.path.exists(parent_path): create_dir(parent_path)
with open(name, mode)as f:
f.write(data)
except Exception, e:
print u'%s 创建失败\n异常:%s' % (name, e)
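# Added illustration (not in the original file): parent_dir strips the last path component
# and tolerates a trailing slash.
assert parent_dir('/root/some/dir/') == '/root/some'
assert parent_dir('/root/some/dir') == '/root/some'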
|
| rwl/PyCIM | CIM15/IEC61970/Informative/InfWork/Request.py | Python | mit | 5,609 | 0.002318 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Document import Document
class Request(Document):
"""A request for work, service or project.A request for work, service or project.
"""
def __init__(self, actionNeeded='', priority='', corporateCode='', ErpQuoteLineItem=None, Projects=None, Organisation=None, Works=None, *args, **kw_args):
"""Initialises a new 'Request' instance.
@param actionNeeded: Based on the current 'Status.status', the action that is needed before this Request can transition to the desired state, such as initiating the requested Work. For example, missing or additionally needed information may be required from the requesting organisation before a work Design may be created.
@param priority: The priority of this request.
@param corporateCode: The corporate code for this request.
@param ErpQuoteLineItem:
@param Projects:
@param Organisation:
@param Works:
"""
#: Based on the current 'Status.status', the action that is needed before this Request can transition to the desired state, such as initiating the requested Work. For example, missing or additionally needed information may be required from the requesting organisation before a work Design may be created.
self.actionNeeded = actionNeeded
#: The priority of this request.
self.priority = priority
#: The corporate code for this request.
self.corporateCode = corporateCode
self._ErpQuoteLineItem = None
self.ErpQuoteLineItem = ErpQuoteLineItem
self._Projects = []
self.Projects = [] if Projects is None else Projects
self._Organisation = None
        self.Organisation = Organisation
self._Works = []
self.Works = [] if Works is None else Works
super(Request, self).__init__(*args, **kw_args)
_attrs = ["actionNeeded", "priority", "corporateCode"]
_attr_types = {"actionNeeded": str, "priority": str, "corporateCode": str}
_defaults = {"actionNeeded": '', "priority": '', "corporateCode": ''}
_enums = {}
_refs = ["ErpQuoteLineItem", "Projects", "Organisation", "Works"]
_many_refs = ["Projects", "Works"]
def getErpQuoteLineItem(self):
return self._ErpQuoteLineItem
def setErpQuoteLineItem(self, value):
if self._ErpQuoteLineItem is not None:
self._ErpQuoteLineItem._Request = None
self._ErpQuoteLineItem = value
if self._ErpQuoteLineItem is not None:
self._ErpQuoteLineItem.Request = None
self._ErpQuoteLineItem._Request = self
ErpQuoteLineItem = property(getErpQuoteLineItem, setErpQuoteLineItem)
def getProjects(self):
return self._Projects
def setProjects(self, value):
for p in self._Projects:
filtered = [q for q in p.Requests if q != self]
self._Projects._Requests = filtered
for r in value:
if self not in r._Requests:
r._Requests.append(self)
self._Projects = value
Projects = property(getProjects, setProjects)
def addProjects(self, *Projects):
for obj in Projects:
if self not in obj._Requests:
obj._Requests.append(self)
self._Projects.append(obj)
def removeProjects(self, *Projects):
for obj in Projects:
if self in obj._Requests:
obj._Requests.remove(self)
self._Projects.remove(obj)
def getOrganisation(self):
return self._Organisation
def setOrganisation(self, value):
if self._Organisation is not None:
filtered = [x for x in self.Organisation.Requests if x != self]
self._Organisation._Requests = filtered
self._Organisation = value
if self._Organisation is not None:
if self not in self._Organisation._Requests:
self._Organisation._Requests.append(self)
Organisation = property(getOrganisation, setOrganisation)
def getWorks(self):
return self._Works
def setWorks(self, value):
for x in self._Works:
x.Request = None
for y in value:
y._Request = self
self._Works = value
Works = property(getWorks, setWorks)
def addWorks(self, *Works):
for obj in Works:
obj.Request = self
def removeWorks(self, *Works):
for obj in Works:
obj.Request = None
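# Added behaviour sketch (uses a minimal stand-in instead of a real CIM Work object so the
# example stays self-contained, and assumes Request() can be built with its default
# arguments): addWorks() points each added object's Request back at this Request, and
# removeWorks() clears it again.
if __name__ == '__main__':
    class _FakeWork(object):
        Request = None
    req, wrk = Request(), _FakeWork()
    req.addWorks(wrk)
    assert wrk.Request is req
    req.removeWorks(wrk)
    assert wrk.Request is None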
|
| google/uncertainty-baselines | experimental/single_model_uncertainty/flags.py | Python | apache-2.0 | 8,929 | 0.009184 |
# coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common flags."""
from typing import Any, Dict, List
from absl import flags
from uncertainty_baselines.datasets import datasets
from uncertainty_baselines.models import models
FLAGS = flags.FLAGS
def serialize_flags(flag_list: Dict[str, Any]) -> str:
string = ''
for flag_name, flag_value in flag_list.items():
string += '--{}={}\n'.format(flag_name, flag_value)
# Remove the final trailing newline.
return string[:-1]
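# Added example (hypothetical flag value, not from the original module): serialize_flags
# renders a flag dict as newline-separated --name=value pairs with no trailing newline.
assert serialize_flags({'seed': 42}) == '--seed=42'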
def define_flags() -> List[str]:
"""Define common flags."""
predefined_flags = set(FLAGS)
flags.DEFINE_string('experiment_name', None, 'Name of this experiment.')
# TPU Job flags.
flags.DEFINE_string('tpu', None, 'Name of the TPU to use.')
flags.DEFINE_enum(
'mode',
'train_and_eval',
['train', 'eval', 'train_and_eval'],
'Whether to execute train and/or eval.')
flags.DEFINE_integer(
'num_cores', None, 'How many TPU cores or GPUs the job is running on.')
flags.DEFINE_bool('run_ood', False, 'Whether to run OOD jobs with eval job.')
flags.DEFINE_bool('use_cpu', False, 'Whether to run on CPU.')
flags.DEFINE_bool('use_gpu', False, 'Whether to run on GPU or TPU.')
# Train/eval loop flags.
flags.DEFINE_integer(
'checkpoint_step', -1, 'Step of the checkpoint to restore from.')
flags.DEFINE_enum(
'dataset_name',
None,
datasets.get_dataset_names(),
'Name of the dataset to use.')
flags.DEFINE_enum(
'ood_dataset_name',
None,
datasets.get_dataset_names(),
'Name of the OOD dataset to use for evaluation.')
flags.DEFINE_integer(
'eval_frequency',
None,
'How many steps between evaluating on the (validation and) test set.')
flags.DEFINE_integer('num_bins', 15, 'Number of bins for ECE.')
flags.DEFINE_string('output_dir', None, 'Base output directory.')
flags.DEFINE_enum(
'model_name',
None,
models.get_model_names(),
'Name of the model to use.')
flags.DEFINE_integer(
'log_frequency',
100,
'How many steps between logging the metrics.')
flags.DEFINE_integer('train_steps', None, 'How many steps to train for.')
# Hyperparamater flags.
flags.DEFINE_integer('batch_size', None, 'Training batch size.')
flags.DEFINE_integer('eval_batch_size', None, 'Validation/test batch size.')
flags.DEFINE_float('learning_rate', None, 'Learning rate.')
flags.DEFINE_string(
'learning_rate_schedule',
'constant',
'Learning rate schedule to use.')
flags.DEFINE_integer('schedule_hparams_warmup_epochs', 1,
'Number of epochs for a linear warmup to the initial '
'learning rate. Use 0 to do no warmup.')
flags.DEFINE_float('schedule_hparams_decay_ratio', 0.2,
'Amount to decay learning rate.')
flags.DEFINE_list('schedule_hparams_decay_epochs', ['60', '120', '160'],
'Epochs to decay learning rate by.')
flags.DEFINE_string('optimizer', 'adam', 'Optimizer to use.')
flags.DEFINE_float('optimizer_hparams_momentum', 0.9, 'SGD momentum.')
flags.DEFINE_float('optimizer_hparams_beta_1', 0.9, 'Adam beta_1.')
flags.DEFINE_float('optimizer_hparams_beta_2', 0.999, 'Adam beta_2.')
flags.DEFINE_float('optimizer_hparams_epsilon', 1e-7, 'Adam epsilon.')
flags.DEFINE_float('weight_decay', 0.0, 'Weight decay for optimizer.')
flags.DEFINE_float('l2_regularization', 1e-4, 'L2 regularization for models.')
flags.DEFINE_float(
'focal_loss_gamma', 0.0, 'The gamma parameter in the focal loss. '
'If gamma=0.0, the focal loss is equivalent to cross entropy loss.')
flags.DEFINE_integer('seed', 42, 'Random seed.')
flags.DEFINE_float(
'validation_percent',
0.0,
'Percent of training data to hold out and use as a validation set.')
flags.DEFINE_integer(
'shuffle_buffer_size', 16384, 'Dataset shuffle buffer size.')
# Model flags, Wide Resnet
flags.DEFINE_integer('wide_resnet_depth', 28,
'Depth of wide resnet model.')
flags.DEFINE_integer('wide_resnet_width_multiplier', 10,
'Width multiplier for wide resnet model.')
flags.DEFINE_integer('num_classes', 10, 'Number of label classes.')
# Flags relating to genomics_cnn model
flags.DEFINE_integer('len_seqs', 250,
'Sequence length, only used for genomics dataset.')
flags.DEFINE_integer('num_motifs', 1024,
'Number of motifs, only used for the genomics dataset.')
flags.DEFINE_integer('len_motifs', 20,
'Length of motifs, only used for the genomics dataset.')
flags.DEFINE_integer('num_denses', 128,
'Number of denses, only used for the genomics dataset.')
# Flags relating to SNGP model
flags.DEFINE_float('dropout_rate', 0.1, 'Dropout rate for dropout layers.')
flags.DEFINE_bool(
'before_conv_dropout', False,
'Whether to use filter wise dropout before convolutionary layers. ')
flags.DEFINE_bool(
'use_mc_dropout', False,
'Whether to use Monte Carlo dropout for the hidden layers.')
flags.DEFINE_bool('use_spec_norm', False,
'Whether to apply spectral normalization.')
flags.DEFINE_bool('use_gp_layer', False,
'Whether to use Gaussian process as the output layer.')
# Model flags, Spectral Normalization.
flags.DEFINE_integer(
'spec_norm_iteration', 1,
'Number of power iterations to perform for estimating '
'the spectral norm of weight matrices.')
flags.DEFINE_float('spec_norm_bound', 6.,
'Upper bound to spectral norm of weight matrices.')
# Model flags, Gaussian Process layer.
flags.DEFINE_float('gp_bias', 0., 'The bias term for GP layer.')
flags.DEFINE_float(
'gp_scale', 1.,
'The length-scale parameter for the RBF kernel of the GP layer.')
flags.DEFINE_integer(
'gp_input_dim', 128,
'The dimension to reduce the neural network input to for the GP layer '
'(via random Gaussian projection which preserves distance by the '
      ' Johnson-Lindenstrauss lemma). If -1 then no dimension reduction.')
flags.DEFINE_integer(
'gp_hidden_dim', 1024,
'The hidden dimension of the GP layer, which corresponds to the number '
      'of random features used for the approximation.')
flags.DEFINE_bool(
'gp_input_normalization', False,
'Whether to normalize the input using LayerNorm for GP layer.'
'This is similar to automatic relevance determination (ARD) in the '
'classic GP learning.')
flags.DEFINE_float(
'gp_cov_ridge_penalty', 1.0,
'The Ridge penalty parameter for GP posterior covariance.')
flags.DEFINE_float(
'gp_cov_discount_factor', 0.999,
'The discount factor to compute the moving average of '
'precision matrix.')
flags.DEFINE_float(
'gp_mean_field_factor', -1,
'The tunable multiplicative factor used in the mean-field approximation '
'for the posterior mean of softmax Gaussian process. If -1 then use '
'posterior mode instead of posterior mean. See [2] for detail.')
flags.mark_flag_as_required('dataset_name')
flags.mark_flag_as_required('experiment_name')
flags.mark_flag_as_required('model_name')
# Flags relating to OOD metrics
flags.DEFINE_list(
'sensitivity_thresholds', ['0.05', '0.95', '10'],
'List of sensitivities at which to calculate specificity.'
      ' The list should contain '
'[lower bound, upper bound, num_elements]')
flags.DEFINE_list(
's
|
| eladnoor/ms-tools | james/isotope_util.py | Python | mit | 1,554 | 0.01287 |
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 30 14:32:42 2015
@author: noore
"""
import numpy as np
from scipy.misc import comb # comb(N,k) = The number of combinations of N things taken k at a time
THETA = 0.011 # the natural abundance of 13C among the two isotopes (13C and 12C).
def compute_fractions(counts):
"""
Calculates the isotope fractions of a compound, given the list of
counts (assuming it starts from M+0).
Usage:
        counts - a list of positive values representing the counts of each isotope starting from M+0
Returns:
fractions - a list of values between 0..1 that represent the fraction
of each isotope from the total pool, after correcting for the
natural abundance of 13C
"""
N = len(counts)-1
F = np.matrix(np.zeros((N+1, N+1)))
for i in range(N+1):
for j in range(i+1):
F[i,j] = comb(N-j, i-j) * THETA**(i-j) * (1-THETA)**(N-j)
X = np.matrix(counts, dtype=float).T
    corrected_counts = list((F.I * X).flat)
    return corrected_counts
if __name__ == '__main__':
counts = [900, 100, 5, 900, 5000]
Y = compute_fractions(counts)
print("The corrected isotope relative abundances are:")
print('-'*50)
print( ' | '.join(map(lambda d: ' M + %d ' % d, range(len(counts)))))
print(' | '.join(map(lambda s: '%4.1e' % (s*100), Y)))
print('-'*50)
print(compute_fractions([1]*7))
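    # Added sanity check (not in the original script): with no 13C enrichment, a two-carbon
    # compound's theoretical counts should correct back to essentially [1, 0, 0].
    theoretical = [(1 - THETA)**2, 2 * THETA * (1 - THETA)**2, THETA**2 * (1 - THETA)**2]
    print(np.round(compute_fractions(theoretical), 6))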
|
| jensonjose/utilbox | utilbox/os_utils/dir_utils.py | Python | mit | 7,866 | 0.003051 |
"""
Utility module to manipulate directories.
"""
import os
import types
import shutil
__author__ = "Jenson Jose"
__email__ = "jensonjose@live.in"
__status__ = "Alpha"
class DirUtils:
"""
Utility class containing methods to manipulate directories.
"""
def __init__(self):
pass
@staticmethod
def create_dir(dir_path):
"""
Creates a directory at the specified path.
:param dir_path: The full path of the directory to be created.
:return: True, if directory was created, False otherwise.
:rtype: bool
"""
try:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
return True
return False
except Exception as ex:
return False
@staticmethod
def create_dir_path_string(path_components, separator="/"):
"""
Combines list of supplied path components to create a full directory path.
:param path_components: List of components to be part of the final directory path.
:param separator: Separator to be used for isolating directory path components.
:return: The full directory path string, if path_components is a valid list, False otherwise.
:rtype: str
"""
if isinstance(path_components, types.ListType):
if len(path_components) > 0:
path_string = ""
for component in path_components:
path_string += component + separator
path_string = path_string[:-1]
path_string = os.path.normpath(path_string)
return path_string
return False
@staticmethod
def fix_path(path1, path2):
"""
Combines 2 given paths to form OS compliant path with correct path separators.
Example:
1st path (Linux): /root/some_dir; 2nd path (Windows): \test\data
After combining the above paths,
On Windows: \root\some_dir\test\data
On Linux: /root/some_dir/test/data
:param path1: The first path to be combined.
:param path2: The second path to be combined.
:return: The final combined path.
:rtype: str
"""
return os.path.normpath(path1 + path2)
@staticmethod
def check_valid_dir(dir_path):
"""
Verifies if given directory path exists and is a valid directory.
:param dir_path: The full path of the directory to be verified.
:return: True if path contains a valid directory, False otherwise.
:rtype: bool
"""
if os.path.exists(dir_path):
if os.path.isdir(dir_path):
return True
return False
@staticmethod
def create_archive(output_file_name, source_path, archive_format="zip"):
"""
Creates a compressed archive of the specified directory.
:param output_file_name: Name of the output archive file.
:param source_path: The full path of the source to be archived.
:param archive_format: The format to be used for archiving, and can be either ZIP, TAR, BZTAR or GZTAR.
:return: True if archiving was successful, False otherwise.
:rtype: bool
"""
if shutil.make_archive(output_file_name, archive_format.lower(), source_path):
return True
return False
@staticmethod
def get_dir_contents(source_dir, filter_pattern=None, meta_data=False):
"""
Returns a list of directory contents matching the supplied search pattern.
If no pattern is supplied all directory contents are returned.
:param source_dir: The path of the directory to be searched.
:param filter_pattern: The pattern to be used to search the directory.
:param meta_data: If True, returns a list of dictionaries containing meta data of each individual entry.
:return: List of matching entries if the directory is valid, False otherwise.
:rtype: list
"""
from utilbox.os_utils import FileUtils
filtered_entry_list = []
if DirUtils.check_valid_dir(source_dir):
dir_entries = os.listdir(source_dir)
for dir_entry in dir_entries:
if filter_pattern is not None:
import re
compiled_pattern = re.compile(filter_pattern)
if len(compiled_pattern.findall(dir_entry)) > 0:
if meta_data:
dir_entry_path = DirUtils.create_dir_path_string([source_dir,
dir_entry])
if DirUtils.check_valid_dir(dir_entry_path):
meta_data = DirUtils.get_dir_metadata(dir_entry_path)
elif FileUtils.check_valid_file(dir_entry_path):
meta_data = FileUtils.get_file_metadata(dir_entry_path)
if meta_data:
filtered_entry_list.append(meta_data)
else:
filtered_entry_list.append(dir_entry)
else:
if meta_data:
dir_entry_path = DirUtils.create_dir_path_string([source_dir,
dir_entry])
if DirUtils.check_valid_dir(dir_entry_path):
meta_data = DirUtils.get_dir_metadata(dir_entry_path)
elif FileUtils.check_valid_file(dir_entry_path):
meta_data = FileUtils.get_file_metadata(dir_entry_path)
if meta_data:
filtered_entry_list.append(meta_data)
else:
filtered_entry_list.append(dir_entry)
return filtered_entry_list
return False
@staticmethod
def get_dir_metadata(dir_path, size_unit="k", time_format="%Y-%m-%d %I:%M:%S"):
"""
Returns directory meta-data containing,
- Last modified time
- Directory size (sum of all file sizes)
- Directory name
- Directory parent directory
- Directory full path
:param dir_path: The full path of the directory to be analyzed.
:param size_unit: Units in which to report directory size.
:param time_format: Format in which to report directory modification time.
:return: Dictionary containing relevant directory meta data.
:rtype: dict
"""
if DirUtils.check_valid_dir(dir_path):
import datetime
last_modified_time = datetime.datetime.fromtimestamp(os.path.getmtime(dir_path)).strftime(time_format)
# get file size in bytes
file_size = os.path.getsize(dir_path)
base_unit = 1024.0
decimal_limit = 2
if size_unit == "b":
pass
elif size_unit == "k":
file_size /= base_unit
elif size_unit == "m":
file_size = (file_size / base_unit) / base_unit
elif size_unit == "g":
file_size = ((file_size / base_unit) / base_unit) / base_unit
# limit floating-point value to X decimal points
if size_unit != "b":
file_size = round(file_size, decimal_limit)
            return {"LAST_MODIFIED": str(last_modified_time),
"SIZE": str(file_size),
"NAME": str(os.path.basename(dir_path)),
"PARENT_DIRECTORY": str(os.path.dirname(d
|
ir_path)),
"FULL_PATH": str(dir_path)}
return False
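# Added usage sketch (hypothetical paths, not part of the original module): the helpers are
# plain staticmethods, so they are called on the class itself.
if __name__ == '__main__':
    print(DirUtils.create_dir_path_string(["a", "b", "c"]))  # a/b/c, normalized for the OS
    print(DirUtils.check_valid_dir("/tmp"))                  # True on most Unix systems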
|
| dhuang/incubator-airflow | airflow/providers/amazon/aws/operators/ecs.py | Python | apache-2.0 | 17,123 | 0.002686 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import re
import sys
from collections import deque
from datetime import datetime
from typing import Dict, Generator, Optional
from botocore.waiter import Waiter
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator, XCom
from airflow.providers.amazon.aws.exceptions import ECSOperatorError
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
from airflow.providers.amazon.aws.hooks.logs import AwsLogsHook
from airflow.typing_compat import Protocol, runtime_checkable
from airflow.utils.session import provide_session
def should_retry(exception: Exception):
"""Check if exception is related to ECS resource quota (CPU, MEM)."""
if isinstance(exception, ECSOperatorError):
return any(
quota_reason in failure['reason']
for quota_reason in ['RESOURCE:MEMORY', 'RESOURCE:CPU']
for failure in exception.failures
)
return False
@runtime_checkable
class ECSProtocol(Protocol):
"""
A structured Protocol for ``boto3.client('ecs')``. This is used for type hints on
:py:meth:`.ECSOperator.client`.
.. seealso::
- https://mypy.readthedocs.io/en/latest/protocols.html
- https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html
"""
def run_task(self, **kwargs) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task""" # noqa: E501
...
def get_waiter(self, x: str) -> Waiter:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.get_waiter""" # noqa: E501
...
def describe_tasks(self, cluster: str, tasks) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_tasks""" # noqa: E501
...
def stop_task(self, cluster, task, reason: str) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.stop_task""" # noqa: E501
...
def describe_task_definition(self, taskDefinition: str) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.describe_task_definition""" # noqa: E501
...
def list_tasks(self, cluster: str, launchType: str, desiredStatus: str, family: str) -> Dict:
"""https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.list_tasks""" # noqa: E501
...
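# Hedged illustration, not part of the provider module: how the Protocol above is
# typically consumed as a type hint. The helper name and the 'default' family are
# assumptions for demonstration only; the function is never called here.
def _describe_running_tasks(client: ECSProtocol, cluster: str) -> Dict:
    """Describe the running tasks in a cluster via any ECSProtocol-compatible client."""
    running = client.list_tasks(
        cluster=cluster, launchType='FARGATE', desiredStatus='RUNNING', family='default'
    )
    return client.describe_tasks(cluster=cluster, tasks=running.get('taskArns', []))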
class ECSOperator(BaseOperator):
"""
Execute a task on AWS ECS (Elastic Container Service)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:ECSOperator`
:param task_definition: the task definition name on Elastic Container Service
:type task_definition: str
:param cluster: the cluster name on Elastic Container Service
:type cluster: str
:param overrides: the same parameter that boto3 will receive (templated):
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/ecs.html#ECS.Client.run_task
:type overrides: dict
:param aws_conn_id: connection id of AWS credentials / region name. If None,
credential boto3 strategy will be used
(http://boto3.readthedocs.io/en/latest/guide/configuration.html).
:type aws_conn_id: str
:param region_name: region name to use in AWS Hook.
Override the region_name in connection (if provided)
:type region_name: str
:param launch_type: the launch type on which to run your task ('EC2' or 'FARGATE')
:type launch_type: str
:param capacity_provider_strategy: the capacity provider strategy to use for the task.
When capacity_provider_strategy is specified, the launch_type parameter is omitted.
If no capacity_provider_strategy or launch_type is specified,
the default capacity provider strategy for the cluster is used.
:type capacity_provider_strategy: list
:param group: the name of the task group associated with the task
:type group: str
:param placement_constraints: an array of placement constraint objects to use for
the task
:type placement_constraints: list
:param placement_strategy: an array of placement strategy objects to use for
the task
:type placement_strategy: list
:param platform_version: the platform version on which your task is running
:type platform_version: str
:param network_configuration: the network configuration for the task
:type network_configuration: dict
:param tags: a dictionary of tags in the form of {'tagKey': 'tagValue'}.
:type tags: dict
:param awslogs_group: the CloudWatch group where your ECS container logs are stored.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:type awslogs_group: str
:param awslogs_region: the region in which your CloudWatch logs are stored.
If None, this is the same as the `region_name` parameter. If that is also None,
this is the default AWS region based on your connection settings.
:type awslogs_region: str
:param awslogs_stream_prefix: the stream prefix that is used for the CloudWatch logs.
This is usually based on some custom name combined with the name of the container.
Only required if you want logs to be shown in the Airflow UI after your job has
finished.
:type awslogs_stream_prefix: str
:param reattach: If set to True, will check if the task previously launched by the task_instance
is already running. If so, the operator will attach to it instead of starting a new task.
This is to avoid relaunching a new task when the connection drops between Airflow and ECS while
the task is running (when the Airflow worker is restarted for example).
:type reattach: bool
:param quota_retry: Config if and how to retry _start_task() for transient errors.
:type quota_retry: dict
"""
ui_color = '#f0ede4'
template_fields = ('overrides',)
template_fields_renderers = {
"overrides": "json",
"network_configuration": "json",
"tags": "json",
"quota_retry": "json",
}
REATTACH_XCOM_KEY = "ecs_task_arn"
REATTACH_XCOM_TASK_ID_TEMPLATE = "{task_id}_task_arn"
def __init__(
self,
*,
task_definition: str,
cluster: str,
overrides: dict,
aws_conn_id: Optional[str] = None,
region_name: Optional[str] = None,
launch_type: str = 'EC2',
capacity_provider_strategy: Optional[list] = None,
group: Optional[str] = None,
placement_constraints: Optional[list] = None,
placement_strategy: Optional[list] = None,
platform_version: Optional[str] = None,
network_configuration: Optional[dict] = None,
tags: Optional[dict] = None,
awslogs_group: Optional[str] = None,
awslogs_region: Optional[str] = None,
awslogs_stream_prefix: Optional[str] = None,
propagate_tags: Optional[str] = None,
quota_retry: Optional[dict] = None,
reattach: bool = False,
**kwargs,
):
super().__init__(**kwargs)
        self.aws_conn_id = aws_conn_id
|
ab9621/PogoLibrary
|
pogoInput.py
|
Python
|
gpl-3.0
| 17,129
| 0.021834
|
import numpy as np
import warnings
import subprocess
import pogoFunctions as pF
import pdb
from PolyInterface import poly
class PogoInput:
def __init__(self,
fileName,
elementTypes,
signals,
historyMeasurement,
nodes = None,
elements = None,
geometryFile = None,
precision=8,
targetMeshSize = 5e-5,
nDims=2,
nDofPerNode = None,
notes = None,
runName = 'pogoJob',
nt = 100,
dt = 1e-8,
elementTypeRefs = None,
materialTypeRefs = None,
orientationRefs = None,
elementParameters = None,
materials = [[0,7e10,0.34,2700],],
orientations = None,
boundaryConditions = None,
historyMeasurementFrequency = 20,
fieldStoreIncrements = None,
folderIn = None,
totalForce = False,
version = 1.03,
writeFile = True):
self.fileName = fileName
### Header
self.header = np.array(['']*20, dtype='str')
if version not in [1.03, 1.04]:
raise ValueError('Input file version must be 1.03 or 1.04.')
headerString = '%pogo-inp{}'.format(version)
for c1 in range(0, len(headerString)):
self.header[c1] = headerString[c1]
### Precision
if precision not in [4,8]:
raise ValueError('Precision must be 4 or 8.')
self.precision = np.array([precision,],dtype='int32')
self.nDims = np.array([nDims,],dtype='int32')
### Number of degrees of freedom per node
if nDofPerNode == None:
nDofPerNode = self.nDims
if nDofPerNode not in [1,2,3]:
raise ValueError('Number of degrees of freedom must be 1, 2 or 3')
self.nDofPerNode = np.array([nDofPerNode,],dtype='int32')
### Set notes
self.notes = np.array(['']*1024, dtype='str')
if notes != None:
if len(notes) > 1024:
notes = notes[:1024]
for character in range(len(notes)):
self.notes[character] = notes[character]
### Set runname
self.runName = np.array(['']*80, dtype='str')
if len(runName) > 80:
runName = runName[:80]
for character in range(0, len(runName)):
self.runName[character] = runName[character]
### Set time step and run time
self.nt = np.array([nt,],dtype='int32')
self.dt = np.array([dt,],dtype=self.getPrecString())
### Node generation if necessary
if not np.any(nodes) and not geometryFile:
raise ValueError('Either a poly file or node/element definitions are required')
elif geometryFile and targetMeshSize and not np.any(elements) and not np.any(nodes):
if geometryFile.split('.')[-1] == 'dxf':
print 'Creating poly file from {}'.format(geometryFile)
poly.poly(geometryFile,elementSize = targetMeshSize,writeFile=True)
if geometryFile.split('.')[-1] == 'poly':
geometryFile = geometryFile[:-5]
if self.nDims == 2:
targetMeshArea = targetMeshSize*targetMeshSize
subprocess.call('triangle -q -j -a{:.12}F {}.poly'.format(targetMeshArea,geometryFile))
elif self.nDims == 3:
targetMeshVolume = targetMeshSize*targetMeshSize*targetMeshSize
### Add cwd
subprocess.call('tetgen {:.12}F {}.poly'.format(targetMeshVolume,geometryFile))
nodes = pF.loadNodeFile(geometryFile+'.1.node')
elements = pF.loadElementFile(geometryFile+'.1.ele')
### Number of nodes and node positions
if np.shape(nodes)[0] != nDims:
raise ValueError('nodes must be in shape (nDims, nNodes).')
self.nNodes = np.array([np.shape(nodes)[1],],dtype = 'int32')
self.nodes = nodes.astype(self.getPrecString()).T
### Number of elements and nodes per element
self.nElements = np.array([np.shape(elements)[1],],dtype='int32')
self.nNodesPerElement = np.array([np.shape(elements)[0],],dtype='int32')
### Element type refs
if elementTypeRefs == None:
elementTypeRefs = np.zeros(self.nElements)
if len(elementTypeRefs) != self.nElements:
raise ValueError('elementTypeRefs must be of length nElements.')
#if min(elementTypeRefs) != 0:
# raise ValueError('elementTypeRefs must be 1 indexed.')
self.elementTypeRefs = elementTypeRefs.astype('int32')# - 1
### Material type refs
if materialTypeRefs == None:
materialTypeRefs = np.zeros(self.nElements)
if len(materialTypeRefs) != self.nElements:
raise ValueError('materialTypeRefs must be of length nElements.')
#if min(materialTypeRefs) != 1:
# raise ValueError('materialTypeRefs must be 1 indexed.')
self.materialTypeRefs = materialTypeRefs.astype('int32') #- 1
### Element orientations
if orientationRefs == None:
orientationRefs = np.zeros(self.nElements,dtype = 'int32')
if len(orientationRefs)!= self.nElements:
raise ValueError('orientationRefs must be of length nElements.')
if min(elementTypeRefs) < 0: #unused values are set to 0 so -1 in zero indexing
raise ValueError('orientationRefs must be 1 indexed.')
self.orientationRefs = orientationRefs.astype('int32')# - 1
### Elements
if np.max(elements) > self.nNodes:
raise ValueError('elements points to nodes which are greater than nNodes.')
if np.min(elements) < 0:
raise ValueError('elements must be 1 indexed.')
self.elements = elements.astype('int32') - 1 #convert to zero indexing
self.elements = self.elements.T
### PML sets
self.nPmlSets = np.array([0,],dtype = 'int32')
self.pmlParams = np.array([0,],dtype = 'int32')
### Element types
self.nElementTypes = np.array([len(elementTypes),],dtype = 'int32')
if elementParameters == None:
elementParameters = np.array([0,]*len(elementTypes), dtype = 'int32')
if np.max(self.elementTypeRefs) > self.nElementTypes - 1:
raise ValueError('elementTypeRefs points to element types greater than the number of types of element.')
self.elementTypes = []
for ii,elementType in enumerate(elementTypes):
self.elementTypes.append(ElementType(elementType,elementParameters[ii],self.getPrecString()))
### Material types
self.nMaterials = np.array([len(materials),], dtype = 'int32')
self.materials = []
for material in materials:
self.materials.append(Material(material,self.getPrecString()))
### Orientations
if orientations == None:
self.nOr = np.array([0,],dtype ='int32')
self.orientations = None
else:
self.orientations = []
self.nOr = np.array([len(orientations),],dtype = 'int32')
for orientation in orientations:
self.orientations.append(Orientation(orientation,self.getPrecString()))
### Boundary conditions
if boundaryConditions == None:
self.nFixDof = np.array([0,],dtype ='int32')
self.boundaryConditions = None
else:
nSets = len(boundaryConditions) / 2
self.nFixDof = np.array([sum([len(boundaryConditions[c1*2]) for c1 in range(nSets)]),],dtype = 'int32')
self.
|
mdakin/engine
|
build/android/gyp/generate_v14_compatible_resources.py
|
Python
|
bsd-3-clause
| 11,922
| 0.008136
|
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Convert Android xml resources to API 14 compatible.
There are two reasons that we cannot just use API 17 attributes,
so we are generating another set of resources by this script.
1. paddingStart attribute can cause a crash on Galaxy Tab 2.
2. There is a bug that paddingStart does not override paddingLeft on
JB-MR1. This is fixed on JB-MR2. b/8654490
Therefore, this resource generation script can be removed when
we drop the support for JB-MR1.
Please refer to http://crbug.com/235118 for the details.
"""
import optparse
import os
import re
import shutil
import sys
import xml.dom.minidom as minidom
from util import build_utils
# Note that we are assuming 'android:' is an alias of
# the namespace 'http://schemas.android.com/apk/res/android'.
GRAVITY_ATTRIBUTES = ('android:gravity', 'android:layout_gravity')
# Almost all the attributes that have "Start" or "End" in
# their names should be mapped.
ATTRIBUTES_TO_MAP = {'paddingStart' : 'paddingLeft',
'drawableStart' : 'drawableLeft',
                     'layout_alignStart' : 'layout_alignLeft',
'layout_marginStart' : 'layout_marginLeft',
'layout_alignParentStart' : 'layout_alignParentLeft',
'layout_toStartOf' : 'layout_toLeftOf',
'paddingEnd' : 'paddingRight',
'drawableEnd' : 'drawableRight',
'layout_alignEnd' : 'layout_alignRight',
'layout_marginEnd' : 'layout_marginRight',
'layout_alignParentEnd' : 'layout_alignParentRight',
'layout_toEndOf' : 'layout_toRightOf'}
ATTRIBUTES_TO_MAP = dict(['android:' + k, 'android:' + v] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
ATTRIBUTES_TO_MAP_REVERSED = dict([v, k] for k, v
in ATTRIBUTES_TO_MAP.iteritems())
def IterateXmlElements(node):
"""minidom helper function that iterates all the element nodes.
Iteration order is pre-order depth-first."""
if node.nodeType == node.ELEMENT_NODE:
yield node
for child_node in node.childNodes:
for child_node_element in IterateXmlElements(child_node):
yield child_node_element
def ParseAndReportErrors(filename):
try:
return minidom.parse(filename)
except Exception:
import traceback
traceback.print_exc()
sys.stderr.write('Failed to parse XML file: %s\n' % filename)
sys.exit(1)
def AssertNotDeprecatedAttribute(name, value, filename):
"""Raises an exception if the given attribute is deprecated."""
msg = None
if name in ATTRIBUTES_TO_MAP_REVERSED:
msg = '{0} should use {1} instead of {2}'.format(filename,
ATTRIBUTES_TO_MAP_REVERSED[name], name)
elif name in GRAVITY_ATTRIBUTES and ('left' in value or 'right' in value):
msg = '{0} should use start/end instead of left/right for {1}'.format(
filename, name)
if msg:
msg += ('\nFor background, see: http://android-developers.blogspot.com/'
'2013/03/native-rtl-support-in-android-42.html\n'
'If you have a legitimate need for this attribute, discuss with '
'kkimlabs@chromium.org or newt@chromium.org')
raise Exception(msg)
def WriteDomToFile(dom, filename):
"""Write the given dom to filename."""
build_utils.MakeDirectory(os.path.dirname(filename))
with open(filename, 'w') as f:
dom.writexml(f, '', ' ', '\n', encoding='utf-8')
def HasStyleResource(dom):
"""Return True if the dom is a style resource, False otherwise."""
root_node = IterateXmlElements(dom).next()
return bool(root_node.nodeName == 'resources' and
list(root_node.getElementsByTagName('style')))
def ErrorIfStyleResourceExistsInDir(input_dir):
"""If a style resource is in input_dir, raises an exception."""
for input_filename in build_utils.FindInDirectory(input_dir, '*.xml'):
dom = ParseAndReportErrors(input_filename)
if HasStyleResource(dom):
raise Exception('error: style file ' + input_filename +
' should be under ' + input_dir +
'-v17 directory. Please refer to '
'http://crbug.com/243952 for the details.')
def GenerateV14LayoutResourceDom(dom, filename, assert_not_deprecated=True):
"""Convert layout resource to API 14 compatible layout resource.
Args:
dom: Parsed minidom object to be modified.
filename: Filename that the DOM was parsed from.
assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
cause an exception to be thrown.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
# Iterate all the elements' attributes to find attributes to convert.
for element in IterateXmlElements(dom):
for name, value in list(element.attributes.items()):
# Convert any API 17 Start/End attributes to Left/Right attributes.
# For example, from paddingStart="10dp" to paddingLeft="10dp"
# Note: gravity attributes are not necessary to convert because
# start/end values are backward-compatible. Explained at
# https://plus.sandbox.google.com/+RomanNurik/posts/huuJd8iVVXY?e=Showroom
if name in ATTRIBUTES_TO_MAP:
element.setAttribute(ATTRIBUTES_TO_MAP[name], value)
del element.attributes[name]
is_modified = True
elif assert_not_deprecated:
AssertNotDeprecatedAttribute(name, value, filename)
return is_modified
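# Hedged illustration, not part of the original script: the kind of rewrite
# GenerateV14LayoutResourceDom performs, shown on a made-up in-memory layout.
def _ExampleLayoutConversion():
  example_xml = ('<TextView '
                 'xmlns:android="http://schemas.android.com/apk/res/android" '
                 'android:paddingStart="10dp"/>')
  dom = minidom.parseString(example_xml)
  GenerateV14LayoutResourceDom(dom, '<in-memory example>')
  # android:paddingStart has now been replaced by android:paddingLeft="10dp".
  return dom.toxml()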
def GenerateV14StyleResourceDom(dom, filename, assert_not_deprecated=True):
"""Convert style resource to API 14 compatible style resource.
Args:
dom: Parsed minidom object to be modified.
filename: Filename that the DOM was parsed from.
assert_not_deprecated: Whether deprecated attributes (e.g. paddingLeft) will
cause an exception to be thrown.
Returns:
True if dom is modified, False otherwise.
"""
is_modified = False
for style_element in dom.getElementsByTagName('style'):
for item_element in style_element.getElementsByTagName('item'):
name = item_element.attributes['name'].value
value = item_element.childNodes[0].nodeValue
if name in ATTRIBUTES_TO_MAP:
item_element.attributes['name'].value = ATTRIBUTES_TO_MAP[name]
is_modified = True
elif assert_not_deprecated:
AssertNotDeprecatedAttribute(name, value, filename)
return is_modified
def GenerateV14LayoutResource(input_filename, output_v14_filename,
output_v17_filename):
"""Convert API 17 layout resource to API 14 compatible layout resource.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
If the generated resource is identical to the original resource,
don't do anything. If not, write the generated resource to
output_v14_filename, and copy the original resource to output_v17_filename.
"""
dom = ParseAndReportErrors(input_filename)
is_modified = GenerateV14LayoutResourceDom(dom, input_filename)
if is_modified:
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
# Copy the original resource.
build_utils.MakeDirectory(os.path.dirname(output_v17_filename))
shutil.copy2(input_filename, output_v17_filename)
def GenerateV14StyleResource(input_filename, output_v14_filename):
"""Convert API 17 style resources to API 14 compatible style resource.
Write the generated style resource to output_v14_filename.
It's mostly a simple replacement, s/Start/Left s/End/Right,
on the attribute names.
"""
dom = ParseAndReportErrors(input_filename)
GenerateV14StyleResourceDom(dom, input_filename)
# Write the generated resource.
WriteDomToFile(dom, output_v14_filename)
def GenerateV14LayoutResourcesInDir(input_dir, output_v14_dir, output_v17_dir):
"""Convert layout resources to API 14 compatible resources in input_dir."""
for input_filename in build_u
|
wandec/grr
|
gui/views.py
|
Python
|
apache-2.0
| 9,063
| 0.00982
|
#!/usr/bin/env python
"""Main Django renderer."""
import importlib
import os
import pdb
import time
from django import http
from django import shortcuts
from django import template
from django.views.decorators import csrf
import psutil
import logging
from grr import gui
from grr.gui import api_call_renderers
from grr.gui import renderers
from grr.gui import urls
from grr.gui import webauth
from grr.lib import access_control
from grr.lib import aff4
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import registry
from grr.lib import stats
DOCUMENT_ROOT = os.path.join(os.path.dirname(gui.__file__), "static")
class ViewsInit(registry.InitHook):
pre = ["StatsInit"]
def RunOnce(self):
"""Run this once on init."""
# Renderer-aware metrics
stats.STATS.RegisterEventMetric(
"ui_renderer_latency", fields=[("renderer", str)])
stats.STATS.RegisterEventMetric(
"ui_renderer_response_size", fields=[("renderer", str)],
units=stats.MetricUnits.BYTES)
stats.STATS.RegisterCounterMetric(
"ui_renderer_failure", fields=[("renderer", str)])
# General metrics
stats.STATS.RegisterCounterMetric("ui_unknown_renderer")
stats.STATS.RegisterCounterMetric("http_access_denied")
stats.STATS.RegisterCounterMetric("http_server_error")
@webauth.SecurityCheck
@csrf.ensure_csrf_cookie # Set the csrf cookie on the homepage.
def Homepage(request):
"""Basic handler to render the index page."""
# We build a list of all js files to include by looking at the list
# of renderers modules. JS files are always named in accordance with
# renderers modules names. I.e. if there's a renderers package called
  # grr.gui.plugins.acl_manager, we expect a js file called acl_manager.js.
renderers_js_files = set()
for cls in renderers.Renderer.classes.values():
if aff4.issubclass(cls, renderers.Renderer) and cls.__module__:
module_components = cls.__module__.split(".")
# Only include files corresponding to renderers in "plugins" package.
if module_components[-2] == "plugins":
renderers_js_files.add(module_components[-1] + ".js")
create_time = psutil.Process(os.getpid()).create_time()
context = {"page_title": config_lib.CONFIG["AdminUI.page_title"],
"heading": config_lib.CONFIG["AdminUI.heading"],
"report_url": config_lib.CONFIG["AdminUI.report_url"],
"help_url": config_lib.CONFIG["AdminUI.help_url"],
"use_precompiled_js": config_lib.CONFIG[
"AdminUI.use_precompiled_js"],
"renderers_js": renderers_js_files,
"timestamp": create_time}
return shortcuts.render_to_response(
"base.html", context, context_instance=template.RequestContext(request))
@webauth.SecurityCheck
def RenderBinaryDownload(request):
"""Basic handler to allow downloads of aff4:/config/executables files."""
path, filename = request.path.split("/", 2)[-1].rsplit("/", 1)
if not path or not filename:
return AccessDenied("Error: Invalid path.")
request.REQ = request.REQUEST
def Generator():
with aff4.FACTORY.Open(aff4_path, aff4_type="GRRSignedBlob",
token=BuildToken(request, 60)) as fd:
while True:
data = fd.Read(1000000)
if not data: break
yield data
base_path = rdfvalue.RDFURN("aff4:/config/executables")
aff4_path = base_path.Add(path).Add(filename)
if not aff4_path.RelativeName(base_path):
# Check for path traversals.
return AccessDenied("Error: Invalid path.")
filename = aff4_path.Basename()
response = http.HttpResponse(content=Generator(),
content_type="binary/octet-stream")
response["Content-Disposition"] = ("attachment; filename=%s" % filename)
return response
@webauth.SecurityCheck
@renderers.ErrorHandler()
def RenderApi(request):
"""Handler for the /api/ requests."""
return api_call_renderers.RenderHttpResponse(request)
@webauth.SecurityCheck
@renderers.ErrorHandler()
def RenderGenericRenderer(request):
"""Django handler for rendering registered GUI Elements."""
try:
action, renderer_name = request.path.split("/")[-2:]
renderer_cls = renderers.Renderer.GetPlugin(name=renderer_name)
except KeyError:
stats.STATS.IncrementCounter("ui_unknown_renderer")
return AccessDenied("Error: Renderer %s not found" % renderer_name)
# Check that the action is valid
["Layout", "RenderAjax", "Download", "Validate"].index(action)
renderer = renderer_cls()
result = http.HttpResponse(content_type="text/html")
# Pass the request only from POST parameters. It is much more convenient to
# deal with normal dicts than Django's Query objects so we convert here.
if flags.FLAGS.debug:
# Allow both POST and GET for debugging
request.REQ = request.POST.dict()
request.REQ.update(request.GET.dict())
else:
# Only POST in production for CSRF protections.
request.REQ = request.POST.dict()
# Build the security token for this request
request.token = BuildToken(request, renderer.max_execution_time)
request.canary_mode = "canary_mode" in request.COOKIES
# Allow the renderer to check its own ACLs.
renderer.CheckAccess(request)
try:
# Does this renderer support this action?
method = getattr(renderer, action)
start_time = time.time()
try:
result = method(request, result) or result
finally:
total_time = time.time() - start_time
stats.STATS.RecordEvent("ui_renderer_latency",
total_time, fields=[renderer_name])
except access_control.UnauthorizedAccess, e:
result = http.HttpResponse(content_type="text/html")
result = renderers.Renderer.GetPlugin("UnauthorizedRenderer")().Layout(
request, result, exception=e)
except Exception:
stats.STATS.IncrementCounter("ui_renderer_failure",
fields=[renderer_name])
if flags.FLAGS.debug:
pdb.post_mortem()
raise
if not isinstance(result, http.HttpResponse):
raise RuntimeError("Renderer returned invalid response %r" % result)
return result
def RedirectToRemoteHelp(path):
"""Redirect to GitHub-hosted documentation."""
target_path = os.path.join(config_lib.CONFIG["AdminUI.github_docs_location"],
path.replace(".html", ".adoc"))
# We have to redirect via JavaScript to have access to and to preserve the
# URL hash. We don't know the hash part of the url on the server.
response = http.HttpResponse()
response.write("""
<script>
var friendly_hash = window.location.hash.replace('#_', '#').replace(/_/g, '-');
window.location = '%s' + friendly_hash;
</script>
""" % target_path)
return response
@webauth.SecurityCheck
def RenderHelp(request, path, document_root=None, content_type=None):
"""Either serves local help files or redirects to the remote ones."""
_ = document_root
_ = content_type
request.REQ = request.REQUEST
  help_path = request.path.split("/", 2)[-1]
if not help_path:
return AccessDenied("Error: Invalid help path.")
try:
user_record = aff4.FACTORY.Open(
aff4.ROOT_URN.Add("users").Add(request.user), "GRRUser",
token=BuildToken(request, 60))
settings = user_record.Get(user_record.Schema.GUI_SETTINGS)
except IOError:
settings = aff4.GRRUser.SchemaCls.GUI_SETTINGS()
if settings.docs_location == settings.DocsLocation.REMOTE:
# Proxy remote documentation.
    return RedirectToRemoteHelp(help_path)
else:
# Serve prebuilt docs using static handler. To do that we have
# to resolve static handler's name to an actual function object.
static_handler_components = urls.static_handler.split(".")
static_handler_module = importlib.import_module(".".join(
static_handler_components[0:-1]))
static_handler = getattr(static_handler_module,
static_handler_components[-1])
return static_handler(request, path, document_root=urls.help_root)
def BuildToken(request, execution_time):
"""Build an ACLToken from the request."""
token = access
|
bobobo80/python-crawler-test
|
web_get/webget.py
|
Python
|
mit
| 1,636
| 0.001906
|
"""
Page-request helpers wrapped around requests.
"""
import requests
from .headers import Headers
from proxy import proxy
class TimeoutException(Exception):
"""
    Connection timeout exception.
"""
pass
class ResponseException(Exception):
"""
    Response error exception.
"""
pass
class WebRequest(object):
"""
    Wrapper around requests.
"""
def __init__(self):
self.headers = Headers().get()
self.proxies = proxy.get_proxy()
def get(self, url):
"""
        GET a page.
"""
try:
resp = requests.get(url, headers=self.headers,
proxies={'http': 'http://{}'.format(self.proxies)}, timeout=10)
return self.check_response(resp)
except Exception as e:
self.network_error(e)
def post(self, url, payload):
"""
|
页面post
"""
try:
resp = requests.post(url, data=payload, headers=self.headers,
proxies={'http': 'http://{}'.format(self.proxies)},
timeout=10)
return self.check_response(resp)
except Exception as e:
self.network_error(e)
def network_error(self, e):
proxy.delete_proxy(self.proxies)
print('error: {}'.format(e))
raise TimeoutException('timeout')
def check_response(self, resp):
"""
        Check the response status.
:param resp:
:return:
"""
if resp.status_code == 200:
return resp
else:
raise ResponseException('response status error: {}'.format(resp.status_code))
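# Hedged usage sketch, not part of the module: a typical call pattern for
# WebRequest. The URL is a placeholder and the Headers/proxy helpers imported
# above are assumed to resolve at runtime.
def example_fetch(url='http://example.com'):
    client = WebRequest()
    try:
        return client.get(url).text
    except (TimeoutException, ResponseException) as exc:
        print('request failed: {}'.format(exc))
        return None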
|
Sventimir/src-depend
|
depend.py
|
Python
|
apache-2.0
| 4,469
| 0.00358
|
#! /usr/bin/python
"""Src-depend is a simple tool for sketching source code dependency graphs
from source code itself. It iterates through all source code files in given
directory, finds import statements and turns them into edges of a dependency
graph.
Uses graphviz for sketching graphs."""
import argparse
import graphviz
import logging
import os.path
import re
def parseargs():
parser = argparse.ArgumentParser()
parser.add_argument('-l', '--lang', dest='lang', default='python',
help='specifies language plugin to be used (defaults to python)')
parser.add_argument('-o', '--output', dest='img_out',
                        help='output sketched graph to specified file (appends extension automatically); source will be output to IMG_OUT')
parser.add_argument('-d', '--debug', dest='debug', action='store_true',
help='debug mode')
parser.add_argument('-f', '--output-format', dest='format', default='png',
                        help='specifies output image\'s format (defaults to .png)')
parser.add_argument('-r', '--remove-redundant', dest='remove-redundant',
action='store_true', help='remove direct dependencies on modules that module depends on indirectly')
parser.add_argument('-e', '--exclude', dest='excludes', nargs='+', default=[],
                        help='a filename to omit (multiple names possible)')
parser.add_argument('--exclude-regex', dest='exclude-regex', default=None,
help='filenames matching specified regex will be ignored')
parser.add_argument('-q', '--quiet', dest='quiet', action='store_true',
help='quiet mode')
parser.add_argument('target', help='source code directory to analyse')
return parser.parse_args().__dict__
def main(args):
log_level = logging.INFO
if args['debug']:
log_level = logging.DEBUG
elif args['quiet']:
log_level = logging.ERROR
logging.basicConfig(
level=log_level,
format='[%(asctime)s; %(levelname)s]: %(message)s'
)
is_excluded = exclude_checker(args['excludes'], args['exclude-regex'])
try:
        import_obj = __import__('plugins.{}'.format(args['lang']))
plugin = getattr(import_obj, args['lang'])
except ImportError:
logging.error('Could not find plugin for {}!'.format(args['lang']))
return 1
files = find_source_files(args['target'], plugin.Module.filename_ext, is_excluded)
for f in files:
with open(f, 'r') as file:
            plugin.Module(file, args['target'])
plugin.Module.create_dependency_tree()
if args['remove-redundant']:
plugin.Module.remove_redundant_dependencies()
graph = make_graph(*plugin.Module.registry)
graph.format = args['format']
if not args['img_out'] is None:
output = graph.render(args['img_out'])
logging.info('Writing graph image to {}...'.format(output))
def make_graph(*modules):
graph = graphviz.Digraph()
for module in modules:
graph.node(module.filename, module.name, module.attributes)
logging.debug('Creating node {}...'.format(module.name))
for dep in module.dependencies:
if not dep is None:
logging.debug('Creating dependency of {} on {}'.format(
module.name, dep.name
))
graph.edge(module.filename, dep.filename)
return graph
def find_source_files(path, ext, is_excluded):
basename = os.path.basename(path)
if is_excluded(basename):
        logging.debug('Omitting excluded path: {}...'.format(path))
elif not basename == '.' and basename.startswith('.'):
        logging.debug('Omitting hidden path: {}...'.format(path))
elif os.path.isfile(path) and path.endswith(ext):
        logging.info('{} recognized as source file.'.format(path))
yield path
elif os.path.isdir(path):
logging.debug('In dir "{}": {}'.format(path, os.listdir(path)))
for f in os.listdir(path):
for el in find_source_files(os.path.join(path, f), ext, is_excluded):
yield el
else:
logging.debug('{} is not a source file.'.format(path))
def exclude_checker(excluded, regex):
if regex is None:
return lambda filename: filename in excluded
else:
compiled_regex = re.compile(regex)
return lambda filename:filename in excluded \
or compiled_regex.match(filename)
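# Hedged usage sketch, not part of the original tool: how the predicate returned
# by exclude_checker behaves for an explicit name list combined with a regex.
def _exclude_checker_example():
    is_excluded = exclude_checker(['setup.py'], r'^test_')
    # 'setup.py' matches the explicit list, 'test_foo.py' matches the regex,
    # 'depend.py' matches neither, so the result is [True, True, False].
    return [bool(is_excluded(name)) for name in ('setup.py', 'test_foo.py', 'depend.py')]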
if __name__ == '__main__':
exit(main(parseargs()))
|
davelab6/pyfontaine
|
fontaine/charsets/noto_glyphs/notosanssylotinagri_regular.py
|
Python
|
gpl-3.0
| 3,639
| 0.023633
|
# -*- coding: utf-8 -*-
class Charset(object):
common_name = 'NotoSansSylotiNagri-Regular'
native_name = ''
def glyphs(self):
glyphs = []
glyphs.append(0x0039) #glyph00057
glyphs.append(0x0034) #uniA82A
glyphs.append(0x0035) #uniA82B
glyphs.append(0x0036) #glyph00054
glyphs.append(0x0040) #glyph00064
glyphs.append(0x0053) #uni09EE
glyphs.append(0x0038) #glyph00056
glyphs.append(0x0015) #uniA80B
glyphs.append(0x0016) #uniA80C
glyphs.append(0x003D) #glyph00061
glyphs.append(0x0014) #uniA80A
glyphs.append(0x0019) #uniA80F
glyphs.append(0x0037) #glyph00055
glyphs.append(0x0017) #uniA80D
glyphs.append(0x0018) #uniA80E
glyphs.append(0x0032) #uniA828
glyphs.append(0x0001) #uniFEFF
glyphs.append(0x004D) #uni09E8
glyphs.append(0x0054) #uni09EF
glyphs.append(0x0048) #uni2055
glyphs.append(0x0050) #uni09EB
glyphs.append(0x0002) #uni000D
glyphs.append(0x0051) #uni09EC
glyphs.append(0x0052) #uni09ED
glyphs.append(0x002C) #uniA822
glyphs.append(0x0049) #uni0964
glyphs.append(0x004A) #uni0965
glyphs.append(0x003E) #glyph00062
glyphs.append(0x0042) #glyph00066
glyphs.append(0x002D) #uniA823
glyphs.append(0x0023) #uniA819
glyphs.append(0x0022) #uniA818
glyphs.append(0x0033) #uniA829
glyphs.append(0x0043) #glyph00067
        glyphs.append(0x001F) #uniA815
glyphs.append(0x001E) #uniA814
glyphs.append(0x0021) #uniA817
glyphs.append(0x0020) #uniA816
glyphs.append(0x001B) #uniA811
glyphs.append(0x001A) #uniA810
glyphs.append(0x001D) #uniA813
glyphs.append(0x001C) #uniA812
glyphs.append(0x0047) #glyph00071
glyphs.append(0x0041) #glyph00065
glyphs.append(0x004C) #uni09E7
glyphs.append(0x0044) #glyph00068
glyphs.append(0x0045) #glyph00069
glyphs.append(0x0028) #uniA81E
glyphs.append(0x0027) #uniA81D
glyphs.append(0x0003) #uni00A0
glyphs.append(0x0029) #uniA81F
glyphs.append(0x0024) #uniA81A
glyphs.append(0x003F) #glyph00063
glyphs.append(0x0026) #uniA81C
glyphs.append(0x0025) #uniA81B
glyphs.append(0x0005) #uni200C
glyphs.append(0x0004) #uni200B
glyphs.append(0x003B) #glyph00059
glyphs.append(0x0006) #uni200D
glyphs.append(0x003A) #glyph00058
glyphs.append(0x004E) #uni09E9
glyphs.append(0x002F) #uniA825
glyphs.append(0x0007) #uni2010
glyphs.append(0x0008) #uni2011
glyphs.append(0x004B) #uni09E6
glyphs.append(0x0009) #uni25CC
glyphs.append(0x004F) #uni09EA
glyphs.append(0x003C) #glyph00060
glyphs.append(0x0046) #glyph00070
glyphs.append(0x002A) #uniA820
glyphs.append(0x002B) #uniA821
glyphs.append(0x0012) #uniA808
glyphs.append(0x0013) #uniA809
glyphs.append(0x002E) #uniA824
glyphs.append(0x0000) #.notdef
glyphs.append(0x0030) #uniA826
glyphs.append(0x0031) #uniA827
glyphs.append(0x000C) #uniA802
glyphs.append(0x000D) #uniA803
glyphs.append(0x000A) #uniA800
glyphs.append(0x000B) #uniA801
glyphs.append(0x0010) #uniA806
glyphs.append(0x0011) #uniA807
glyphs.append(0x000E) #uniA804
glyphs.append(0x000F) #uniA805
return glyphs
|
Mappy/pycnikr
|
tests/test_pycnik.py
|
Python
|
lgpl-3.0
| 660
| 0.001515
|
"""
This test illustrates how to generate an XML Mapnik style sheet from a pycnik
style sheet written in Python.
"""
import os
from pycnik import pycnik
import artefact
actual_xml_style_sheet = 'artefacts/style_sheet.xml'
expected_xml_style_sheet = 'style_sheet.xml'
class TestPycnik(artefact.TestCaseWithArtefacts):
def test_pycnik(self):
        python_style_sheet = pycnik.import_style('style_sheet.py')
pycnik.translate(python_style_sheet, actual_xml_style_sheet)
with open(actual_xml_style_sheet) as actual, \
open(expected_xml_style_sheet) as expected:
self.assertEquals(actual.read(), expected.read())
|
codeback/openerp-cbk_sale_commission_filter
|
__openerp__.py
|
Python
|
agpl-3.0
| 1,616
| 0.004337
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# cbk_crm_information: CRM Information Tab
# Copyright (c) 2013 Codeback Software S.L. (http://codeback.es)
# @author: Miguel García <miguel@codeback.es>
# @author: Javier Fuentes <javier@codeback.es>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Sale Commission Filter',
'version': '0.1',
'author': 'Codeback Software',
'summary': '',
    'description' : 'Adds fields so that the filters work correctly',
'website': 'http://codeback.es',
'images': [],
'depends': ['sale', 'account'],
'category': '',
'sequence': 26,
'demo': [],
'data': ['sale_view.xml', 'account_invoice_view.xml'],
'test': [],
'installable': True,
'application': False,
'auto_install': False,
'css': [],
}
|
anish/buildbot
|
master/buildbot/test/unit/test_steps_package_rpm_rpmbuild.py
|
Python
|
gpl-2.0
| 5,421
| 0.001476
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from collections import OrderedDict
from twisted.internet import defer
from twisted.trial import unittest
from buildbot import config
from buildbot.process.properties import Interpolate
from buildbot.process.results import SUCCESS
from buildbot.steps.package.rpm import rpmbuild
from buildbot.test.fake.remotecommand import ExpectShell
from buildbot.test.util import steps
from buildbot.test.util.misc import TestReactorMixin
class RpmBuild(steps.BuildStepMixin, TestReactorMixin, unittest.TestCase):
def setUp(self):
self.setUpTestReactor()
return self.setUpBuildStep()
def tearDown(self):
return self.tearDownBuildStep()
def test_no_specfile(self):
with self.assertRaises(config.ConfigErrors):
rpmbuild.RpmBuild()
def test_success(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", dist=".el5"))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define "_specdir '
'`pwd`" --define "_srcrpmdir `pwd`" --define "dist .el5" '
'-ba foo.spec')
+ ExpectShell.log('stdio',
stdout='lalala')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
def test_autoRelease(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", autoRelease=True))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir `pwd`" '
'--define "_sourcedir `pwd`" --define "_specdir `pwd`" '
'--define "_srcrpmdir `pwd`" --define "_release 0" '
'--define "dist .el6" -ba foo.spec')
+ ExpectShell.log('stdio',
stdout='Your code has been rated at 10/10')
+ 0)
        self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
def test_define(self):
defines = [("a", "1"), ("b", "2")]
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec",
define=OrderedDict(defines)))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define '
'"_specdir `pwd`" --define "_srcrpmdir `pwd`" '
'--define "a 1" --define "b 2" --define "dist .el6" '
'-ba foo.spec')
+ ExpectShell.log('stdio',
stdout='Your code has been rated at 10/10')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
def test_define_none(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec", define=None))
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define '
'"_specdir `pwd`" --define "_srcrpmdir `pwd`" '
'--define "dist .el6" -ba foo.spec')
+ ExpectShell.log('stdio',
stdout='Your code has been rated at 10/10')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
return self.runStep()
@defer.inlineCallbacks
def test_renderable_dist(self):
self.setupStep(rpmbuild.RpmBuild(specfile="foo.spec",
dist=Interpolate('%(prop:renderable_dist)s')))
self.properties.setProperty('renderable_dist', '.el7', 'test')
self.expectCommands(
ExpectShell(workdir='wkdir', command='rpmbuild --define "_topdir '
'`pwd`" --define "_builddir `pwd`" --define "_rpmdir '
'`pwd`" --define "_sourcedir `pwd`" --define "_specdir '
'`pwd`" --define "_srcrpmdir `pwd`" --define "dist .el7" '
'-ba foo.spec')
+ ExpectShell.log('stdio',
stdout='lalala')
+ 0)
self.expectOutcome(result=SUCCESS, state_string='RPMBUILD')
yield self.runStep()
|
wscullin/spack
|
var/spack/repos/builtin/packages/py-markupsafe/package.py
|
Python
|
lgpl-2.1
| 2,130
| 0.000939
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyMarkupsafe(PythonPackage):
"""MarkupSafe is a library for Python that implements a unicode
string that is aware of HTML escaping rules and can be used to
implement automatic string escaping. It is used by Jinja 2, the
    Mako templating engine, the Pylons web framework and many more."""
homepage = "http://www.pocoo.org/projects/markupsafe/"
url = "https://pypi.io/packages/source/M/MarkupSafe/MarkupSafe-1.0.tar.gz"
import_modules = ['markupsafe']
version('1.0', '2fcedc9284d50e577b5192e8e3578355')
version('0.23', 'f5ab3deee4c37cd6a922fb81e730da6e')
version('0.22', 'cb3ec29fd5361add24cfd0c6e2953b3e')
version('0.21', 'fde838d9337fa51744283f46a1db2e74')
version('0.20', '7da066d9cb191a70aa85d0a3d43565d1')
version('0.19', 'ccb3f746c807c5500850987006854a6d')
depends_on('py-setuptools', type='build')
|
vicnet/weboob
|
modules/bp/pages/subscription.py
|
Python
|
lgpl-3.0
| 6,761
| 0.003108
|
# -*- coding: utf-8 -*-
# Copyright(C) 2010-2018 Célande Adrien
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import re
from weboob.capabilities.bill import DocumentTypes, Subscription, Document
from weboob.browser.pages import LoggedPage, HTMLPage
from weboob.browser.filters.standard import CleanText, Regexp, Env, Date, Format, Field
from weboob.browser.filters.html import Link, Attr, TableCell
from weboob.browser.elements import ListElement, ItemElement, method, TableElement
class SubscriptionPage(LoggedPage, HTMLPage):
# because of freaking JS from hell
STATEMENT_TYPES = ('RCE', 'RPT', 'RCO')
@method
class iter_subscriptions(ListElement):
item_xpath = '//select[@id="compte"]/option'
class item(ItemElement):
klass = Subscription
obj_id = Regexp(Attr('.', 'value'), r'\w-(\w+)')
obj_label = CleanText('.')
obj_subscriber = Env('subscriber')
@method
class iter_documents(ListElement):
def condition(self):
return not (
CleanText('//p[contains(text(), "est actuellement indisponible")]')(self)
or CleanText('//p[contains(text(), "Aucun e-Relevé n\'est disponible")]')(self)
)
item_xpath = '//ul[contains(@class, "liste-cpte")]/li'
# you can have twice the same statement: same month, same subscription
ignore_duplicate = True
class item(ItemElement):
klass = Document
obj_id = Format('%s_%s%s', Env('sub_id'), Regexp(CleanText('.//a/@title'), r' (\d{2}) '), CleanText('.//span[contains(@class, "date")]' ,symbols='/'))
obj_label = Format('%s - %s', CleanText('.//span[contains(@class, "lib")]'), CleanText('.//span[contains(@class, "date")]'))
obj_url = Format('/voscomptes/canalXHTML/relevePdf/relevePdf_historique/%s', Link('./a'))
obj_format = 'pdf'
obj_type = DocumentTypes.OTHER
def obj_date(self):
date = CleanText('.//span[contains(@class, "date")]')(self)
m = re.search(r'(\d{2}/\d{2}/\d{4})', date)
if m:
return Date(CleanText('.//span[contains(@class, "date")]'), dayfirst=True)(self)
else:
return Date(
Format(
'%s/%s',
Regexp(CleanText('.//a/@title'), r' (\d{2}) '),
CleanText('.//span[contains(@class, "date")]')
),
dayfirst=True
)(self)
def get_params(self, sub_label):
# the id is in the label
sub_value = Attr('//select[@id="compte"]/option[contains(text(), "%s")]' % sub_label, 'value')(self.doc)
form = self.get_form(name='formulaireHistorique')
form['formulaire.numeroCompteRecherche'] = sub_value
return form
def get_years(self):
return self.doc.xpath('//select[@id="annee"]/option/@value')
def has_error(self):
return (
CleanText('//p[contains(text(), "est actuellement indisponible")]')(self.doc)
or CleanText('//p[contains(text(), "Aucun e-Relevé n\'est disponible")]')(self.doc)
)
class DownloadPage(LoggedPage, HTMLPage):
def get_content(self):
if self.doc.xpath('//iframe'):
# the url has the form
# ../relevePdf_telechargement/affichagePDF-telechargementPDF.ea?date=XXX
part_link = Attr('//iframe', 'src')(self.doc).replace('..', '')
return self.browser.open('/voscomptes/canalXHTML/relevePdf%s' % part_link).content
return self.content
class ProSubscriptionPage(LoggedPage, HTMLPage):
@method
class iter_subscriptions(ListElement):
item_xpath = '//select[@id="numeroCompteRechercher"]/option[not(@disabled)]'
class item(ItemElement):
klass = Subscription
obj_label = CleanText('.')
obj_id = Regexp(Field('label'), r'\w? ?- (\w+)')
obj_subscriber = Env('subscriber')
obj__number = Attr('.', 'value')
@method
class iter_documents(TableElement):
item_xpath = '//table[@id="relevesPDF"]//tr[td]'
head_xpath = '//table[@id="relevesPDF"]//th'
# may have twice the same statement for a given month
ignore_duplicate = True
col_date = re.compile('Date du relevé')
col_label = re.compile('Type de document')
class item(ItemElement):
klass = Document
obj_date = Date(CleanText(TableCell('date')), dayfirst=True)
obj_label = Format('%s %s', CleanText(TableCell('label')), CleanText(TableCell('date')))
obj_id = Format('%s_%s', Env('sub_id'), CleanText(TableCell('date'), symbols='/'))
# the url uses an id depending on the page where the document is
# by example, if the id is 0,
# it means that it is the first document that you can find
# on the page of the year XXX for the subscription YYYY
obj_url = Link('.//a')
obj_format = 'pdf'
obj_type = DocumentTypes.OTHER
def submit_form(self, sub_number, year):
form = self.get_form(name='formRechHisto')
form['historiqueReleveParametre.numeroCompteRecherche'] = sub_number
form['typeRecherche'] = 'annee'
form['anneeRechercheDefaut'] = year
form.submit()
def get_years(self):
return self.doc.xpath('//select[@name="anneeRechercheDefaut"]/option/@value')
def no_statement(self):
return self.doc.xpath('//p[has-class("noresult")]')
def has_document(self, date):
return self.doc.xpath('//td[@headers="dateReleve" and contains(text(), "%s")]' % date.strftime('%d/%m/%Y'))
def get_sub_number(self, doc_id):
        sub_id = doc_id.split('_')[0]
        return Attr('//select[@id="numeroCompteRechercher"]/option[contains(text(), "%s")]' % sub_id, 'value')(self.doc)
|
kevink1986/my-first-blog
|
handlers/signup.py
|
Python
|
apache-2.0
| 1,621
| 0
|
from base import BaseHandler
from functions import *
from models import User
class SignupHandler(BaseHandler):
"""Sign up handler that is used to signup users."""
def get(self):
self.render("signup.html")
def post(self):
error = False
self.username = self.request.get("username")
self.password = self.request.get("password")
self.password_check = self.request.get("password_check")
self.email = self.request.get("email")
template_vars = dict(username=self.username,
email=self.email)
if not valid_username(self.username):
template_vars['error_username'] = "That's not a valid username."
error = True
elif User.by_username(self.username):
template_vars['error_username'] = "This username already exists."
error = True
if not valid_password(self.password):
template_vars['error_password'] = "That wasn't a valid password."
error = True
        elif self.password != self.password_check:
template_vars['error_check'] = "Your passwords didn't match."
error = True
if not valid_email(self.email):
template_vars['error_email'] = "That's not a valid email."
error = True
if error:
self.render('signup.html', **template_vars)
else:
u = User.register(self.username,
self.password,
self.email)
u.put()
self.login(u)
self.redirect('/?')
|
chrsrds/scikit-learn
|
sklearn/neighbors/__init__.py
|
Python
|
bsd-3-clause
| 1,176
| 0
|
"""
The :mod:`sklearn.neighbors` module implements the k-nearest neighbors
algorithm.
"""
from .ball_tree import BallTree
from .kd_tree import KDTree
from .dist_metrics import DistanceMetric
from .graph import kneighbors_graph, radius_neighbors_graph
from .unsupervised import NearestNeighbors
from .classification import KNeighborsClassifier, RadiusNeighborsClassifier
from .regression import KNeighborsRegressor, RadiusNeighborsRegressor
from .nearest_centroid import NearestCentroid
from .kde import KernelDensity
from .lof import LocalOutlierFactor
from .nca import NeighborhoodComponentsAnalysis
from .base import VALID_METRICS, VALID_METRICS_SPARSE
__all__ = ['BallTree',
'DistanceMetric',
'KDTree',
'KNeighborsClassifier',
'KNeighborsRegressor',
'NearestCentroid',
'NearestNeighbors',
'RadiusNeighborsClassifier',
'RadiusNeighborsRegressor',
'kneighbors_graph',
'radius_neighbors_graph',
'KernelDensity',
'LocalOutlierFactor',
'NeighborhoodComponentsAnalysis',
'VALID_METRICS',
'VALID_METRICS_SPARSE']
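# Hedged usage sketch, not part of the public API: a minimal nearest-neighbour
# query with the classes re-exported above (numpy assumed available).
def _knn_usage_sketch():
    import numpy as np
    nn = NearestNeighbors(n_neighbors=2).fit(np.array([[0.0], [1.0], [5.0]]))
    distances, indices = nn.kneighbors(np.array([[0.9]]))
    return distances, indices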
|
dhuang/incubator-airflow
|
docs/build_docs.py
|
Python
|
apache-2.0
| 19,953
| 0.002356
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import argparse
import multiprocessing
import os
import sys
from collections import defaultdict
from typing import Dict, List, NamedTuple, Optional, Tuple
from rich.console import Console
from tabulate import tabulate
from airflow.utils.helpers import partition
from docs.exts.docs_build import dev_index_generator, lint_checks
from docs.exts.docs_build.code_utils import CONSOLE_WIDTH, PROVIDER_INIT_FILE
from docs.exts.docs_build.docs_builder import DOCS_DIR, AirflowDocsBuilder, get_available_packages
from docs.exts.docs_build.errors import DocBuildError, display_errors_summary
from docs.exts.docs_build.fetch_inventories import fetch_inventories
from docs.exts.docs_build.github_action_utils import with_group
from docs.exts.docs_build.package_filter import process_package_filters
from docs.exts.docs_build.spelling_checks import SpellingError, display_spelling_error_summary
TEXT_RED = '\033[31m'
TEXT_RESET = '\033[0m'
if __name__ not in ("__main__", "__mp_main__"):
raise SystemExit(
"This file is intended to be executed as an executable program. You cannot use it as a module."
"To run this script, run the ./build_docs.py command"
)
CHANNEL_INVITATION = """\
If you need help, write to #documentation channel on Airflow's Slack.
Channel link: https://apache-airflow.slack.com/archives/CJ1LVREHX
Invitation link: https://s.apache.org/airflow-slack\
"""
ERRORS_ELIGIBLE_TO_REBUILD = [
'failed to reach any of the inventories with the following issues',
'undefined label:',
'unknown document:',
]
ON_GITHUB_ACTIONS = os.environ.get('GITHUB_ACTIONS', 'false') == "true"
console = Console(force_terminal=True, color_system="standard", width=CONSOLE_WIDTH)
def _promote_new_flags():
console.print()
console.print("[yellow]Still tired of waiting for documentation to be built?[/]")
console.print()
if ON_GITHUB_ACTIONS:
console.print("You can quickly build documentation locally with just one command.")
console.print(" [blue]./breeze build-docs[/]")
console.print()
console.print("[yellow]Still too slow?[/]")
console.print()
console.print("You can only build one documentation package:")
console.print(" [blue]./breeze build-docs -- --package-filter <PACKAGE-NAME>[/]")
console.print()
console.print("This usually takes from [yellow]20 seconds[/] to [yellow]2 minutes[/].")
console.print()
console.print("You can also use other extra flags to iterate faster:")
console.print(" [blue]--docs-only - Only build documentation[/]")
console.print(" [blue]--spellcheck-only - Only perform spellchecking[/]")
console.print()
console.print("For more info:")
console.print(" [blue]./breeze build-docs --help[/]")
console.print()
def _get_parser():
available_packages_list = " * " + "\n * ".join(get_available_packages())
parser = argparse.ArgumentParser(
description='Builds documentation and runs spell checking',
epilog=f"List of supported documentation packages:\n{available_packages_list}",
)
    parser.formatter_class = argparse.RawTextHelpFormatter
parser.add_argument(
'--disable-checks', dest='disable_checks', action='store_true', help='Disables extra checks'
)
parser.add_argument(
"--package-filter",
action="append",
help=(
"Filter specifying for which packages the documentation is to be built. Wildcard are supported."
),
)
    parser.add_argument('--docs-only', dest='docs_only', action='store_true', help='Only build documentation')
parser.add_argument(
'--spellcheck-only', dest='spellcheck_only', action='store_true', help='Only perform spellchecking'
)
parser.add_argument(
'--for-production',
dest='for_production',
action='store_true',
help='Builds documentation for official release i.e. all links point to stable version',
)
parser.add_argument(
"-j",
"--jobs",
dest='jobs',
type=int,
default=0,
help=(
"""\
Number of parallel processes that will be spawned to build the docs.
If passed 0, the value will be determined based on the number of CPUs.
"""
),
)
parser.add_argument(
"-v",
"--verbose",
dest='verbose',
action='store_true',
help=(
'Increases the verbosity of the script i.e. always displays a full log of '
'the build process, not just when it encounters errors'
),
)
return parser
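# Hedged usage sketch (an assumption, not taken from the script itself): the
# parser built above can be exercised directly, e.g.
#
#   args = _get_parser().parse_args(["--docs-only", "--jobs", "4"])
#   print(args.docs_only, args.jobs)  # -> True 4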
class BuildSpecification(NamedTuple):
"""Specification of single build."""
package_name: str
for_production: bool
verbose: bool
class BuildDocsResult(NamedTuple):
"""Result of building documentation."""
package_name: str
log_file_name: str
errors: List[DocBuildError]
class SpellCheckResult(NamedTuple):
"""Result of spellcheck."""
package_name: str
log_file_name: str
errors: List[SpellingError]
def perform_docs_build_for_single_package(build_specification: BuildSpecification) -> BuildDocsResult:
"""Performs single package docs build."""
builder = AirflowDocsBuilder(
package_name=build_specification.package_name, for_production=build_specification.for_production
)
console.print(f"[blue]{build_specification.package_name:60}:[/] Building documentation")
result = BuildDocsResult(
package_name=build_specification.package_name,
errors=builder.build_sphinx_docs(
verbose=build_specification.verbose,
),
log_file_name=builder.log_build_filename,
)
return result
def perform_spell_check_for_single_package(build_specification: BuildSpecification) -> SpellCheckResult:
"""Performs single package spell check."""
builder = AirflowDocsBuilder(
package_name=build_specification.package_name, for_production=build_specification.for_production
)
console.print(f"[blue]{build_specification.package_name:60}:[/] Checking spelling started")
result = SpellCheckResult(
package_name=build_specification.package_name,
errors=builder.check_spelling(
verbose=build_specification.verbose,
),
log_file_name=builder.log_spelling_filename,
)
console.print(f"[blue]{build_specification.package_name:60}:[/] Checking spelling completed")
return result
def build_docs_for_packages(
current_packages: List[str],
docs_only: bool,
spellcheck_only: bool,
for_production: bool,
jobs: int,
verbose: bool,
) -> Tuple[Dict[str, List[DocBuildError]], Dict[str, List[SpellingError]]]:
"""Builds documentation for all packages and combines errors."""
all_build_errors: Dict[str, List[DocBuildError]] = defaultdict(list)
all_spelling_errors: Dict[str, List[SpellingError]] = defaultdict(list)
with with_group("Cleaning documentation files"):
for package_name in current_packages:
console.print(f"[blue]{package_name:60}:[/] Cleaning files")
builder = AirflowDocsBuilder(package_name=package_name, for_production=for_production)
builder.clean_files()
if jobs > 1:
run_in_parallel(
all_build_errors,
all_spelling_errors,
curr
|
rohinkumar/correlcalc
|
correlcalc/test.py
|
Python
|
mit
| 3,556
| 0.003375
|
# from fileios import *
# msg = 'Enter Absolute Path to file: '
# f_name = raw_input(msg).strip()
#
# path = file_data_and_path(f_name)
# if path != None:
# print 'Path:',path
# from Tkinter import Tk
# from tkFileDialog import askopenfilename
#
# Tk().withdraw() # we don't want a full GUI, so keep the root window from appearing
# filename = askopenfilename() # show an "Open" dialog box and return the path to the selected file
# print(filename)
# def get_filename(file_type):
# while True:
# print('enter ' + file_type + ' filename: ')
# filename = input()
#         print(filename)
# try:
# with open(filename, 'r') as f:
# my_file = f.read()
# return my_file
# except FileNotFoundError:
# print('No such file. Check file name and path and try again.')
#
#
# x = get_filename('TEMPLATE')
# print(x)
# -*- coding: utf-8 -*-
""
|
"To add test methods.
"""
# from time import sleep
# from halo import Halo
# from time import time
#
# def rocket_launch():
# #spinner = Halo({'spinner': 'shark'})
# spinner = Halo({
# 'spinner': {
# 'interval': 100,
# 'frames': ['-', '\\', '|', '/', '-']
# }
# })
# spinner.start()
# while(1):
# spinner.text = 'Running... Time Elapsed: {} seconds'.format(time())
# sleep(10)
# break
# spinner.succeed('Rocket launched')
#
# rocket_launch()
from antpcf import *
# bins = np.arange(0.01, 0.201, 0.01)
# atpcf('/Users/rohin/Downloads/DR7-Full.ascii', bins, randfile='/Users/rohin/Downloads/random-DR7-Ful.ascii',permetric='apzdth', parmetric='apdz', weights=True)
# tpcf('/Users/rohin/Downloads/DR3-ns.ascii',bins,randfile='/Users/rohin/Downloads/random-DR3-ns.ascii',weights=True)
# def pmethod():
bins = np.arange(0.002, 0.06, 0.002)
correl = tpcf('./testw.dat',bins,randfile='./testw.dat',weights=True)
# return correl
# pool = multiprocessing.Pool(processes=ncount)
# correl = pool.map(pmethod)
# print correl
# atpcf('./testw.dat',bins,randfile='./testw.dat',permetric='apzdth',parmetric='apdz',method='ls',weights=True)
# blha=readfitsfile('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_CMASS_North.fits','data')
# dr12gcmn, weights = datprep('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits','data','lcdm')
# dat = readfitsfile('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits','data')
# weights = dat['WEIGHT_SYSTOT']
# import pyfits
# dpy = pyfits.open('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits')
# dpyd = dpy[1].data
# wts = dpyd['WEIGHT_SYSTOT']
# print(wts)
# print(min(wts))
# print(max(wts))
# print(dr12gcmn)
# print(weights)
# print (min(weights))
# print(max(weights))
# dr12gls=tpcf('/Users/rohin/Documents/ipy_notebooks/galsurveystudy/input/galaxy_DR12v5_LOWZ_South.fits',bins,randfile='/Users/rohin/Downloads/random0_DR12v5_LOWZ_South.fits',weights=True)
# Planck run with changed parameters in param.py
# corrdr3milne = tpcf('/Users/rohin/Downloads/DR3-ns.ascii', bins, randfile='/Users/rohin/Downloads/random-DR3-ns.ascii', weights=True, geometry='open', cosmology='lc')
# corrdr3milne = tpcf('/Users/rohin/Downloads/DR3-ns.ascii', bins, weights=True, mask='/Users/rohin/Documents/ipy_notebooks/galsurveystudy/masks/window.dr72safe0.ply')
# corrdr3milne = tpcf('./testw.dat', bins, weights=True, mask='/Users/rohin/Documents/ipy_notebooks/galsurveystudy/masks/window.dr72safe0.ply')
|
zrax/pycdc
|
tests/input/unpack_empty.py
|
Python
|
gpl-3.0
| 45
| 0
|
[] = c
y = []
for [] in x:
    BLOCK
|
[] = []
|
seanherron/data-inventory
|
inventory_project/datasets/lookups.py
|
Python
|
mit
| 625
| 0.0032
|
from django.contrib.auth.models import User
from selectable.base import ModelLookup
from selectable.registry import registry
class UserLookup(ModelLookup):
model = User
search_fields = (
'username__icontains',
'first_name__icontains',
'last_name__icontains',
)
    filters = {'is_active': True, }
def get_item_value(self, item):
# Display for currently selected item
return item.get_full_name()
def get_item_label(self, item):
# Display for choice listings
return u"%s (%s)" % (item.username, item.get_full_name())
registry.register(UserLookup)
|
melon-li/tools
|
netem/statdata/statdelay.py
|
Python
|
apache-2.0
| 1,581
| 0.01265
|
#!/usr/bin/python
#coding:utf-8
import os
import sys
import re
def usage():
help_info="Usage: %s <recinfo_file> <sendinfo_file>" % sys.argv[0]
print help_info
def main():
try:
recinfo_file=sys.argv[1]
sendinfo_file=sys.argv[2]
except:
usage()
sys.exit(-1)
if not os.path.exists(recinfo_file):
print "ERROR: recinfo_file does not exists!"
usage()
sys.exit(-1)
if not os.path.exists(sendinfo_file):
print "ERROR: r
|
ecinfo_file does not exists!"
usage()
sys.exit(-1)
delays = []
cnt = 0
with open(sendinfo_file, 'r') as sf:
sinfo = sf.read()
with open(recinfo_file, 'r') as rf:
rl = rf.readline()
while True:
rl = rf.readline()
if not rl: break
if re.search('#', rl): continue
rl_list = rl.split()
            if rl_list[1] == '0': continue
pattern = rl_list[0] + ".*?\n"
result = re.search(pattern, sinfo)
if result:
sl = result.group()
sl_list = sl.split()
delay_time = int(rl_list[3]) - int(sl_list[3])
if delay_time == 0:
print rl_list[0]
delays.append(delay_time)
print(delays)
print "rec number:%d" % len(delays)
print "rec delay max :%d" % max(delays)
print "rec delay min :%d" % min(delays)
print "rec delay avg:%.2f" % (sum(delays)/float(len(delays)),)
if __name__ == "__main__":
sys.exit(main())
|
oudalab/phyllo
|
phyllo/extractors/gestafrancDB.py
|
Python
|
apache-2.0
| 6,972
| 0.005164
|
#http://www.thelatinlibrary.com/gestafrancorum.html
#prose
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# functions are mostly made by Sarah Otts
def add_to_database(verse_entries, db):
logger.info("Adding {} entries to the database".format(len(verse_entries)))
curs = db.cursor()
curs.execute("DELETE FROM texts WHERE author='Gesta Francorum'")
for i, v in enumerate(verse_entries):
data = curs.execute("SELECT * FROM texts")
curs.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, v["title"], v["book"], "Latin", v["author"], v["date"], v["chapter"], v["verse"],
v["text"], v["link"], "prose"))
def add_entry_to_list(entries, title, book, author, date, chapter, verse, text, txturl):
entry_dict = {"title": title, "book": book, "author": author, "date": date, "chapter": chapter, "verse": verse,
"text": text, "link": txturl}
|
entries.append(entry_dict)
def get_verses(soup):
# if there's nothing in the paragraph, return an empty array
if len(soup.contents) == 0:
return None
para_text = soup.get_text()
verses = re.split('\[?[0-9]+[A-Z]?\]?|\[[ivx]+\]',
para_text) # "[x]" can contain arabic numerals, lower case roman numerals, or upper case letters
    verses = [re.sub(r'^\s+', '', v) for v in verses]  # remove whitespace
verses = [re.sub(r'^\n', '', v) for v in verses] # remove \n
verses = filter(lambda x: len(x) > 0, verses)
verses = [v for v in verses]
# print verses
return verses
def get_name_and_author_of_book(soup, url):
# attempt to get it from the page title
# print soup
pagetitle = soup.title.string
split_title = pagetitle.split(":")
if len(split_title) >= 2:
book = split_title[-1]
# if that doesn't work, get the author from the page title and the
else:
book = soup.p.br.next_sibling
# remove any surrounding spaces
book = re.sub(r'^\s+|\s+$|\n', '', book)
author = "Anonymous" #Gesta Francorum has an anonymous author.
return [book, author]
def get_title_and_date(soup):
title_soup = soup.find('h1')
title = ""
date = ""
if title_soup != None:
title = title_soup.string
else:
pagehead = soup.find('p', class_="pagehead")
if (pagehead is not None):
title = pagehead.find(text=True)
if (pagehead.find('span') is not None):
date = pagehead.find('span').string.replace("(", '').replace(")", '')
else:
h1 = soup.find('h1')
title = h1.string
if date is None or date == "":
date_tag = soup.find('h2', class_='date')
if (not date_tag is None):
date = date_tag.find(text=True).replace('(', '').replace(')', '')
else:
date = ""
date = date.replace(u"\u2013", '-')
title = title.upper()
return [title, date]
def main():
# collection name: Gesta Francorum
gestaURL = 'http://www.thelatinlibrary.com/gestafrancorum.html'
siteURL = 'http://www.thelatinlibrary.com'
gestaMain = urllib.request.urlopen(gestaURL)
soup = BeautifulSoup(gestaMain, "html5lib")
textsUrl = []
# search through soup for prose and links
for a in soup.find_all('a', href=True):
link = a['href']
textsUrl.append("{}/{}".format(siteURL, a['href']))
# remove some unnecessary urls
while ("http://www.thelatinlibrary.com/index.html" in textsUrl):
textsUrl.remove("http://www.thelatinlibrary.com/index.html")
textsUrl.remove("http://www.thelatinlibrary.com/classics.html")
textsUrl.remove("http://www.thelatinlibrary.com/medieval.html")
logger.info("\n".join(textsUrl))
# extract data
# get titles of this collection
title_dict_ges, date_dict_ges = get_title_and_date(soup)
verses = []
for u in textsUrl:
uURL = urllib.request.urlopen(u)
        soup = BeautifulSoup(uURL, "html5lib")  # check pep 8 for file/function name
book, author = get_name_and_author_of_book(soup, uURL)
date = date_dict_ges
# go through text to find chapters
para = soup.findAll('p')[:-1]
chapter = "1" #Note that chapters aren't integers.
verse = 0
text = ""
for p in para:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                             'internal_navigation']:  # these are not part of the main text
continue
except:
pass
chap_found = False
# in other books, chapters are bold or italicized
potential_chap = p.find('b')
if potential_chap is not None:
chapter = potential_chap.find(text=True)
# Include italicized part in chap name
italic = potential_chap.i
if italic is not None:
chapter += italic.string
chapter = chapter.replace("\n", "")
chapter = chapter.replace(u'\xa0', '')
#Note: Some chapters have Roman numerals as part of the chapter name.
#e.g. Roman numeral is only part of the string and is not capitalized. Needs fixing.
chapnum = {'I', 'II', 'III', 'IV', 'V', 'VI', 'VII', 'VIII', 'IX', 'X',
'XI', 'XII', 'XIII', 'XIV', 'XV', 'XVI'}
if chapter in chapnum:
chapter = chapter.upper() #Roman numerals need to be uppercase
else:
chapter = chapter.title()
verse = 0
continue
# go through text to find verses
if (get_verses(p)):
for i, t in enumerate(get_verses(p)):
verse += 1
text = t
# text = unicode.encode(text, errors="ignore")
# add the entry
add_entry_to_list(verses, title_dict_ges, book, author, date, chapter, verse, text, u)
with sqlite3.connect('texts.db') as db:
# open cursor
curs = db.cursor()
# create the database if it doesn't already exist
curs.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
db.commit()
# put it all in the db
add_to_database(verses,db)
db.commit()
logger.info("Process finished")
if __name__ == '__main__':
    main()  # do this dunder thing for everything else
|
jacobgasyna/Hackathon2017
|
basics.py
|
Python
|
gpl-3.0
| 3,296
| 0.011229
|
# Copyright © 2017 Joe Rogge, Jacob Gasyna and Adele Rehkemper
#This file is part of Rhythm Trainer Pro. Rhythm Trainer Pro is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version. Rhythm Trainer Pro is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details. You should have received a copy of the GNU General Public License
#along with Rhythm Trainer Pro. If not, see <http://www.gnu.org/licenses/>.
from Tkinter import *
from PIL import Image, ImageTk
def commandTest():
print "test"
#def makeButtons(sidebar):
#""" place all the buttons in the sidebar"""
#TODO add commands for each button
root = Tk()
screenWidth = root.winfo_screenwidth()
screenHeight = root.winfo_screenheight()
root.geometry('%dx%d+%d+%d' % (screenWidth, screenHeight, 0, 0))
# main area
mainarea = Canvas(root, bg='#FFA', width=screenWidth-200)
mainarea.pack(expand=True, fill='both', side='right')
# sidebar
sidebar = Frame(root, bg='#FFF', width=200)
sidebar.pack(expand=False, fill='both', side='left', anchor='nw')
# make buttons
wnPath = 'WholeNoteicon.png'
wnImage = Image.open(wnPath)
wn = ImageTk.PhotoImage(wnImage)
wnButton = Button(sidebar, image=wn, border=0)
wnButton.place(x=25, y=0)
wnRestPath = 'WholeResticon.png'
wnRestImage = Image.open(wnRestPath)
wnRest = ImageTk.PhotoImage(wnRestImage)
wnRestButton = Button(sidebar, image=wnRest, border=0)
wnRestButton.place(x=100, y=0)
#hnPath = 'HalfNoteicon.png'
#hnImage = Image.open(hnPath)
#hn = ImageTk.PhotoImage(hnImage)
#hnButton = Button(sidebar, image=hn, border=0)
#hnButton.place(x=25, y=0)
hnRestPath = 'HalfResticon.png'
hnRestImage = Image.open(hnRestPath)
hnRest = ImageTk.PhotoImage(hnRestImage)
hnRestButton = Button(sidebar, image=hnRest, border=0)
hnRestButton.place(x=100, y=75)
qnPath = 'QuarterNoteicon.png'
qnImage = Image.open(qnPath)
qn = ImageTk.PhotoImage(qnImage)
qnButton = Button(sidebar, image=qn, border=0)
qnButton.place(x=25, y=150)
qnRestPath = 'QuarterResticon.png'
qnRestImage = Image.open(qnRestPath)
qnRest = ImageTk.PhotoImage(qnRestImage)
qnRestButton = Button(sidebar, image=qnRest, border=0)
qnRestButton.place(x=100, y=150)
#enPath = 'EighthNoteicon.png'
#enImage = Image.open(enPath)
#en = ImageTk.PhotoImage(enImage)
#enButton = Button(sidebar, image=en, border=0)
#enButton.place(x=25, y=150)
#enRestPath = 'EighthResticon.png'
#enRestImage = Image.open(enRestPath)
#enRest = ImageTk.PhotoImage(enRestImage)
#enRestButton = Button(sidebar, image=enRest, border=0)
#enRestButton.place(x=100, y=150)
snPath = 'SixteenthNoteicon.png'
snImage = Image.open(snPath)
sn = ImageTk.PhotoImage(snImage)
snButton = Button(sidebar, image=sn, border=0)
snButton.place(x=25, y=225)
snRestPath = 'SixteenthResticon.png'
snRestImage = Image.open(snRestPath)
snRest = ImageTk.PhotoImage(snRestImage)
snRestButton = Button(sidebar, image=snRest, border=0)
snRestButton.place(x=100, y=225)
if __name__ == '__main__':
root.mainloop()
|
witlox/dcs
|
controller/ilm/consuela.py
|
Python
|
gpl-2.0
| 4,723
| 0.003176
|
import json
import logging
from logging.config import dictConfig
import threading
import pickle
import redis
import aws
from settings import Settings
def terminate_worker(worker_id, instance, client):
result = aws.terminate_machine(instance)
if result is None or len(result) == 0:
logging.error('could not remove worker %s, remove manually!' % instance)
client.delete(worker_id)
class Consuela(threading.Thread):
""" Manages the termination of machines """
def __init__(self):
with open('logging.json') as jl:
dictConfig(json.load(jl))
logging.info('Consuela: Starting.')
threading.Thread.__init__(self)
self.daemon = True
self.settings = Settings()
self.client = redis.Redis('db')
self.job_pub_sub = self.client.pubsub()
self.job_pub_sub.subscribe(['jobs'])
def run(self):
for item in self.job_pub_sub.listen():
job_id = item['data']
if job_id == 'KILL':
self.job_pub_sub.unsubscribe()
logging.info('Consuela: Stopping.')
return
#
worker_id, worker = self.get_worker(job_id)
if worker and self.client.exists(job_id):
job = pickle.loads(self.client.get(job_id))
if job.state == 'finished' and worker.instance is not None:
if not self.settings.recycle_workers:
logging.info('recycle workers off, %s finished, shutting down machine' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
else:
if self.recycle_worker(job_id, job):
logging.info('going to recycle worker %s' % worker.instance)
worker.job_id = None
self.client.set(worker_id, pickle.dumps(worker))
else:
logging.info('no work left for %s, shutting down machine' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
elif job.state == 'failed' and worker.instance is not None:
logging.warning('%s finished with failure' % job_id)
if self.settings.auto_remove_failed and not self.settings.recycle_workers:
logging.info('auto-remove on failure enabled, trying to remove %s' % worker.instance)
terminate_worker(worker_id, worker.instance, self.client)
else:
logging.warning('auto-remove on failure not performed, manually remove %s!' % worker.instance)
elif job.state == 'broken' and worker.instance is not None:
logging.info('Terminating worker with a broken job.')
terminate_worker(worker_id, worker.instance, self.client)
job.state = 'failed'
self.client.set(job_id, pickle.dumps(job))
elif worker_id and worker and worker.instance:
terminate_worker(worker_id, worker.instance, self.client)
else:
logging.debug('no worker found for %s' % job_id)
def get_worker(self, job_id):
for worker_id in [worker_key for worker_key in self.client.keys() if worker_key.startswith('jm-')]: # Redis keys(pattern='*') does not filter at all.
pickled_worker = self.client.get(worker_id)
if pickled_worker is None:
continue
worker = pickle.loads(pickled_worker)
if worker.job_id is not None and worker.job_id == job_id:
return worker_id, worker
return None, None
def recycle_worker(self, job_id, job):
if job.batch_id is None or not self.client.exists(job.batch_id):
logging.info('could not find a "real" batch id for %s' % job.batch_id)
return False
batch = pickle.loads(self.client.get(job.batch_id))
for batch_job_id in pickle.loads(batch.jobs):
logging.debug('have job %s in batch %s' % (batch_job_id, job.batch_id))
if batch_job_id != job_id:
logging.debug('found other job in batch, checking state')
if self.client.exists(batch_job_id):
batch_job = pickle.loads(self.client.get(batch_job_id))
                    logging.debug('state is %s (for %s)' % (batch_job.state, batch_job_id))
if batch_job.state == 'spawned' or batch_job.state == 'received' or batch_job.state == 'delayed':
return True
return False
|
bugsnag/bugsnag-python
|
bugsnag/wsgi/__init__.py
|
Python
|
mit
| 147
| 0
|
from typing import Dict
from urllib.parse import quote
def request_path(env: Dict):
    return quote('/' + env.get('PATH_INFO', '').lstrip('/'))
|
ritchyteam/odoo
|
addons/mail/mail_group.py
|
Python
|
agpl-3.0
| 12,895
| 0.004731
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import openerp
import openerp.tools as tools
from openerp.osv import osv
from openerp.osv import fields
from openerp.tools.safe_eval import safe_eval as eval
from openerp import SUPERUSER_ID
from openerp.tools.translate import _
class mail_group(osv.Model):
""" A mail_group is a collection of users sharing messages in a discussion
group. The group mechanics are based on the followers. """
_description = 'Discussion group'
_name = 'mail.group'
_mail_flat_thread = False
_inherit = ['mail.thread']
_inherits = {'mail.alias': 'alias_id'}
def _get_image(self, cr, uid, ids, name, args, context=None):
result = {}
for obj in self.browse(cr, uid, ids, context=context):
result[obj.id] = tools.image_get_resized_images(obj.image)
return result
def _set_image(self, cr, uid, id, name, value, args, context=None):
return self.write(cr, uid, [id], {'image': tools.image_resize_image_big(value)}, context=context)
_columns = {
'name': fields.char('Name', required=True, translate=True),
'description': fields.text('Description'),
'menu_id': fields.many2one('ir.ui.menu', string='Related Menu', required=True, ondelete="cascade"),
'public': fields.selection([('public', 'Everyone'), ('private', 'Invited people only'),
('groups', 'Selected group of users')], 'Privacy', required=True,
help='This group is visible by non members. \
Invisible groups can add members through the invite button.'),
'group_public_id': fields.many2one('res.groups', string='Authorized Group'),
'group_ids': fields.many2many('res.groups', rel='mail_group_res_group_rel',
id1='mail_group_id', id2='groups_id', string='Auto Subscription',
                                      help="Members of those groups will automatically be added as followers. "\
"Note that they will be able to manage their subscription manually "\
"if necessary."),
# image: all image fields are base64 encoded and PIL-supported
'image': fields.binary("Photo",
help="This field holds the image used as photo for the group, limited to 1024x1024px."),
'image_medium': fields.function(_get_image, fnct_inv=_set_image,
string="Medium-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Medium-sized photo of the group. It is automatically "\
"resized as a 128x128px image, with aspect ratio preserved. "\
"Use this field in form views or some kanban views."),
'image_small': fields.function(_get_image, fnct_inv=_set_image,
string="Small-sized photo", type="binary", multi="_get_image",
store={
'mail.group': (lambda self, cr, uid, ids, c={}: ids, ['image'], 10),
},
help="Small-sized photo of the group. It is automatically "\
"resized as a 64x64px image, with aspect ratio preserved. "\
"Use this field anywhere a small image is required."),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="The email address associated with this group. New emails received will automatically "
"create new topics."),
}
def _get_default_employee_group(self, cr, uid, context=None):
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'group_user')
return ref and ref[1] or False
def _get_default_image(self, cr, uid, context=None):
image_path = openerp.modules.get_module_resource('mail', 'static/src/img', 'groupdefault.png')
return tools.image_resize_image_big(open(image_path, 'rb').read().encode('base64'))
_defaults = {
'public': 'groups',
'group_public_id': _get_default_employee_group,
'image': _get_default_image,
}
def _generate_header_description(self, cr, uid, group, context=None):
header = ''
if group.description:
header = '%s' % group.description
if group.alias_id and group.alias_name and group.alias_domain:
if header:
header = '%s<br/>' % header
return '%sGroup email gateway: %s@%s' % (header, group.alias_name, group.alias_domain)
return header
def _subscribe_users(self, cr, uid, ids, context=None):
for mail_group in self.browse(cr, uid, ids, context=context):
partner_ids = []
for group in mail_group.group_ids:
partner_ids += [user.partner_id.id for user in group.users]
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
# get parent menu
menu_parent = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'mail_group_root')
menu_parent = menu_parent and menu_parent[1] or False
# Create menu id
mobj = self.pool.get('ir.ui.menu')
menu_id = mobj.create(cr, SUPERUSER_ID, {'name': vals['name'], 'parent_id': menu_parent}, context=context)
vals['menu_id'] = menu_id
# Create group and alias
create_context = dict(context, alias_model_name=self._name, alias_parent_model_name=self._name, mail_create_nolog=True)
mail_group_id = super(mail_group, self).create(cr, uid, vals, context=create_context)
group = self.browse(cr, uid, mail_group_id, context=context)
self.pool.get('mail.alias').write(cr, uid, [group.alias_id.id], {"alias_force_thread_id": mail_group_id, 'alias_parent_thread_id': mail_group_id}, context)
group = self.browse(cr, uid, mail_group_id, context=context)
# Create client action for this group and link the menu to it
ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'action_mail_group_feeds')
if ref:
search_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'mail', 'view_message_search')
params = {
'search_view_id': search_ref and search_ref[1] or False,
'domain': [
('model', '=', 'mail.group'),
('res_id', '=', mail_group_id),
],
'context': {
'default_model': 'mail.group',
'default_res_id': mail_group_id,
},
'res_model': 'mail.message',
'thread_level': 1,
'header_description': self._generate_header_description(cr, uid, group, context=context),
'view_mailbox': True,
'compose_placeholder': 'Send a message to the group',
}
cobj = self.pool.get('ir.actions.client')
newref = cobj.copy(cr, SUPERUSER_ID, ref[1], default={'params': str(params), 'name': vals['name']}, context=context)
mobj.write(cr, SUPERUSER_ID, menu_id, {'action': 'ir.actions.client,' + str(n
|
dmacvicar/spacewalk
|
backend/server/action_extra_data/reboot.py
|
Python
|
gpl-2.0
| 1,085
| 0.003687
|
#
# Copyright (c) 2008--2011 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
|
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
from spacewalk.common import rhnFlags
from spacewalk.common.rhnLog import log_debug
from spacewalk.server.rhnServer import server_kickstart
# the "exposed" functions
__rhnexport__ = ['reboot']
def reboot(server_id, action_id, data={}):
log_debug(3, action_id)
action_status = rhnFlags.get('action_status')
    server_kickstart.update_kickstart_session(server_id, action_id,
action_status, kickstart_state='restarted',
next_action_type=None)
|
nataddrho/DigiCue-USB
|
Python3/src/venv/Lib/site-packages/pip/_internal/utils/parallel.py
|
Python
|
mit
| 3,327
| 0
|
"""Convenient parallelization of higher order functions.
This module provides two helper functions, with appropriate fallbacks on
Python 2 and on systems lacking support for synchronization mechanisms:
- map_multiprocess
- map_multithread
These helpers work like Python 3's map, with two differences:
- They don't guarantee the order of processing of
the elements of the iterable.
- The underlying process/thread pools chop the iterable into
a number of chunks, so that for very long iterables using
a large value for chunksize can make the job complete much faster
than using the default value of 1.
"""
__all__ = ['map_multiprocess', 'map_multithread']
from contextlib import contextmanager
from multiprocessing import Pool as ProcessPool
from multiprocessing.dummy import Pool as ThreadPool
from pip._vendor.requests.adapters import DEFAULT_POOLSIZE
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from multiprocessing import pool
from typing import Callable, Iterable, Iterator, TypeVar, Union
Pool = Union[pool.Pool, pool.ThreadPool]
S = TypeVar('S')
T = TypeVar('T')
# On platforms without sem_open, multiprocessing[.dummy] Pool
# cannot be created.
try:
import multiprocessing.synchronize # noqa
except ImportError:
LACK_SEM_OPEN = True
else:
LACK_SEM_OPEN = False
# Incredibly large timeout to work around bpo-8296 on Python 2.
TIMEOUT = 2000000
@contextmanager
def closing(pool):
# type: (Pool) -> Iterator[Pool]
"""Return a context manager making sure the pool
|
closes properly."""
try:
yield pool
finally:
# For Pool.imap*, close and join are needed
# for the returned iterator to begin yielding.
        pool.close()
pool.join()
pool.terminate()
def _map_fallback(func, iterable, chunksize=1):
# type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]
"""Make an iterator applying func to each element in iterable.
This function is the sequential fallback either on Python 2
where Pool.imap* doesn't react to KeyboardInterrupt
or when sem_open is unavailable.
"""
return map(func, iterable)
def _map_multiprocess(func, iterable, chunksize=1):
# type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]
"""Chop iterable into chunks and submit them to a process pool.
For very long iterables using a large value for chunksize can make
the job complete much faster than using the default value of 1.
Return an unordered iterator of the results.
"""
with closing(ProcessPool()) as pool:
return pool.imap_unordered(func, iterable, chunksize)
def _map_multithread(func, iterable, chunksize=1):
# type: (Callable[[S], T], Iterable[S], int) -> Iterator[T]
"""Chop iterable into chunks and submit them to a thread pool.
For very long iterables using a large value for chunksize can make
the job complete much faster than using the default value of 1.
Return an unordered iterator of the results.
"""
with closing(ThreadPool(DEFAULT_POOLSIZE)) as pool:
return pool.imap_unordered(func, iterable, chunksize)
if LACK_SEM_OPEN:
map_multiprocess = map_multithread = _map_fallback
else:
map_multiprocess = _map_multiprocess
map_multithread = _map_multithread
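# Hedged usage sketch (not part of pip itself): exercising map_multithread defined
# above. The __main__ guard keeps importing this module side-effect free; note the
# helpers do not guarantee the order of the results.
if __name__ == "__main__":
    def _square(x):
        return x * x
    print(sorted(map_multithread(_square, range(8), chunksize=2)))  # [0, 1, 4, 9, 16, 25, 36, 49]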
|
thegeorgeous/flask-cqlalchemy
|
examples/example_app_udt.py
|
Python
|
isc
| 509
| 0
|
from flask import Flask
from flask_cqlalchemy import CQLAlchemy
app = Flask(__name__)
app.config['CASSANDRA_HOSTS'] = ['127.0.0.1']
app.config['CASSANDRA_KEYSPACE'] = "cqlengine"
app.config['CASSANDRA_SETUP_KWARGS'] = {'protocol_version': 3}
db = CQLAlchemy(app)
class Address(db.UserType):
street = db.columns.Text()
zipcode = db.columns.Integer()
class Users(db.Model):
__keyspace__ = 'cqlengine'
name = db.columns.Text(primary_key=True)
addr = db.columns.UserDefinedType(Address)
|
nobukatsu/deep-learning-from-scratch
|
ch03/nn-3layer.py
|
Python
|
mit
| 894
| 0.008949
|
import numpy as np
def init_network():
network = {}
network['W1'] = np.array([[0.1, 0.3, 0.5], [0.2, 0.4, 0.6]])
|
network['b1'] = np.array([0.1, 0.2, 0.3])
    network['W2'] = np.array([[0.1, 0.4], [0.2, 0.5], [0.3, 0.6]])
network['b2'] = np.array([0.1, 0.2])
network['W3'] = np.array([[0.1, 0.3], [0.2, 0.4]])
network['b3'] = np.array([0.1, 0.2])
return network
def identity_function(x):
return x
def forward(network, x):
W1, W2, W3 = network['W1'], network['W2'], network['W3']
b1, b2, b3 = network['b1'], network['b2'], network['b3']
a1 = np.dot(x, W1) + b1
z1 = sigmoid(a1)
a2 = np.dot(z1, W2) + b2
z2 = sigmoid(a2)
a3 = np.dot(z2, W3) + b3
y = identity_function(a3)
return y
def sigmoid(x):
return 1/(1+np.exp(-x))
network = init_network()
x = np.array([1.0, 0.5])
y = forward(network, x)
print(y)
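# Worked check of the forward pass above (illustrative): with x = [1.0, 0.5],
# a1 = x @ W1 + b1 = [0.3, 0.7, 1.1] and z1 = sigmoid(a1); the same
# affine + sigmoid pattern repeats through (W2, b2) and (W3, b3), and the final
# affine output a3 is returned unchanged by identity_function, so y == a3.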
|
schmit/intro-python-course
|
lectures/code/tuples_basics.py
|
Python
|
mit
| 65
| 0.046154
|
>>> myTuple = (1, 2, 3)
>>> myTuple[1]
2
>>> myTuple[1:3]
(2, 3)
|
jptomo/rpython-lang-scheme
|
rpython/translator/goal/translate.py
|
Python
|
mit
| 12,703
| 0.001889
|
#! /usr/bin/env pypy
"""
Command-line options for translate:
See below
"""
import os
import sys
import py
from rpython.config.config import (to_optparse, OptionDescription, BoolOption,
ArbitraryOption, StrOption, IntOption, Config, ChoiceOption, OptHelpFormatter)
from rpython.config.translationoption import (get_combined_translation_config,
set_opt_level, OPT_LEVELS, DEFAULT_OPT_LEVEL, set_platform, CACHE_DIR)
# clean up early rpython/_cache
try:
py.path.local(CACHE_DIR).remove()
except Exception:
pass
GOALS = [
("annotate", "do type inference", "-a --annotate", ""),
("rtype", "do rtyping", "-t --rtype", ""),
("pyjitpl", "JIT generation step", "--pyjitpl", ""),
("jittest", "JIT test with llgraph backend", "--pyjittest", ""),
("backendopt", "do backend optimizations", "--backendopt", ""),
("source", "create source", "-s --source", ""),
("compile", "compile", "-c --compile", " (default goal)"),
("llinterpret", "interpret the rtyped flow graphs", "--llinterpret", ""),
]
def goal_options():
result = []
for name, doc, cmdline, extra in GOALS:
optional = False
if name.startswith('?'):
optional = True
name = name[1:]
yesdoc = doc[0].upper() + doc[1:] + extra
result.append(BoolOption(name, yesdoc, default=False, cmdline=cmdline,
negation=False))
if not optional:
result.append(BoolOption("no_%s" % name, "Don't " + doc, default=False,
cmdline="--no-" + name, negation=False))
return result
translate_optiondescr = OptionDescription("translate", "XXX", [
StrOption("targetspec", "XXX", default='targetpypystandalone',
cmdline=None),
ChoiceOption("opt",
"optimization level", OPT_LEVELS, default=DEFAULT_OPT_LEVEL,
cmdline="--opt -O"),
BoolOption("profile",
"cProfile (to debug the speed of the translation process)",
default=False,
cmdline="--profile"),
BoolOption("pdb",
"Always run pdb even if the translation succeeds",
default=False,
cmdline="--pdb"),
BoolOption("batch", "Don't run interactive helpers", default=False,
cmdline="--batch", negation=False),
IntOption("huge", "Threshold in the number of functions after which "
"a local call graph and not a full one is displayed",
default=100, cmdline="--huge"),
BoolOption("view", "Start the pygame viewer", default=False,
cmdline="--view", negation=False),
BoolOption("help", "show this help message and exit", default=False,
cmdline="-h --help", negation=False),
BoolOption("fullhelp", "show full help message and exit", default=False,
cmdline="--full-help", negation=False),
ArbitraryOption("goals", "XXX",
defaultfactory=list),
# xxx default goals ['annotate', 'rtype', 'backendopt', 'source', 'compile']
ArbitraryOption("skipped_goals", "XXX",
defaultfactory=list),
OptionDescription("goal_options",
"Goals that should be reached during translation",
goal_options()),
])
import optparse
from rpython.tool.ansi_print import ansi_log
log = py.log.Producer("translation")
py.log.setconsumer("translation", ansi_log)
def load_target(targetspec):
log.info("Translating target as defined by %s" % targetspec)
if not targetspec.endswith('.py'):
targetspec += '.py'
thismod = sys.modules[__name__]
sys.modules['translate'] = thismod
specname = os.path.splitext(os.path.basename(targetspec))[0]
sys.path.insert(0, os.path.dirname(targetspec))
mod = __import__(specname)
if 'target' not in mod.__dict__:
raise Exception("file %r is not a valid targetxxx.py." % (targetspec,))
return mod.__dict__
def parse_options_and_load_target():
opt_parser = optparse.OptionParser(usage="%prog [options] [target] [target-specific-options]",
prog="translate",
formatter=OptHelpFormatter(),
add_help_option=False)
opt_parser.disable_interspersed_args()
config = get_combined_translation_config(translating=True)
to_optparse(config, parser=opt_parser, useoptions=['translation.*'])
translateconfig = Config(translate_optiondescr)
to_optparse(translateconfig, parser=opt_parser)
options, args = opt_parser.parse_args()
# set goals and skipped_goals
reset = False
for name, _, _, _ in GOALS:
if name.startswith('?'):
continue
if getattr(translateconfig.goal_options, name):
if name not in translateconfig.goals:
translateconfig.goals.append(name)
if getattr(translateconfig.goal_options, 'no_' + name):
if name not in translateconfig.skipped_goals:
if not reset:
translateconfig.skipped_goals[:] = []
reset = True
translateconfig.skipped_goals.append(name)
if args:
arg = args[0]
args = args[1:]
if os.path.isfile(arg + '.py'):
assert not os.path.isfile(arg), (
"ambiguous file naming, please rename %s" % arg)
translateconfig.targetspec = arg
elif os.path.isfile(arg) and arg.endswith('.py'):
translateconfig.targetspec = arg[:-3]
else:
log.ERROR("Could not find target %r" % (arg, ))
sys.exit(1)
else:
show_help(translateconfig, opt_parser, None, config)
# print the version of the host
# (if it's PyPy, it includes the hg checksum)
log.info(sys.version)
# apply the platform settings
set_platform(config)
targetspec = translateconfig.targetspec
    targetspec_dic = load_target(targetspec)
if args and not targetspec_dic.get('take_options', False):
log.WARNING("target specific arguments supplied but will be ignored: %s" % ' '.join(args))
# give the target the possibility to get its own configuration options
# into the config
    if 'get_additional_config_options' in targetspec_dic:
optiondescr = targetspec_dic['get_additional_config_options']()
config = get_combined_translation_config(
optiondescr,
existing_config=config,
translating=True)
# show the target-specific help if --help was given
show_help(translateconfig, opt_parser, targetspec_dic, config)
# apply the optimization level settings
set_opt_level(config, translateconfig.opt)
# let the target modify or prepare itself
# based on the config
if 'handle_config' in targetspec_dic:
targetspec_dic['handle_config'](config, translateconfig)
return targetspec_dic, translateconfig, config, args
def show_help(translateconfig, opt_parser, targetspec_dic, config):
if translateconfig.help:
if targetspec_dic is None:
opt_parser.print_help()
print "\n\nDefault target: %s" % translateconfig.targetspec
print "Run '%s --help %s' for target-specific help" % (
sys.argv[0], translateconfig.targetspec)
elif 'print_help' in targetspec_dic:
print "\n\nTarget specific help for %s:\n\n" % (
translateconfig.targetspec,)
targetspec_dic['print_help'](config)
else:
print "\n\nNo target-specific help available for %s" % (
translateconfig.targetspec,)
print "\n\nFor detailed descriptions of the command line options see"
print "http://pypy.readthedocs.org/en/latest/config/commandline.html"
sys.exit(0)
def log_options(options, header="options in effect"):
# list options (xxx filter, filter for target)
log('%s:' % header)
optnames = options.__dict__.keys()
optnames.sort()
for name in optnames:
optvalue = getattr(options, name)
|
usc-isi-i2/etk
|
etk/data_extractors/htiExtractors/misc.py
|
Python
|
mit
| 1,773
| 0.0141
|
def phone_num_lists():
"""
    Gets a dictionary of 0-9 integer values (as Strings) mapped to their potential Backpage ad manifestations, such as "zer0" or "seven".
Returns:
dictionary of 0-9 integer values mapped to a list of strings containing the key's possible manifestations
"""
all_nums = {}
all_nums['2'] = ['2', 'two']
all_nums['3'] = ['3', 'three']
all_nums['4'] = ['4', 'four', 'fuor']
all_nums['5'] = ['5', 'five', 'fith']
all_nums['6'] = ['6', 'six']
    all_nums['7'] = ['7', 'seven', 'sven']
all_nums['8'] = ['8', 'eight']
all_nums['9'] = ['9', 'nine']
all_nums['0'] = ['0', 'zero', 'zer0', 'oh', 'o']
    all_nums['1'] = ['1', 'one', '!', 'l', 'i']
return all_nums
def phone_text_subs():
"""
Gets a dictionary of dictionaries that each contain alphabetic number manifestations mapped to their actual
Number value.
Returns:
dictionary of dictionaries containing Strings mapped to Numbers
"""
Small = {
'zero': 0,
'zer0': 0,
'one': 1,
'two': 2,
'three': 3,
'four': 4,
'fuor': 4,
'five': 5,
'fith': 5,
'six': 6,
'seven': 7,
'sven': 7,
'eight': 8,
'nine': 9,
'ten': 10,
'eleven': 11,
'twelve': 12,
'thirteen': 13,
'fourteen': 14,
'fifteen': 15,
'sixteen': 16,
'seventeen': 17,
'eighteen': 18,
'nineteen': 19,
'twenty': 20,
'thirty': 30,
'forty': 40,
'fifty': 50,
'sixty': 60,
'seventy': 70,
'eighty': 80,
'ninety': 90,
'oh': 0
}
Magnitude = {
        'thousand': 1000,
        'million': 1000000,
}
Others = {
'!': 1,
'o': 0,
'l': 1,
'i': 1
}
output = {}
output['Small'] = Small
output['Magnitude'] = Magnitude
output['Others'] = Others
return output
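# Hedged usage sketch (an assumption, not part of the original module): resolving a
# spelled-out digit with the two lookup tables defined above.
if __name__ == "__main__":
    subs = phone_text_subs()
    token = 'zer0'
    print(subs['Small'].get(token, subs['Others'].get(token)))  # -> 0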
|
Distrotech/bzr
|
bzrlib/testament.py
|
Python
|
gpl-2.0
| 9,034
| 0.001107
|
# Copyright (C) 2005 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Testament - a summary of a revision for signing.
A testament can be defined as "something that serves as tangible
proof or evidence." In bzr we use them to allow people to certify
particular revisions as authentic.
The goal is that if two revisions are semantically equal, then they will
have a byte-for-byte equal testament. We can define different versions of
"semantically equal" by using different testament classes; e.g. one that
includes or ignores file-ids.
We sign a testament rather than the revision XML itself for several reasons.
The most important is that the form in which the revision is stored
internally is designed for that purpose, and contains information which need
not be attested to by the signer. For example the inventory contains the
last-changed revision for a file, but this is not necessarily something the
user cares to sign.
Having unnecessary fields signed makes the signatures brittle when the same
revision is stored in different branches or when the format is upgraded.
Handling upgrades is another motivation for using testaments separate from
the stored revision. We would like to be able to compare a signature
generated from an old-format tree to newer tree, or vice versa. This could
be done by comparing the revisions but that makes it unclear about exactly
what is being compared or not.
Different signing keys might indicate different levels of trust; we can in
the future extend this to allow signatures indicating not just that a
particular version is authentic but that it has other properties.
The signature can be applied to either the full testament or to just a
hash of it.
Testament format 1
~~~~~~~~~~~~~~~~~~
* timestamps are given as integers to avoid rounding errors
* parents given in lexicographical order
* indented-text form similar to log; intended to be human readable
* paths are given with forward slashes
* files are named using paths for ease of comparison/debugging
* the testament uses unix line-endings (\n)
"""
from __future__ import absolute_import
# XXX: At the moment, clients trust that the graph described in a weave
# is accurate, but that's not covered by the testament. Perhaps the best
# fix is when verifying a revision to make sure that every file mentioned
# in the revision has compatible ancestry links.
# TODO: perhaps write timestamp in a more readable form
# TODO: Perhaps these should just be different formats in which inventories/
# revisions can be serialized.
from copy import copy
from bzrlib.osutils import (
contains_whitespace,
contains_linebreaks,
sha_strings,
)
from bzrlib.tree import Tree
class Testament(object):
"""Reduced summary of a revision.
Testaments can be
- produced from a revision
- written to a stream
- loaded from a stream
- compared to a revision
"""
long_header = 'bazaar-ng testament version 1\n'
short_header = 'bazaar-ng testament short form 1\n'
include_root = False
@classmethod
def from_revision(cls, repository, revision_id):
"""Produce a new testament from a historical revision."""
rev = repository.get_revision(revision_id)
tree = repository.revision_tree(revision_id)
return cls(rev, tree)
@classmethod
def from_revision_tree(cls, tree):
"""Produce a new testament from a revision tree."""
rev = tree._repository.get_revision(tree.get_revision_id())
return cls(rev, tree)
def __init__(self, rev, tree):
"""Create a new testament for rev using tree."""
self.revision_id = rev.revision_id
self.committer = rev.committer
self.timezone = rev.timezone or 0
self.timestamp = rev.timestamp
self.message = rev.message
self.parent_ids = rev.parent_ids[:]
if not isinstance(tree, Tree):
raise TypeError("As of bzr 2.4 Testament.__init__() takes a "
"Revision and a Tree.")
self.tree = tree
self.revprops = copy(rev.properties)
        if contains_whitespace(self.revision_id):
raise ValueError(self.revision_id)
if contains_linebreaks(self.committer):
raise ValueError(self.committer)
def as_text_lines(self):
"""Yield text form as a sequence of lines.
        The result is returned in utf-8, because it should be signed or
hashed in that encoding.
"""
r = []
a = r.append
a(self.long_header)
a('revision-id: %s\n' % self.revision_id)
a('committer: %s\n' % self.committer)
a('timestamp: %d\n' % self.timestamp)
a('timezone: %d\n' % self.timezone)
# inventory length contains the root, which is not shown here
a('parents:\n')
for parent_id in sorted(self.parent_ids):
if contains_whitespace(parent_id):
raise ValueError(parent_id)
a(' %s\n' % parent_id)
a('message:\n')
for l in self.message.splitlines():
a(' %s\n' % l)
a('inventory:\n')
for path, ie in self._get_entries():
a(self._entry_to_line(path, ie))
r.extend(self._revprops_to_lines())
return [line.encode('utf-8') for line in r]
def _get_entries(self):
return ((path, ie) for (path, versioned, kind, file_id, ie) in
self.tree.list_files(include_root=self.include_root))
def _escape_path(self, path):
if contains_linebreaks(path):
raise ValueError(path)
return unicode(path.replace('\\', '/').replace(' ', '\ '))
def _entry_to_line(self, path, ie):
"""Turn an inventory entry into a testament line"""
if contains_whitespace(ie.file_id):
raise ValueError(ie.file_id)
content = ''
content_spacer=''
if ie.kind == 'file':
# TODO: avoid switching on kind
if not ie.text_sha1:
raise AssertionError()
content = ie.text_sha1
content_spacer = ' '
elif ie.kind == 'symlink':
if not ie.symlink_target:
raise AssertionError()
content = self._escape_path(ie.symlink_target)
content_spacer = ' '
l = u' %s %s %s%s%s\n' % (ie.kind, self._escape_path(path),
ie.file_id.decode('utf8'),
content_spacer, content)
return l
def as_text(self):
return ''.join(self.as_text_lines())
def as_short_text(self):
"""Return short digest-based testament."""
return (self.short_header +
'revision-id: %s\n'
'sha1: %s\n'
% (self.revision_id, self.as_sha1()))
def _revprops_to_lines(self):
"""Pack up revision properties."""
if not self.revprops:
return []
r = ['properties:\n']
for name, value in sorted(self.revprops.items()):
if contains_whitespace(name):
raise ValueError(name)
r.append(' %s:\n' % name)
for line in value.splitlines():
r.append(u' %s\n' % line)
return r
def as_sha1(self):
return sha_strings(self.as_text_lines())
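# Hedged usage sketch (assumed wiring, not taken from bzrlib's own docs): producing
# the signable text for a revision with the Testament class above.
#
#   t = Testament.from_revision(repository, revision_id)
#   signable_bytes = t.as_text()        # full testament, utf-8 encoded
#   short_form = t.as_short_text()      # sha1-based short form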
class StrictTestament(Testament):
"""This testament format is for use as a checksum in bundle format 0.8"""
long_header = 'bazaar-ng testament version 2.1\n'
|
jason-neal/companion_simulations
|
misc/starfish_tests/read_HDF5.py
|
Python
|
mit
| 215
| 0
|
# Test reading hdf5 file that I created
import numpy as np
import Starfish
from Starfish.grid_tools import HDF5Interface
myHDF5 = HDF5Interface()
wl = myHDF5.wl
flux = myHDF5.load_flux(np.array([6100, 4.5, 0.0]))
|
libvirt/autotest
|
frontend/tko/csv_encoder.py
|
Python
|
gpl-2.0
| 5,495
| 0.00364
|
import csv
import django.http
try:
import autotest.common as common
except ImportError:
import common
from autotest_lib.frontend.afe import rpc_utils
class CsvEncoder(object):
def __init__(self, request, response):
self._request = request
self._response = response
self._output_rows = []
def _append_output_row(self, row):
self._output_rows.append(row)
def _build_response(self):
response = django.http.HttpResponse(mimetype='text/csv')
response['Content-Disposition'] = (
'attachment; filename=tko_query.csv')
writer = csv.writer(response)
writer.writerows(self._output_rows)
return response
def encode(self):
raise NotImplementedError
class UnhandledMethodEncoder(CsvEncoder):
def encode(self):
return rpc_utils.raw_http_response(
'Unhandled method %s (this indicates a bug)\r\n' %
self._request['method'])
class SpreadsheetCsvEncoder(CsvEncoder):
def _total_index(self, group, num_columns):
row_index, column_index = group['header_indices']
return row_index * num_columns + column_index
def _group_string(self, group):
result = '%s / %s' % (group['pass_count'], group['complete_count'])
if group['incomplete_count'] > 0:
result += ' (%s incomplete)' % group['incomplete_count']
if 'extra_info' in group:
result = '\n'.join([result] + group['extra_info'])
return result
def _build_value_table(self):
value_table = [''] * self._num_rows * self._num_columns
for group in self._response['groups']:
total_index = self._total_index(group, self._num_columns)
value_table[total_index] = self._group_string(group)
return value_table
def _header_string(self, header_value):
return '/'.join(header_value)
def _process_value_table(self, value_table, row_headers):
total_index = 0
for row_index in xrange(self._num_rows):
row_header = self._header_string(row_headers[row_index])
row_end_index = total_index + self._num_columns
row_values = value_table[total_index:row_end_index]
self._append_output_row([row_header] + row_values)
total_index += self._num_columns
def encode(self):
header_values = self._response['header_values']
assert len(header_values) == 2
row_headers, column_headers = header_values
self._num_rows, self._num_columns = (len(row_headers),
len(column_headers))
value_table = self._build_value_table()
first_line = [''] + [self._header_string(header_value)
for header_value in column_headers]
self._append_output_row(first_line)
self._process_value_table(value_table, row_headers)
return self._build_response()
class TableCsvEncoder(CsvEncoder):
def __init__(self, request, response):
super(TableCsvEncoder, self).__init__(request, response)
self._column_specs = request['columns']
def _format_row(self, row_object):
"""Extract data from a row object into a list of strings"""
return [row_object.get(field) for field, name in self._column_specs]
def _encode_table(self, row_objects):
self._append_output_row([column_spec[1] # header row
for column_spec in self._column_specs])
for row_object in row_objects:
self._append_output_row(self._format_row(row_object))
return self._build_response()
def encode(self):
return self._encode_table(self._response)
class GroupedTableCsvEncoder(TableCsvEncoder):
def encode(self):
return self._encode_table(self._response['groups'])
class StatusCountTableCsvEncoder(GroupedTableCsvEncoder):
_PASS_RATE_FIELD = '_test_pass_rate'
def __init__(self, request, response):
super(StatusCountTableCsvEncoder, self).__init__(request, response)
# inject a more sensible field name for test pass rate
for column_spec in self._column_specs:
field, name = column_spec
if name == 'Test pass rate':
column_spec[0] = self._PASS_RATE_FIELD
break
def _format_pass_rate(self, row_object):
result = '%s / %s' % (row_object['pass_count'],
row_object['complete_count'])
incomplete_count = row_object['incomplete_count']
if incomplete_count:
result += ' (%s incomplete)' % incomplete_count
return result
def _format_row(self, row_object):
row_object[self._PASS_RATE_FIELD] = self._format_pass_rate(row_object)
return super(StatusCountTableCsvEncoder, self)._format_row(row_object)
_ENCODER_MAP = {
'get_latest_tests' : SpreadsheetCsvEncoder,
'get_test_views' : TableCsvEncoder,
'get_group_counts' : GroupedTableCsvEncoder,
}
def _get_encoder_class(request):
method = request['method']
if method in _ENCODER_MAP:
return _ENCODER_MAP[method]
if method == 'get_status_counts':
if 'columns' in request:
return StatusCountTableCsvEncoder
        return SpreadsheetCsvEncoder
    return UnhandledMethodEncoder
def encoder(request, response):
EncoderClass = _get_encoder_class(request)
return EncoderClass(request, response)
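# Hedged usage sketch (assumed request/response shapes, not from the autotest docs):
# the factory above selects an encoder by RPC method name, and .encode() yields a
# django HttpResponse carrying the CSV payload.
#
#   request = {'method': 'get_test_views', 'columns': [['test_name', 'Test name']]}
#   http_response = encoder(request, rows_from_rpc).encode()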
|
petrjasek/superdesk-core
|
content_api/companies/resource.py
|
Python
|
agpl-3.0
| 963
| 0
|
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from superdesk.resource import Resource
from content_api import MONGO_PREFIX
class CompaniesResource(Resource):
"""
Company schema
"""
schema = {
"name": {"type": "string", "unique": True, "required": True},
"sd_subscriber_id": {"type": "string"},
"is_enabled": {"type": "boolean", "default": True},
"contact_name": {"type": "string"},
"phone": {"type": "string"},
"country": {"type": "string"},
}
datasource = {"source": "companies", "default_sort": [("name", 1)]}
item_methods = ["GET", "PATCH", "PUT"]
resource_methods = ["GET", "POST"]
mongo_prefix = MONGO_PREFIX
|
googleads/google-ads-python
|
google/ads/googleads/v9/services/services/income_range_view_service/client.py
|
Python
|
apache-2.0
| 18,971
| 0.001054
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import os
import re
from typing import Dict, Optional, Sequence, Tuple, Type, Union
from google.api_core import client_options as client_options_lib
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
try:
OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
except AttributeError: # pragma: NO COVER
OptionalRetry = Union[retries.Retry, object] # type: ignore
from google.ads.googleads.v9.resources.types import income_range_view
from google.ads.googleads.v9.services.types import income_range_view_service
from .transports.base import (
IncomeRangeViewServiceTransport,
DEFAULT_CLIENT_INFO,
)
from .transports.grpc import IncomeRangeViewServiceGrpcTransport
class IncomeRangeViewServiceClientMeta(type):
"""Metaclass for the IncomeRangeViewService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = (
OrderedDict()
) # type: Dict[str, Type[IncomeRangeViewServiceTransport]]
_transport_registry["grpc"] = IncomeRangeViewServiceGrpcTransport
def get_transport_class(
cls, label: str = None,
) -> Type[IncomeRangeViewServiceTransport]:
"""Return an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
        # No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class IncomeRangeViewServiceClient(metaclass=IncomeRangeViewServiceClientMeta):
"""Service to manage income range views."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Convert api endpoint to mTLS endpoint
|
.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "googleads.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IncomeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(
info
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
IncomeRangeViewServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(
filename
)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> IncomeRangeViewServiceTransport:
"""Return the transport used by the client instance.
Returns:
IncomeRangeViewServiceTransport: The transport used by the client instance.
"""
return self._transport
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
"""Releases underlying transport's resources.
.. warning::
ONLY use as a context manager if the transport is NOT shared
with other clients! Exiting the with block will CLOSE the transport
and may cause errors in other clients!
"""
self.transport.close()
@staticmethod
def income_range_view_path(
customer_id: str, ad_group_id: str, criterion_id: str,
) -> str:
"""Return a fully-qualified income_range_view string."""
return "customers/{customer_id}/incomeRangeViews/{ad_group_id}~{criterion_id}".format(
customer_id=customer_id,
ad_group_id=ad_group_id,
criterion_id=criterion_id,
)
@staticmethod
def parse_income_range_view_path(path: str) -> Dict[str, str]:
"""Parse a income_range_view path into its component segments."""
m = re.match(
r"^customers/(?P<customer_id>.+?)/incomeRangeViews/(?P<ad_group_id>.+?)~(?P<criterion_id>.+?)$",
path,
)
return m.groupdict() if m else {}
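    # Illustrative round trip (not part of the generated client): the two path
    # helpers above are inverses, e.g.
    #   path = IncomeRangeViewServiceClient.income_range_view_path("123", "456", "789")
    #   parse_income_range_view_path(path) == {"customer_id": "123",
    #                                          "ad_group_id": "456",
    #                                          "criterion_id": "789"}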
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Return a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Return a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Return a fully-qualified organization
|
mozilla/standup
|
standup/status/tests/conftest.py
|
Python
|
bsd-3-clause
| 128
| 0
|
from django.core.cache import cache
def pytest_runtest_setup(item):
# Clear the cache before every test
    cache.clear()
|
richard-willowit/odoo
|
odoo/tests/__init__.py
|
Python
|
gpl-3.0
| 43
| 0
|
from . import common
from .common import *
|
DarrenBellew/CloudCompDT228-3
|
Lab3/CountingSundays.py
|
Python
|
mit
| 328
| 0.021341
|
'''import datetime
daytime.MINYEAR = 1901
daytime.MAXYEAR = 2000
print(daytime.MAXYEAR)'''
import calendar
count = 0
year = 1901
endYear = 2001
month = 12
for x in range (year, endYear):
for y in range (1, month+1):
        if calendar.weekday(x,y,1) == calendar.SUNDAY:
count = count+1
print("Count: " + str(count))
|
google-research/google-research
|
smurf/smurf_models/raft_update.py
|
Python
|
apache-2.0
| 8,034
| 0.003112
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of RAFT."""
# pylint:skip-file
import tensorflow as tf
def create_update_Conv2d(c_in, c_out, k_size):
kernel_scale = 1.0 / 3.0
if isinstance(k_size, list) or isinstance(k_size, tuple):
bias_scale = c_out / (3.0 * c_in * k_size[0] * k_size[1])
else:
bias_scale = c_out / (3.0 * c_in * k_size * k_size)
return tf.keras.layers.Conv2D(
filters=c_out,
kernel_size=k_size,
kernel_initializer=tf.keras.initializers.VarianceScaling(
distribution='uniform', scale=kernel_scale, mode='fan_in'),
bias_initializer=tf.keras.initializers.VarianceScaling(
distribution='uniform', scale=bias_scale, mode='fan_in'))
class ConvGRU(tf.keras.layers.Layer):
def __init__(self, hidden_dim=128, input_dim=192 + 128, **kwargs):
super(ConvGRU, self).__init__(**kwargs)
self.convz = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
self.convr = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
self.convq = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=3)
def call(self, input_tensor):
h, x = input_tensor
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz(pad_hx))
r = tf.math.sigmoid(self.convr(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq(pad_q))
h = (1 - z) * h + z * q
return h
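# Hedged usage sketch (not part of the original SMURF code): shapes below are
# illustrative and assume the default hidden_dim=128, input_dim=192+128 above.
def _conv_gru_usage_sketch():
  gru = ConvGRU(hidden_dim=128, input_dim=192 + 128)
  h = tf.zeros([1, 32, 32, 128])        # hidden state, channels == hidden_dim
  x = tf.zeros([1, 32, 32, 192 + 128])  # context + motion features
  return gru([h, x])                    # updated hidden state, same shape as h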
class SepConvGRU(tf.keras.layers.Layer):
def __init__(self, hidden_dim=128, input_dim=192 + 128):
super(SepConvGRU, self).__init__()
self.convz1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convr1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convq1 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(1, 5))
self.convz2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
self.convr2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
self.convq2 = create_update_Conv2d(
c_in=hidden_dim + input_dim, c_out=hidden_dim, k_size=(5, 1))
def call(self, input_tensor):
h, x = input_tensor
# horizontal
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [0, 0], [2, 2], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz1(pad_hx))
r = tf.math.sigmoid(self.convr1(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq1(pad_q))
h = (1 - z) * h + z * q
# vertical
hx = tf.concat([h, x], axis=3)
paddings = [[0, 0], [2, 2], [0, 0], [0, 0]]
pad_hx = tf.pad(hx, paddings)
z = tf.math.sigmoid(self.convz2(pad_hx))
r = tf.math.sigmoid(self.convr2(pad_hx))
pad_q = tf.pad(tf.concat([r * h, x], axis=3), paddings)
q = tf.math.tanh(self.convq2(pad_q))
h = (1 - z) * h + z * q
return h
class FlowHead(tf.keras.layers.Layer):
def __init__(self, hidden_dim=256, input_dim=128, **kwargs):
super(FlowHead, self).__init__(**kwargs)
self.conv1 = create_update_Conv2d(
c_in=input_dim, c_out=hidden_dim, k_size=3)
self.conv2 = create_update_Conv2d(c_in=hidden_dim, c_out=2, k_size=3)
def call(self, x):
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
conv = tf.nn.relu(self.conv1(tf.pad(x, paddings)))
return self.conv2(tf.pad(conv, paddings))
class BasicMotionEncoder(tf.keras.layers.Layer):
def __init__(self, args, **kwargs):
super(BasicMotionEncoder, self).__init__(**kwargs)
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)**2
self.convc1 = create_update_Conv2d(c_in=cor_planes, c_out=256, k_size=1)
self.convc2 = create_update_Conv2d(c_in=256, c_out=192, k_size=3)
self.convf1 = create_update_Conv2d(c_in=2, c_out=128, k_size=7)
self.convf2 = create_update_Conv2d(c_in=128, c_out=64, k_size=3)
self.conv = create_update_Conv2d(c_in=64 + 192, c_out=128 - 2, k_size=3)
def call(self, input_tensor):
flow, corr = input_tensor
cor = tf.nn.relu(self.convc1(corr))
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
cor = tf.nn.relu(self.convc2(tf.pad(cor, paddings)))
paddings7 = [[0, 0], [3, 3], [3, 3], [0, 0]]
flo = tf.nn.relu(self.convf1(tf.pad(flow, paddings7)))
flo = tf.nn.relu(self.convf2(tf.pad(flo, paddings)))
cor_flo = tf.concat([cor, flo], axis=-1)
out = tf.nn.relu(self.conv(tf.pad(cor_flo, paddings)))
return tf.concat([out, flow], axis=-1)
class SmallMotionEncoder(tf.keras.layers.Layer):
def __init__(self, args, **kwargs):
super(SmallMotionEncoder, self).__init__(**kwargs)
cor_planes = args.corr_levels * (2 * args.corr_radius + 1)**2
self.convc1 = create_update_Conv2d(c_in=cor_planes, c_out=96, k_size=1)
self.convf1 = create_update_Conv2d(c_in=96, c_out=64, k_size=7)
self.convf2 = create_update_Conv2d(c_in=64, c_out=32, k_size=3)
self.conv = create_update_Conv2d(c_in=32, c_out=80, k_size=3)
def call(self, input_tensor):
flow, corr = input_tensor
cor = tf.nn.relu(self.convc1(corr))
paddings7 = [[0, 0], [3, 3], [3, 3], [0, 0]]
flo = tf.nn.relu(self.convf1(tf.pad(flow, paddings7)))
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
flo = tf.nn.relu(self.convf2(tf.pad(flo, paddings)))
cor_flo = tf.concat([cor, flo], axis=-1)
out = tf.nn.relu(self.conv(tf.pad(cor_flo, paddings)))
return tf.concat([out, flow], axis=-1)
class BasicUpdateBlock(tf.keras.layers.Layer):
def __init__(self, args, hidden_dim=128, **kwargs):
super(BasicUpdateBlock, self).__init__(**kwargs)
self.args = args
self.encoder = BasicMotionEncoder(args)
self.gru = SepConvGRU(hidden_dim=hidden_dim, input_dim=128 + hidden_dim)
self.flow_head = FlowHead(hidden_dim=256, input_dim=hidden_dim)
if args.convex_upsampling:
self.mask = tf.keras.Sequential(
[create_update_Conv2d(c_in=128, c_out=256, k_size=3),
tf.keras.layers.ReLU(),
create_update_Conv2d(c_in=256, c_out=64 * 9, k_size=1)
])
def call(self, input_tensor, training):
net, inp, corr, flow = input_tensor
    motion_features = self.encoder([flow, corr])
inp = tf.concat([inp, motion_features], axis=-1)
    net = self.gru([net, inp])
delta_flow = self.flow_head(net)
if self.args.convex_upsampling:
# Scale mask to balance gradients.
paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
pad_net = tf.pad(net, paddings)
mask = .25 * self.mask(pad_net)
else:
mask = None
return net, mask, delta_flow
class SmallUpdateBlock(tf.keras.layers.Layer):
def __init__(self, args, hidden_dim=96, **kwargs):
super(SmallUpdateBlock, self).__init__(**kwargs)
self.encoder = SmallMotionEncoder(args)
self.gru = ConvGRU(hidden_dim=hidden_dim, input_dim=82 + 64)
self.flow_head = FlowHead(hidden_dim=128, input_dim=hidden_dim)
def call(self, input_tensor, training):
net, inp, corr, flow = input_tensor
motion_features = self.encoder([flow, corr])
inp = tf.concat([inp, motion_features], axis=-1)
net = self.gru([net, inp])
delta_flow = self.flow_head(net)
return net, None, delta_flow
|
manjaro/thus
|
thus/misc/keyboard_widget.py
|
Python
|
gpl-3.0
| 13,129
| 0.001371
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# keyboard_widget.py
#
# Copyright © 2012 Linux Mint (QT version)
# Copyright © 2013 Manjaro (QT version)
# Copyright © 2013-2015 Antergos (GTK version)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
""" Keyboard widget that shows keyboard layout and variant types to the user """
from gi.repository import Gtk, GObject
import cairo
import subprocess
import math
def unicode_to_string(raw):
""" U+ , or +U+ ... to string """
if raw[0:2] == "U+":
return chr(int(raw[2:], 16))
elif raw[0:2] == "+U":
return chr(int(raw[3:], 16))
return ""
class KeyboardWidget(Gtk.DrawingArea):
__gtype_name__ = 'KeyboardWidget'
kb_104 = {
"extended_return": False,
"keys": [
(0x29, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd),
(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x2b),
(0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28),
(0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35),
()]
}
kb_105 = {
"extended_return": True,
"keys": [
            (0x29, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd),
            (0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b),
(0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x2b),
(0x54, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35),
()]
}
kb_106 = {
"extended_return": True,
"keys": [
(0x29, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe),
(0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b),
(0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29),
(0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36),
()]
}
def __init__(self):
Gtk.DrawingArea.__init__(self)
self.set_size_request(460, 130)
self.codes = []
self.layout = "us"
self.variant = ""
self.font = "Helvetica"
self.space = 6
self.kb = None
def set_layout(self, layout):
self.layout = layout
def set_font(self):
""" Font depends on the keyboard layout """
# broken: ad (Andorra), lk (Sri Lanka), brai (Braille)
# ?!?: us:chr
self.font = "Helvetica"
# Load fonts from ttf-aboriginal-sans package
# us:chr
if self.variant == "chr":
self.font = "Aboriginal Sans"
# Load fonts from:
# ttf-indic-otf, ttf-khmer, ttf-lohit-fonts, ttf-myanmar3
# ttf-thaana-fonts, ttf-tlwg
# Font: Akaash
if self.layout == "bd":
self.font = "Akaash"
# Font: Gari
if self.layout == "np" or self.layout == "in":
self.font = "Gargi"
# Font: KhmerOS
if self.layout == "kh":
self.font = "KhmerOS"
# Font: Bengali
if self.variant == "ben_probhat" or self.variant == "ben":
self.font = "Lohit Bengali"
# Font: Padmaa
if self.variant == "guj": # not all keys
self.font = "Padmaa"
# Font: Punjabi
if self.variant == "guru" or self.variant == "jhelum":
self.font = "Lohit Punjabi"
# Font: Kannada
if self.variant == "kan":
self.font = "Lohit Kannada"
# Font: Malayalam
if self.variant == "mal" or self.variant == "mal_lalitha":
self.font = "Malayalam"
# Font: Tamil
if self.variant == "tam_keyboard_with_numerals" or self.variant == "tam":
self.font = "Lohit Tamil"
# Font: TSCu Times
lst = ["tam_TAB", "tam_TSCII", "tam_unicode"]
for i in lst:
if self.variant == i:
self.font = "TSCu_Times"
# Font: Telugu
if self.variant == "tel":
self.font = "Lohit Telugu"
# Font: Oriya
lst = ["af", "ara", "am", "cn", "ge", "gr", "gn", "ir", "iq", "ie", "il", "la", "ma", "pk", "lk", "sy"]
for i in lst:
if self.layout == i:
self.font = "Oriya"
lst = ["geo", "urd-phonetic3", "urd-phonetic", "urd-winkeys"]
for i in lst:
if self.variant == i:
self.font = "Oriya"
if self.variant == "ori":
self.font = "Lohit Oriya"
# Font: Mv Boli
if self.layout == "mv":
self.font = "MVBoli"
# Font: Myanmar
if self.layout == "mm":
self.font = "Myanmar3"
# Font: Tlwg
if self.layout == "th":
self.font = "Tlwg Mono"
def set_variant(self, variant):
self.variant = variant
self.load_codes()
self.load_info()
self.set_font()
# Force repaint
self.queue_draw()
def load_info(self):
kbl_104 = ["us", "th"]
kbl_106 = ["jp"]
# Most keyboards are 105 key so default to that
if self.layout in kbl_104:
self.kb = self.kb_104
elif self.layout in kbl_106:
self.kb = self.kb_106
elif self.kb != self.kb_105:
self.kb = self.kb_105
@staticmethod
def rounded_rectangle(cr, x, y, width, height, aspect=1.0):
corner_radius = height / 10.0
radius = corner_radius / aspect
degrees = math.pi / 180.0
cr.new_sub_path()
cr.arc(x + width - radius, y + radius, radius, -90 * degrees, 0 * degrees)
cr.arc(x + width - radius, y + height - radius, radius, 0 * degrees, 90 * degrees)
cr.arc(x + radius, y + height - radius, radius, 90 * degrees, 180 * degrees)
cr.arc(x + radius, y + radius, radius, 180 * degrees, 270 * degrees)
cr.close_path()
cr.set_source_rgb(0.5, 0.5, 0.5)
cr.fill_preserve()
cr.set_source_rgba(0.2, 0.2, 0.2, 0.5)
cr.set_line_width(2)
cr.stroke()
def do_draw(self, cr):
""" The 'cr' variable is the current Cairo context """
# alloc = self.get_allocation()
# real_width = alloc.width
# real_height = alloc.height
width = 460
# height = 130
usable_width = width - 6
key_w = (usable_width - 14 * self.space) / 15
# Set background color to transparent
cr.set_source_rgba(1.0, 1.0, 1.0, 0.0)
cr.paint()
cr.set_source_rgb(0.84, 0.84, 0.84)
cr.set_line_width(2)
cr.rectangle(0, 0, 640, 640)
cr.stroke()
cr.set_source_rgb(0.22, 0.22, 0.22)
rx = 3
space = self.space
w = usable_width
kw = key_w
# Use this to show real widget size (useful when debugging this widget)
# cr.rectangle(0, 0, real_width, real_height)
def draw_row(row, sx, sy, last_end=False):
x = sx
y = sy
keys = row
rw = w - sx
i = 0
for k in keys:
rect = (x, y, kw, kw)
if i == len(keys) - 1 and last_end:
rect = (rect[0], rect[1], rw, rect[3])
self.rounded_rectangle(cr, rect[0], rect[1], rect[2], rect[3])
px = rect[0] + 5
py = rect[1] + rect[3] - (rect[3] / 4)
if len(self.codes) > 0:
|
krishna11888/ai
|
third_party/gensim/gensim/models/__init__.py
|
Python
|
gpl-2.0
| 1,920
| 0.004688
|
"""
This package contains algorithms for extracting document representations from their raw
bag-of-word counts.
"""
# bring model classes directly into package namespace, to save some typing
from .hdpmodel import HdpModel
from .ldamodel import LdaModel
from .lsimodel import LsiModel
from .tfidfmodel import TfidfModel
from .rpmodel import RpModel
from .logentropy_model import LogEntropyModel
from .word2vec import Word2Vec
from .doc2vec import Doc2Vec
from .ldamulticore import LdaMulticore
from .phrases import Phrases
from . import wrappers
from gensim import interfaces, utils
class VocabTransform(interfaces.TransformationABC):
"""
Remap feature ids to new values.
Given a mapping between old ids and new ids (some old ids may be missing = these
features are to be discarded), this will wrap a corpus so that iterating over
`VocabTransform[corpus]` returns the same vectors but with the new ids.
Old features that have no counterpart in the new ids are discarded. This
can be used to filter vocabulary of a corpus "online"::
    >>> old2new = dict((oldid, newid) for newid, oldid in enumerate(ids_you_want_to_keep))
>>> vt = VocabTransform(old2new)
>>> for vec_with_new_ids in vt[corpus_with_old_ids]:
>>> ...
"""
def __init__(self, old2new, id2token=None):
# id2word = dict((newid, oldid2word[oldid]) for oldid, newid in old2new.iteritems())
self.old2new = old2new
self.id2token = id2token
def __getitem__(self, bow):
"""
Return representation with the ids transformed.
"""
# if the input vector is in fact a corpus, return a transformed corpus as a result
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
return sorted((self.old2new[oldid], weight) for oldid, weight in bow if oldid in self.old2new)
#endclass VocabTransform
|
McIntyre-Lab/papers
|
fear_sem_sd_2015/scripts/haplotype_freqs.py
|
Python
|
lgpl-3.0
| 358
| 0.005587
|
#!/usr/bin/env python
import numpy as np
import pandas as pd
def build_allele_dict():
""" Take a sheet and build a dictionary with:
[gene][allele] = count
"""
    fname = '/home/jfear/mclab/cegs_sem_sd_paper/from_matt/DSRP_and_CEGS_haps_1-6-15.xlsx'
data = pd.ExcelFile(fname)
dspr = data.parse('DSRP_haps')
f1 = data.parse('CEGS_haps')
data
|
jpardobl/django_sprinkler
|
setup.py
|
Python
|
bsd-3-clause
| 1,456
| 0.021291
|
import os
from setuptools import setup
README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name = 'django_sprinkler',
version = '0.4',
packages = ["django_sprinkler", ],
include_package_data = True,
license = 'BSD License',
description = 'Home Automation Python Project Django app meant to control watering',
long_description = README,
url = 'http://blog.digitalhigh.es',
author = 'Javier Pardo Blasco(jpardobl)',
author_email = 'jpardo@digitalhigh.es',
extras_require = {
"json": "simplejson"
},
install_requires = (
"Django==1.5",
"simplejson==2.6.2",
"pyparsing",
"hautomation_restclient",
"astral",
"pytz",
),
# test_suite='test_project.tests.runtests',
# tests_require=("selenium", "requests"),
classifiers = [
'Environment :: Web Environment',
'Framework :: Django',
        'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
|
phyng/phyip
|
geodata/provinces_script.py
|
Python
|
mit
| 637
| 0.004926
|
# coding: utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import json
china = json.loads(open('china.json', 'r').read()) # slow
new_provs = []
new_citys = []
for prov in china['children']:
new_provs.append(prov['name'])
for city in prov['children']:
if city['name'] not in [u'市辖区', u'县', u'省直辖县级行政区划']:
            if city['name'][-1] == '市':
new_citys.append(city['name'][:-1])
else:
new_citys.append(city['name'])
print new_citys
with open('citys.json', 'w') as f:
f.write(json.dumps(new_citys, ensure_ascii=False, indent=4))
|
356255531/SpikingDeepRLControl
|
code/EnvBo/Q-Learning/Testing_Arm_4points/q_networks.py
|
Python
|
gpl-3.0
| 3,008
| 0.002992
|
#!/usr/bin/python
import numpy as np
import os
import sys
from keras.layers import Activation, Dense, Input
from keras.layers.normalization import BatchNormalization
from keras.models import Model, Sequential
from keras.optimizers import RMSprop
NUM_OF_HIDDEN_NEURONS = 100
QNETWORK_NAME = 'online_network'
TARGETNET_NAME = 'target_network'
TAU = 0.0001 # soft update / low pass filter
class QNetworks:
def __init__(self, num_of_actions, num_of_states, num_of_hidden_neurons=NUM_OF_HIDDEN_NEURONS, tau=TAU):
self.NUM_OF_ACTIONS = num_of_actions
self.NUM_OF_HIDDEN_NEURONS = num_of_hidden_neurons
self.NUM_OF_STATES = num_of_states
self.TAU = tau
self.online_net = self.init_model(QNETWORK_NAME)
self.target_net = self.init_model(QNETWORK_NAME)
def do_soft_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
for i in xrange(len(weights)):
target_weights[i] = self.TAU*weights[i] + (1.0-self.TAU)*target_weights[i]
self.target_net.set_weights(target_weights)
return
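    # Note (added for clarity, not in the original file): the soft update above
    # is an exponential moving average of the online weights,
    #   theta_target <- TAU * theta_online + (1 - TAU) * theta_target,
    # so with TAU = 0.0001 the target network tracks the online network slowly.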
def do_hard_update(self):
weights = self.online_net.get_weights()
target_weights = self.target_net.get_weights()
for i in xrange(len(weights)):
target_weights[i] = weights[i]
self.target_net.set_weights(target_weights)
return
def get_weights(self):
# get weights of the online Q network
return self.online_net.get_weights()
def init_model(self, net_name):
model = Sequential()
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS, input_shape=(self.NUM_OF_STATES,)))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
model.add(Dense(self.NUM_OF_HIDDEN_NEURONS))
model.add(Activation('relu'))
        model.add(Dense(self.NUM_OF_ACTIONS))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='rmsprop')
filename = net_name+'/'+net_name
if os.path.isfile(filename+str(0)+'.txt'):
weights = model.get_weights()
for i in xrange(len(weights)):
loaded_weights = np.loadtxt(filename+str(i)+'.txt')
weights[i] = loaded_weights
model.set_weights(weights)
else:
print 'No model', filename, 'found. Creating a new model.'
return model
def save_models(self):
weights = self.online_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(QNETWORK_NAME+'/'+QNETWORK_NAME+str(i)+'.txt', weights[i])
weights = self.target_net.get_weights()
for i in xrange(len(weights)):
np.savetxt(TARGETNET_NAME+'/'+TARGETNET_NAME+str(i)+'.txt', weights[i])
print("Saved models to disk.")
|
kalahbrown/HueBigSQL
|
apps/jobbrowser/src/jobbrowser/models.py
|
Python
|
apache-2.0
| 22,026
| 0.009262
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import lxml.html
import re
import urllib2
from urlparse import urlparse, urlunparse
from django.core.urlresolvers import reverse
from desktop.lib.view_util import format_duration_in_millis
from desktop.lib import i18n
from django.utils.html import escape
from filebrowser.views import location_to_url
from hadoop import job_tracker
from hadoop import confparse
from hadoop.api.jobtracker.ttypes import JobNotFoundException
import hadoop.api.jobtracker.ttypes as ttypes
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
LOGGER = logging.getLogger(__name__)
def can_view_job(username, job):
acl = get_acls(job).get('mapreduce.job.acl-view-job', '')
return acl == '*' or username in acl.split(',')
def can_modify_job(username, job):
acl = get_acls(job).get('mapreduce.job.acl-modify-job', '')
return acl == '*' or username in acl.split(',')
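# For example (illustrative): with 'mapreduce.job.acl-view-job' set to 'alice,bob',
# can_view_job('alice', job) is True and can_view_job('carol', job) is False;
# an ACL of '*' grants access to every user.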
def get_acls(job):
if job.is_mr2:
return job.acls
else:
return job.full_job_conf
class JobLinkage(object):
"""
A thin representation of a job, without much of the details.
Its purpose is to wrap a JobID to allow us to get further
information from Hadoop, without instantiating a full Job object
(which requires talking to Hadoop).
"""
def __init__(self, jobtracker, jobid):
"""
JobLinkage(jobtracker, jobid) -> JobLinkage
The jobid is the jobid string (not the thrift jobid)
"""
self._jobtracker = jobtracker
self.jobId = jobid
self.jobId_short = "_".join(jobid.split("_")[-2:])
self.is_mr2 = False
def get_task(self, task_id):
"""Retrieve a TaskInProgress from hadoop."""
ttask = self._jobtracker.get_task(
self._jobtracker.thriftjobid_from_string(self.jobId),
self._jobtracker.thrifttaskid_from_string(task_id))
return Task(ttask, self._jobtracker)
class Job(JobLinkage):
"""
Creates a Job instance pulled from the job tracker Thrift interface.
"""
def __getitem__(self, item):
"""
For backwards-compatibility, resolve job["foo"] as job.foo
"""
return getattr(self, item)
@staticmethod
def from_id(jt, jobid, is_finished=False):
"""
Returns a Job instance given a job tracker interface and an id. The job tracker interface is typically
located in request.jt.
"""
try:
thriftjob = jt.get_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException:
try:
thriftjob = jt.get_retired_job(jt.thriftjobid_from_string(jobid))
except JobNotFoundException, e:
raise PopupException(_("Could not find job with id %(jobid)s.") % {'jobid': jobid}, detail=e)
return Job(jt, thriftjob)
@staticmethod
def from_thriftjob(jt, thriftjob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that job tracker interface.
The job tracker interface is typically located in request.jt
"""
return Job(jt, thriftjob)
def __init__(self, jt, thriftJob):
"""
Returns a Job instance given a job tracker interface and a thriftjob object returned from that
job tracker interface. The job tracker interface is typically located in request.jt
"""
JobLinkage.__init__(self, jt, thriftJob.jobID.asString)
self.jt = jt
self.job = thriftJob
self.tasks = []
if self.job.tasks is not None:
self.tasks = TaskList.from_thriftTaskList(self.job.tasks, jt)
self.task_map = dict( (task.taskId, task) for task in self.tasks )
self._counters = None
self._conf_keys = None
self._full_job_conf = None
self._init_attributes()
self.is_retired = hasattr(thriftJob, 'is_retired')
self.is_mr2 = False
self.applicationType = 'MR2'
@property
def counters(self):
if self.is_retired:
self._counters = {}
elif self._counters is None:
rollups = self.jt.get_job_counter_rollups(self.job.jobID)
# We get back a structure with counter lists for maps, reduces, and total
# and we need to invert this
def aggregate_counters(ctrs_from_jt, key, target):
for group in ctrs_from_jt.groups:
if group.name not in target:
target[group.name] = {
'name': group.name,
'displayName': group.displayName,
'counters': {}
}
agg_counters = target[group.name]['counters']
for counter in group.counters.itervalues():
if counter.name not in agg_counters:
agg_counters[counter.name] = {
'name': counter.name,
'displayName': counter.displayName,
}
agg_counters[counter.name][key] = counter.value
self._counters = {}
aggregate_counters(rollups.mapCounters, "map", self._counters)
aggregate_counters(rollups.reduceCounters, "reduce", self._counters)
aggregate_counters(rollups.jobCounters, "total", self._counters)
return self._counters
@property
def conf_keys(self):
if self._conf_keys is None:
self._initialize_conf_keys()
return self._conf_keys
@property
def full_job_conf(self):
if self._full_job_conf is None:
self._initialize_conf_keys()
return self._full_job_conf
def _init_attributes(self):
self.queueName = i18n.smart_unicode(self.job.profile.queueName)
self.jobName = i18n.smart_unicode(self.job.profile.name)
self.user = i18n.smart_unicode(self.job.profile.user)
self.mapProgress = self.job.status.mapProgress
self.reduceProgress = self.job.status.reduceProgress
    self.setupProgress = self.job.status.setupProgress
self.cleanupProgress = self.job.status.cleanupProgress
if self.job.desiredMaps == 0:
maps_percent_complete = 0
else:
      maps_percent_complete = int(round(float(self.job.finishedMaps) / self.job.desiredMaps * 100))
self.desiredMaps = self.job.desiredMaps
if self.job.desiredReduces == 0:
reduces_percent_complete = 0
else:
reduces_percent_complete = int(round(float(self.job.finishedReduces) / self.job.desiredReduces * 100))
self.desiredReduces = self.job.desiredReduces
self.maps_percent_complete = maps_percent_complete
self.finishedMaps = self.job.finishedMaps
self.finishedReduces = self.job.finishedReduces
self.reduces_percent_complete = reduces_percent_complete
self.startTimeMs = self.job.startTime
self.startTimeFormatted = format_unixtime_ms(self.job.startTime)
self.launchTimeMs = self.job.launchTime
self.launchTimeFormatted = format_unixtime_ms(self.job.launchTime)
self.finishTimeMs = self.job.finishTime
self.finishTimeFormatted = format_unixtime_ms(self.job.finishTime)
self.status = self.job.status.runStateAsString
self.priority = self.job.priorityAsString
self.jobFile = self.job.profile.jobFile
finishTime = self.job.finishTime
if finishTime == 0:
finishTime = datetime.datetime.now()
else:
finishTime = datetime.datetime.fromtimestamp(finishTime / 1000)
self.duration = finishTime - datetime.datetime.fromtimestamp(self.job.startTime / 1000)
diff = int(finishTime.strftime("%s")) * 1000 - self.startTimeMs
self.durationFormatted = format_duration_in_millis(diff)
self.durationInMillis = diff
def kill(self):
self.jt.kill_job(
|
soumyanishan/azure-linux-extensions
|
VMBackup/main/fsfreezer.py
|
Python
|
apache-2.0
| 8,407
| 0.00904
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from mounts import Mounts
import datetime
import threading
import os
import time
import sys
import signal
import traceback
import threading
def thread_for_binary(self,args):
self.logger.log("Thread for binary is called",True)
time.sleep(5)
self.logger.log("Waited in thread for 5 seconds",True)
self.child = subprocess.Popen(args,stdout=subprocess.PIPE)
self.logger.log("Binary subprocess Created",True)
class FreezeError(object):
def __init__(self):
self.errorcode = None
self.fstype = None
self.path = None
def __str__(self):
return "errorcode:" + str(self.errorcode) + " fstype:" + str(self.fstype) + " path" + str(self.path)
class FreezeResult(object):
def __init__(self):
self.errors = []
def __str__(self):
error_str = ""
for error in self.errors:
error_str+=(str(error)) + "\n"
return error_str
class FreezeHandler(object):
def __init__(self,logger):
# sig_handle valid values(0:nothing done,1: freezed successfully, 2:freeze failed)
self.sig_handle = 0
self.child= None
self.logger=logger
def sigusr1_handler(self,signal,frame):
self.logger.log('freezed',False)
self.sig_handle=1
def sigchld_handler(self,signal,frame):
self.logger.log('some child process terminated')
if(self.child is not None and self.child.poll() is not None):
self.logger.log("binary child terminated",True)
self.sig_handle=2
def startproc(self,args):
binary_thread = threading.Thread(target=thread_for_binary, args=[self, args])
binary_thread.start()
for i in range(0,33):
if(self.sig_handle==0):
self.logger.log("inside while with sig_handle "+str(self.sig_handle))
time.sleep(2)
else:
break
self.logger.log("Binary output for signal handled: "+str(self.sig_handle))
return self.sig_handle
def signal_receiver(self):
signal.signal(signal.SIGUSR1,self.sigusr1_handler)
signal.signal(signal.SIGCHLD,self.sigchld_handler)
class FsFreezer:
def __init__(self, patching, logger):
"""
"""
self.patching = patching
self.logger = logger
try:
self.mounts = Mounts(patching = self.patching, logger = self.logger)
except Exception as e:
errMsg='Failed to retrieve mount points, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
self.logger.log(errMsg,True,'Warning')
self.logger.log(str(e), True)
self.mounts = None
self.frozen_items = set()
self.unfrozen_items = set()
self.freeze_handler = FreezeHandler(self.logger)
def should_skip(self, mount):
if((mount.fstype == 'ext3' or mount.fstype == 'ext4' or mount.fstype == 'xfs' or mount.fstype == 'btrfs') and mount.type != 'loop'):
return False
else:
return True
def freeze_safe(self,timeout):
self.root_seen = False
error_msg=''
try:
freeze_result = FreezeResult()
freezebin=os.path.join(os.getcwd(),os.path.dirname(__file__),"safefreeze/bin/safefreeze")
args=[freezebin,str(timeout)]
arg=[]
for mount in self.mounts.mounts:
self.logger.log("fsfreeze mount :" + str(mount.mount_point), True)
if(mount.mount_point == '/'):
self.root_seen = True
self.root_mount = mount
elif(mount.mount_point and not self.should_skip(mount)):
args.append(str(mount.mount_point))
if(self.root_seen):
args.append('/')
self.logger.log("arg : " + str(args),True)
self.freeze_handler.signal_receiver()
self.logger.log("proceeded for accepting signals", True)
self.logger.enforce_local_flag(False)
sig_handle=self.freeze_handler.startproc(args)
if(sig_handle != 1):
if (self.freeze_handler.child is not None):
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line,encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
error_msg="freeze failed for some mount"
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
except Exception as e:
self.logger.enforce_local_flag(True)
error_msg='freeze failed for some mount with exception, Exception %s, stack trace: %s' % (str(e), traceback.format_exc())
freeze_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
return freeze_result
def thaw_safe(self):
thaw_result = FreezeResult()
unable_to_sleep = False
if(self.freeze_handler.child is None):
self.logger.log("child already completed", True)
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
elif(self.freeze_handler.child.poll() is None):
self.logger.log("child process still running")
self.freeze_handler.child.send_signal(signal.SIGUSR1)
for i in range(0,30):
if(self.freeze_handler.child.poll() is None):
self.logger.log("child still running sigusr1 sent")
time.sleep(1)
else:
break
self.logger.enforce_local_flag(True)
self.logger.log("Binary output after process end: ", True)
while True:
line=self.freeze_handler.child.stdout.readline()
            if sys.version_info > (3,):
line = str(line, encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
self.logger.log(line.rstrip(), True)
else:
break
if(self.freeze_handler.child.returncode!=0):
error_msg = 'snapshot result inconsistent as child returns with failure'
thaw_result.errors.append(error_msg)
self.logger.log(error_msg, True, 'Error')
else:
self.logger.log("Binary output after process end when no thaw sent: ", True)
if(self.freeze_handler.child.returncode==2):
error_msg = 'Unable to execute sleep'
thaw_result.errors.append(error_msg)
unable_to_sleep = True
else:
error_msg = 'snapshot result inconsistent'
thaw_result.errors.append(error_msg)
self.logger.enforce_local_flag(True)
while True:
line=self.freeze_handler.child.stdout.readline()
if sys.version_info > (3,):
line = str(line, encoding='utf-8', errors="backslashreplace")
else:
line = str(line)
if(line != ''):
sel
|
gypogypo/plugin.video.sneek
|
_Edit.py
|
Python
|
gpl-3.0
| 121
| 0.008264
|
import xbmcaddon
MainBase = 'http://164.132.106.213/data/home/home.txt'
addon = xbmcaddon.Addon('plugin.video.sneek')
|
spellrun/Neural-Photo-Editor
|
gan/models/ian_simple.py
|
Python
|
mit
| 8,119
| 0.045695
|
### Simple IAN model for use with Neural Photo Editor
# This model is a simplified version of the Introspective Adversarial Network that does not
# make use of Multiscale Dilated Convolutional blocks, Ternary Adversarial Loss, or an
# autoregressive RGB-Beta layer. It's designed to be sleeker and to run on laptop GPUs with <1GB of memory.
from math import sqrt
import os
import sys
import numpy as np
import lasagne.layers
from lasagne.layers import batch_norm as BN
from lasagne.layers import ConcatLayer as CL
from lasagne.layers import DenseLayer as DL
from lasagne.layers import ElemwiseSumLayer as ESL
from lasagne.layers import NonlinearityLayer as NL
from lasagne.layers import SliceLayer as SL
from lasagne.layers import TransposedConv2DLayer as TC2D
from lasagne.init import Normal as initmethod
from lasagne.nonlinearities import elu
from lasagne.nonlinearities import rectify as relu
from lasagne.nonlinearities import LeakyRectify as lrelu
from theano import tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from gan.util.layers import GaussianSampleLayer,MinibatchLayer
CFG = {
'batch_size': 128,
'learning_rate': {
0: 0.0002,
},
'optimizer': 'Adam',
'beta1': 0.5,
'update_ratio': 1,
'decay_rate': 0,
'reg': 1e-5,
'momentum': 0.9,
'shuffle': True,
'dims': (64,64),
'n_channels': 3,
'batches_per_chunk': 64,
'max_epochs': 250,
'checkpoint_every_nth': 1,
'num_latents': 100,
'recon_weight': 3.0,
'feature_weight': 1.0,
'dg_weight': 1.0,
'dd_weight': 1.0,
'agr_weight': 1.0,
'ags_weight': 1.0,
'n_shuffles': 1,
'ortho': 1e-3,
}
def get_model(interp=False, dnn=True):
if dnn:
import lasagne.layers.dnn
from lasagne.layers.dnn import Conv2DDNNLayer as C2D
from theano.sandbox.cuda.basic_ops import (
as_cuda_ndarray_variable,
host_from_gpu,
gpu_contiguous,
HostFromGpu,
gpu_alloc_empty,
)
from theano.sandbox.cuda.dnn import (
GpuDnnConvDesc,
GpuDnnConv,
GpuDnnConvGradI,
dnn_conv,
dnn_pool,
)
from gan.util.layers import DeconvLayer
else:
from lasagne.layers import Conv2DLayer as C2D
dims, n_channels = tuple(CFG['dims']), CFG['n_channels']
shape = (None, n_channels)+dims
l_in = lasagne.layers.InputLayer(shape=shape)
l_enc_conv1 = C2D(
incoming = l_in,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv1'
)
l_enc_conv2 = BN(C2D(
incoming = l_enc_conv1,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv2'
),name = 'bnorm2')
l_enc_conv3 = BN(C2D(
incoming = l_enc_conv2,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv3'
),name = 'bnorm3')
l_enc_conv4 = BN(C2D(
incoming = l_enc_conv3,
num_filters = 1024,
filter_size = [5,5],
stride = [2,2],
pad = (2,2),
W = initmethod(0.02),
nonlinearity = lrelu(0.2),
flip_filters=False,
name = 'enc_conv4'
),name = 'bnorm4')
l_enc_fc1 = BN(DL(
incoming = l_enc_conv4,
num_units = 1000,
W = initmethod(0.02),
nonlinearity = elu,
name = 'enc_fc1'
),
name = 'bnorm_enc_fc1')
l_enc_mu,l_enc_logsigma = [BN(DL(incoming = l_enc_fc1,num_units=CFG['num_latents'],nonlinearity = None,name='enc_mu'),name='mu_bnorm'),
BN(DL(incoming = l_enc_fc1,num_units=CFG['num_latents'],nonlinearity = None,name='enc_logsigma'),name='ls_bnorm')]
l_Z = GaussianSampleLayer(l_enc_mu, l_enc_logsigma, name='l_Z')
l_dec_fc2 = BN(DL(
incoming = l_Z,
num_units = 1024*16,
nonlinearity = relu,
W=initmethod(0.02),
name='l_dec_fc2'),
name = 'bnorm_dec_fc2')
l_unflatten = lasagne.layers.ReshapeLayer(
incoming = l_dec_fc2,
shape = ([0],1024,4,4),
)
if dnn:
l_dec_conv1 = BN(DeconvLayer(
incoming = l_unflatten,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv1'
),name = 'bnorm_dc1')
l_dec_conv2 = BN(DeconvLayer(
incoming = l_dec_conv1,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv2'
),name = 'bnorm_dc2')
l_dec_conv3 = BN(DeconvLayer(
incoming = l_dec_conv2,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv3'
),name = 'bnorm_dc3')
l_out = DeconvLayer(
incoming = l_dec_conv3,
num_filters = 3,
filter_size = [5,5],
stride = [2,2],
crop = (2,2),
W = initmethod(0.02),
b = None,
nonlinearity = lasagne.nonlinearities.tanh,
name = 'dec_out'
)
else:
l_dec_conv1 = SL(SL(BN(TC2D(
incoming = l_unflatten,
num_filters = 512,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv1'
),name = 'bnorm_dc1'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
l_dec_conv2 = SL(SL(BN(TC2D(
incoming = l_dec_conv1,
num_filters = 256,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv2'
),name = 'bnorm_dc2'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
l_dec_conv3 = SL(SL(BN(TC2D(
incoming = l_dec_conv2,
num_filters = 128,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
nonlinearity = relu,
name = 'dec_conv3'
),name = 'bnorm_dc3'),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
l_out = SL(SL(TC2D(
incoming = l_dec_conv3,
num_filters = 3,
filter_size = [5,5],
stride = [2,2],
crop = (1,1),
W = initmethod(0.02),
b = None,
nonlinearity = lasagne.nonlinearities.tanh,
name = 'dec_out'
),indices=slice(1,None),axis=2),indices=slice(1,None),axis=3)
# l_in,num_filters=1,filter_size=[5,5],stride=[2,2],crop=[1,1],W=dc.W,b=None,nonlinearity=None)
minibatch_discrim = MinibatchLayer(lasagne.layers.GlobalPoolLayer(l_enc_conv4), num_kernels=500,name='minibatch_discrim')
l_discrim = DL(incoming = minibatch_discrim,
num_units = 1,
nonlinearity = lasagne.nonlinearities.sigmoid,
b = None,
W=initmethod(),
name = 'discrimi')
return {'l_in': l_in,
'l_out': l_out,
'l_mu': l_enc_mu,
'l_ls': l_enc_logsigma,
'l_Z': l_Z,
'l_introspect': [l_enc_conv1, l_enc_conv2,l_enc_conv3,l_enc_conv4],
'l_discrim': l_discrim}
|
google/citest
|
tests/json_predicate/map_predicate_test.py
|
Python
|
apache-2.0
| 7,327
| 0.003276
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
import unittest
from citest.base import (
ExecutionContext,
JsonSnapshotHelper)
from citest.json_predicate.path_predicate_helpers import PathEqPredicate
import citest.json_predicate as jp
_LETTER_DICT = {'a':'A', 'b':'B', 'z':'Z'}
_NUMBER_DICT = {'a':1, 'b':2, 'three':3}
_MIXED_DICT = {'a':'A', 'b':2, 'x':'X'}
_COMPOSITE_DICT = {'letters': _LETTER_DICT, 'numbers': _NUMBER_DICT}
_LETTER_ARRAY = ['a', 'b', 'c']
_NUMBER_ARRAY = [1, 2, 3]
_DICT_ARRAY = [{}, _LETTER_DICT, _NUMBER_DICT, _COMPOSITE_DICT]
_MULTI_ARRAY = [_LETTER_DICT, _NUMBER_DICT, _LETTER_DICT, _NUMBER_DICT]
class JsonMapPredicateTest(unittest.TestCase):
def assertEqual(self, expect, have, msg=''):
JsonSnapshotHelper.AssertExpectedValue(expect, have, msg)
def _try_map(self, context, pred, obj, expect_ok, expect_map_result=None,
dump=False, min=1):
"""Helper function for invoking finder and asserting the result.
Args:
pred: The jp.ValuePredicate to map.
obj: The object to apply the predicate to.
expect_ok: Whether we expect apply to succeed or not.
expect_map_result: If not None, then the expected
jp.MapPredicateResult from apply().
dump: If True then print the filter_result to facilitate debugging.
"""
    map_result = jp.MapPredicate(pred, min=min)(context, obj)
if dump:
print('MAP_RESULT:\n{0}\n'.format(
JsonSnapshotHelper.ValueToEncodedJson(map_result)))
if expect_map_result:
self.assertEqual(expect_map_result, map_result)
    error_msg = '{expect_ok} != {ok}\n{map_result}'.format(
expect_ok=expect_ok, ok=map_result.__nonzero__(),
map_result=map_result)
self.assertEqual(expect_ok, map_result.__nonzero__(), error_msg)
def test_map_predicate_good_1(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
aA_attempt = jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))
expect_result = jp.MapPredicateResult(
valid=True, pred=aA,
obj_list=[_LETTER_DICT], all_results=[aA_attempt.result],
good_map=[aA_attempt],
bad_map=[])
self._try_map(context, aA, _LETTER_DICT, True, expect_result)
def test_map_predicate_bad(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_NUMBER_DICT], all_results=[aA(context, _NUMBER_DICT)],
bad_map=[jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))],
good_map=[])
self._try_map(context, aA, _NUMBER_DICT, False, expect_result)
def test_map_predicate_good_and_bad_min_1(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
aa_number_attempt = jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))
aa_letter_attempt = jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))
expect_result = jp.MapPredicateResult(
valid=True, pred=aA,
obj_list=[_NUMBER_DICT, _LETTER_DICT],
all_results=[aa_number_attempt.result, aa_letter_attempt.result],
good_map=[aa_letter_attempt],
bad_map=[aa_number_attempt])
self._try_map(context, aA, [_NUMBER_DICT, _LETTER_DICT],
True, expect_result)
def test_map_predicate_good_and_bad_min_2(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_NUMBER_DICT, _LETTER_DICT],
all_results=[aA(context, _NUMBER_DICT), aA(context, _LETTER_DICT)],
good_map=[jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))],
bad_map=[jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))])
self._try_map(
context, aA, [_NUMBER_DICT, _LETTER_DICT], False, expect_result, min=2)
def test_map_predicate_good_and_bad_min_indirect(self):
context = ExecutionContext(min=2)
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_NUMBER_DICT, _LETTER_DICT],
all_results=[aA(context, _NUMBER_DICT), aA(context, _LETTER_DICT)],
good_map=[jp.ObjectResultMapAttempt(_LETTER_DICT,
aA(context, _LETTER_DICT))],
bad_map=[jp.ObjectResultMapAttempt(_NUMBER_DICT,
aA(context, _NUMBER_DICT))])
self._try_map(
context, aA, [_NUMBER_DICT, _LETTER_DICT], False, expect_result,
min=lambda x: x['min'])
def test_map_not_found(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
aa_composite_attempt = jp.ObjectResultMapAttempt(
_COMPOSITE_DICT, aA(context, _COMPOSITE_DICT))
expect_result = jp.MapPredicateResult(
valid=False, pred=aA,
obj_list=[_COMPOSITE_DICT], all_results=[aa_composite_attempt.result],
bad_map=[aa_composite_attempt],
good_map=[])
self._try_map(context, aA, _COMPOSITE_DICT, False, expect_result)
def test_object_filter_cases(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
self._try_map(context, aA, _LETTER_DICT, True)
self._try_map(context, aA, _COMPOSITE_DICT, False)
self._try_map(context, aA, _NUMBER_DICT, False)
self._try_map(context, aA, _MULTI_ARRAY, True)
self._try_map(context, aA, [_COMPOSITE_DICT, _COMPOSITE_DICT], False)
self._try_map(context, aA, _MIXED_DICT, True)
AandB = jp.AND([PathEqPredicate('a', 'A'),
PathEqPredicate('b', 'B')])
self._try_map(context, AandB, _LETTER_DICT, True)
self._try_map(context, AandB, _COMPOSITE_DICT, False)
self._try_map(context, AandB, _NUMBER_DICT, False)
self._try_map(context, AandB, _MULTI_ARRAY, True)
self._try_map(context, AandB, _MIXED_DICT, False)
def test_none_bad(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
self._try_map(context, aA, None, False)
def test_none_good(self):
context = ExecutionContext()
aA = jp.PathPredicate('a', jp.STR_EQ('A'))
self._try_map(context, aA, None, True, min=0)
if __name__ == '__main__':
unittest.main()
|
NinjaMSP/crossbar
|
crossbar/adapter/mqtt/_events.py
|
Python
|
agpl-3.0
| 21,416
| 0.000654
|
#####################################################################################
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Unless a separate license agreement exists between you and Crossbar.io GmbH (e.g.
# you have purchased a commercial license), the license terms below apply.
#
# Should you enter into a separate license agreement after having received a copy of
# this software, then the terms of such license agreement replace the terms below at
# the time at which such license agreement becomes effective.
#
# In case a separate license agreement ends, and such agreement ends without being
# replaced by another separate license agreement, the license terms below apply
# from the time at which said agreement ends.
#
# LICENSE TERMS
#
# This program is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License, version 3, as published by the
# Free Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU Affero General Public License Version 3 for more details.
#
# You should have received a copy of the GNU Affero General Public license along
# with this program. If not, see <http://www.gnu.org/licenses/agpl-3.0.en.html>.
#
#####################################################################################
from __future__ import absolute_import, division, print_function
import warnings
import attr
from attr.validators import instance_of, optional
from bitstring import pack
from ._utils import (read_prefixed_data, read_string, build_string,
build_header, ParseFailure, SerialisationFailure)
unicode = type(u"")
@attr.s
class Failure(object):
reason = attr.ib(default=None)
@attr.s
class Disconnect(object):
def serialise(self):
"""
Assemble this into an on-wire message.
"""
return build_header(14, (False, False, False, False), 0)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
return cls()
@attr.s
class PingRESP(object):
def serialise(self):
"""
Assemble this into an on-wire message.
"""
return build_header(13, (False, False, False, False), 0)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
return cls()
@attr.s
class PingREQ(object):
def serialise(self):
"""
Assemble this into an on-wire message.
"""
return build_header(12, (False, False, False, False), 0)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
return cls()
@attr.s
class UnsubACK(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(11, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Session identifier
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier=packet_identifier)
@attr.s
class Unsubscribe(object):
packet_identifier = attr.ib(validator=instance_of(int))
topics = attr.ib(validator=instance_of(list))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
        payload = self._make_payload()
header = build_header(10, (False, False, True, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
# Session identifier
b.append(pack('uint:16', self.packet_identifier).bytes)
for topic in self.topics:
if not isinstance(topic, unicode):
raise SerialisationFailure(self, "Topics must be Unicode")
b.append(build_string(topic))
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
if flags != (False, False, True, False):
raise ParseFailure(cls, "Bad flags")
topics = []
packet_identifier = data.read('uint:16')
while not data.bitpos == len(data):
topics.append(read_string(data))
if len(topics) == 0:
raise ParseFailure(cls, "Must contain a payload.")
return cls(packet_identifier=packet_identifier, topics=topics)
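# Illustrative sketch (not part of the original source): serialising an
# Unsubscribe packet built from the attrs defined above. Topics must be
# unicode strings, otherwise serialise() raises SerialisationFailure.
#
#   pkt = Unsubscribe(packet_identifier=1234, topics=[u"sensors/+/temperature"])
#   wire = pkt.serialise()  # header (packet type 10) + 16-bit id + encoded topics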
@attr.s
class PubCOMP(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(7, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier)
@attr.s
class PubREL(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(6, (False, False, True, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, True, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier)
@attr.s
class PubREC(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(5, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its constituent parts.
"""
b = []
b.append(pack('uint:16', self.packet_identifier).bytes)
return b"".join(b)
@classmethod
def deserialise(cls, flags, data):
"""
Disassemble from an on-wire message.
"""
if flags != (False, False, False, False):
raise ParseFailure(cls, "Bad flags")
packet_identifier = data.read('uint:16')
return cls(packet_identifier)
@attr.s
class PubACK(object):
packet_identifier = attr.ib(validator=instance_of(int))
def serialise(self):
"""
Assemble this into an on-wire message.
"""
payload = self._make_payload()
header = build_header(4, (False, False, False, False), len(payload))
return header + payload
def _make_payload(self):
"""
Build the payload from its con
|
vitan/openrave
|
python/databases/kinematicreachability.py
|
Python
|
lgpl-3.0
| 20,281
| 0.015926
|
# -*- coding: utf-8 -*-
# Copyright (C) 2009-2010 Rosen Diankov (rosen.diankov@gmail.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""6D kinematic reachability space of a robot's manipulators.
.. image:: ../../images/databases/kinematicreachability.jpg
:width: 640
.. image:: ../../images/databases/kinematicreachability_side.jpg
:width: 640
`[source] <../_modules/openravepy/databases/kinematicreachability.html>`_
**Running the Generator**
.. code-block:: bash
openrave.py --database kinematicreachability --robot=robots/barrettsegway.robot.xml
**Showing the Reachability** (uses mayavi2)
.. code-block:: bash
openrave.py --database kinematicreachability --robot=robots/barrettsegway.robot.xml --show
Description
-----------
This is the reachability when counting the total number of configurations possible at each pose.
Command-line
------------
.. shell-block:: openrave.py --database kinematicreachability --help
Class Definitions
-----------------
"""
from __future__ import with_statement # for python 2.5
__author__ = 'Rosen Diankov'
__copyright__ = 'Copyright (C) 2009-2010 Rosen Diankov (rosen.diankov@gmail.com)'
__license__ = 'Apache License, Version 2.0'
if not __openravepy_build_doc__:
from numpy import *
else:
from numpy import array
from ..openravepy_int import RaveFindDatabaseFile, IkParameterization, rotationMatrixFromQArray, poseFromMatrix
from ..openravepy_ext import transformPoints, quatArrayTDist
from .. import metaclass, pyANN
from ..misc import SpaceSamplerExtra
from . import DatabaseGenerator
from . import convexdecomposition, inversekinematics
import numpy
import time
import os.path
from os import makedirs
from heapq import nsmallest # for nth smallest element
from optparse import OptionParser
import logging
log = logging.getLogger('openravepy.'+__name__.split('.',2)[-1])
class ReachabilityModel(DatabaseGenerator):
"""Computes the robot manipulator's reachability space (stores it in 6D) and
offers several functions to use it effectively in planning."""
class QuaternionKDTree(metaclass.AutoReloader):
"""Artificially add more weight to the X,Y,Z translation dimensions"""
def __init__(self, poses,transmult):
self.numposes = len(poses)
self.transmult = transmult
self.itransmult = 1/transmult
searchposes = array(poses)
searchposes[:,4:] *= self.transmult # take translation errors more seriously
allposes = r_[searchposes,searchposes]
allposes[self.numposes:,0:4] *= -1
self.nnposes = pyANN.KDTree(allposes)
def kSearch(self,poses,k,eps):
"""returns distance squared"""
poses[:,4:] *= self.transmult
# neighs,dists = self.nnposes.kSearch(poses,k,eps)
            neighs,dists = zip(*[self.nnposes.kSearch(pose,k,eps) for pose in poses])
neighs[neighs>=self.numposes] -= self.numposes
poses[:,4:] *= self.itransmult
return neighs,dists
def kFRSearch(self,pose,radiussq,k,eps):
"""returns distance squared"""
pose[4:] *= self.transmult
            neighs,dists,kball = self.nnposes.kFRSearch(pose,radiussq,k,eps)
neighs[neighs>=self.numposes] -= self.numposes
pose[4:] *= self.itransmult
return neighs,dists,kball
def kFRSearchArray(self,poses,radiussq,k,eps):
"""returns distance squared"""
poses[:,4:] *= self.transmult
neighs,dists,kball = self.nnposes.kFRSearchArray(poses,radiussq,k,eps)
neighs[neighs>=self.numposes] -= self.numposes
poses[:,4:] *= self.itransmult
return neighs,dists,kball
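    # Added sketch, not part of the original source: the KD-tree above stores
    # each pose twice because a rotation is represented by both q and -q.
    #   searchposes[:, 0:4]  -> quaternion (q and, in the second copy, -q)
    #   searchposes[:, 4:7]  -> translation, scaled by transmult for the search
    # Scaling the translation columns makes translation error dominate the
    # nearest-neighbour distance, per the class docstring.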
xyzdelta = None # the sampling discretization of the XYZ space
reachabilitystats = None # Nx8 array of all the poses that are reachable. The first 7 columns are the quaternion and translation, the last column is the number of IK solutions present
    reachability3d = None # a KxKxK voxelized map that represents the density of solutions for each XYZ point. The higher the density, the more rotations the arm can be solved for. Use xyzdelta to go from a 3D point to a voxel index.
def __init__(self,robot):
DatabaseGenerator.__init__(self,robot=robot)
self.ikmodel = inversekinematics.InverseKinematicsModel(robot=robot,iktype=IkParameterization.Type.Transform6D)
self.reachabilitystats = None
self.reachability3d = None
self.reachabilitydensity3d = None
self.pointscale = None
self.xyzdelta = None
self.quatdelta = None
self.kdtree6d = None
self.kdtree3d = None
def clone(self,envother):
clone = DatabaseGenerator.clone(self,envother)
return clone
def has(self):
return len(self.reachabilitydensity3d) > 0 and len(self.reachability3d) > 0 and len(self.reachabilitystats) > 0
def getversion(self):
return 5
def save(self):
try:
self.SaveHDF5()
except ImportError:
log.warn('python h5py library not found, will not be able to speedup database access')
self.SavePickle()
def load(self):
try:
if not self.ikmodel.load():
self.ikmodel.autogenerate()
try:
return self.LoadHDF5()
except ImportError:
log.warn('python h5py library not found, will not be able to speedup database access')
return self.LoadPickle()
except Exception, e:
log.warn(e)
return False
def SavePickle(self):
DatabaseGenerator.save(self,(self.reachabilitystats,self.reachabilitydensity3d,self.reachability3d, self.pointscale,self.xyzdelta,self.quatdelta))
def LoadPickle(self):
params = DatabaseGenerator.load(self)
if params is None:
return False
self.reachabilitystats,self.reachabilitydensity3d,self.reachability3d,self.pointscale,self.xyzdelta,self.quatdelta = params
return self.has()
def SaveHDF5(self):
import h5py
filename=self.getfilename(False)
log.info(u'saving model to %s',filename)
try:
makedirs(os.path.split(filename)[0])
except OSError:
pass
f=h5py.File(filename,'w')
try:
f['version'] = self.getversion()
f['reachabilitystats'] = self.reachabilitystats
f['reachabilitydensity3d'] = self.reachabilitydensity3d
f['reachability3d'] = self.reachability3d
f['pointscale'] = self.pointscale
f['xyzdelta'] = self.xyzdelta
f['quatdelta'] = self.quatdelta
finally:
f.close()
def LoadHDF5(self):
import h5py
filename = self.getfilename(True)
if len(filename) == 0:
return False
self._CloseDatabase()
try:
f=h5py.File(filename,'r')
if f['version'].value != self.getversion():
log.error('version is wrong %s!=%s ',f['version'],self.getversion())
return False
self.reachabilitystats = f['reachabilitystats']
self.reachabilitydensity3d = f['reachabilitydensity3d']
self.reachability3d = f['reachability3d']
self.pointscale = f['pointscale'].value
self.xyzdelta = f['xyzdelta'].value
self.quatdelta = f['quatdelta'].value
self._databasefile = f
f = None
return self.has()
except Exception,e:
log.debug('LoadHDF5 for %s: ',filename,
|
carthach/essentia
|
test/src/unittests/standard/test_idct.py
|
Python
|
agpl-3.0
| 2,364
| 0.015651
|
#!/usr/bin/env python
# Copyright (C) 2006-2016 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from essentia_test import *
class TestIDCT(TestCase):
def testInvalidParam(self):
self.assertConfigureFails(IDCT(), { 'inputSize': 0, 'outputSize': 2 })
self.assertConfigureFails(IDCT(), { 'inputSize': 6, 'outputSize': 0 })
def testRegression(self):
# values from Matlab/Octave
inputArray = [ 0.89442718, -0.60150099, -0.12078822, -0.37174806, 0.82789522]
expected = [ 0, 0, 1, 0, 1 ]
self.assertAlmostEqualVector(IDCT(outputSize=len(expected), inputSize = len(inputArray))(inputArray), expected, 1e-6)
def testLifteringRegression(self):
# DCT III and Liftening computed using PLP and RASTA matlab toolbox.
# A big tolerance is necessary due to the smoothing caused by the smaller amount of bins in the DCT domain.
inputArray = [ 1.89736652, 0.95370573, 3.39358997, -3.35009956]
expected = [1, 1, 0, 0, 1]
self.assertAlmostEqualVector(IDCT(inputSize=len(inputArray),
outputSize=len(expected),
dctType = 3,
liftering = 22)(inputArray), expected, 1e0)
def testZero(self):
self.assertEqualVector(IDCT(outputSize=10)(zeros(5)), zeros(10))
def testInvalidInput(self):
self.assertComputeFails(IDCT(), []) # = testEmpty
self.assertComputeFails(IDCT(outputSize = 2, inputSize = 1), [ 0, 2, 4 ])
suite = allTests(TestIDCT)
if __name__ == '__main__':
TextTestRunner(verbosity=2).run(suite)
|
Kopachris/py-id003
|
protocol_analyzer.py
|
Python
|
bsd-3-clause
| 17,902
| 0.005307
|
#!/usr/bin/env python3
import os
import sys
src_dir = os.path.abspath('src/')
sys.path.append(src_dir)
sys.ps1 = ''
sys.ps2 = ''
import id003
import termutils as t
import time
import logging
import configparser
import threading
import serial.tools.list_ports
from serial.serialutil import SerialException
from collections import OrderedDict
X_SIZE, Y_SIZE = t.get_size()
CONFIG_FILE = 'bv.ini'
CONFIG = configparser.ConfigParser()
CONFIG.read(CONFIG_FILE)
def get_denoms():
denom = 0
for k in CONFIG['bv.denom_inhibit']:
if CONFIG['bv.denom_inhibit'].getboolean(k):
denom |= id003.DENOMS[k]
return [denom, 0]
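# Added sketch (flag values are hypothetical, not from the id003 module): if the
# [bv.denom_inhibit] section enables two denominations whose id003.DENOMS flags
# are 0x01 and 0x02, the loop above ORs them into 0x03 and get_denoms() returns
# [0x03, 0] -- the combined bitmask followed by the fixed trailing zero byte.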
def get_security():
sec = 0
for k in CONFIG['bv.security']:
if CONFIG['bv.security'].getboolean(k):
sec |= id003.DENOMS[k]
return [sec, 0]
def get_directions():
dir = 0
for k in CONFIG['bv.direction']:
if CONFIG['bv.direction'].getboolean(k):
dir |= id003.DIRECTIONS[k]
return [dir]
def get_optional():
opt = 0
for k in CONFIG['bv.optional']:
if CONFIG['bv.optional'].getboolean(k):
opt |= id003.OPTIONS[k]
return [opt, 0]
def kb_loop(bv, stdout_lock, bv_lock):
global CONFIG
print("Press Q at any time to quit, or H for help")
while True:
with stdout_lock:
opt = t.get_key(0.1)
if opt is not None:
opt = opt.lower()
if opt == b'q':
bv.bv_on = False
with open(CONFIG_FILE, 'w') as f:
CONFIG.write(f)
return
elif opt == b'h':
print("Q - Quit\n" "H - Help\n" "S - Settings menu\n"
"R - Reset and initialize bill validator\n"
"P - Pause bill validator\n" "M - Stop polling "
"and return to main menu")
elif opt == b'm':
return
elif opt == b's':
with stdout_lock:
logging.debug("Entered settings menu from status poll")
settings()
logging.debug("Exited settings menu")
bv.bv_status = None # print current status after returning
t.wipe()
elif opt == b'r':
with bv_lock:
logging.debug("Sending reset command")
status = None
while status != id003.ACK:
bv.send_command(id003.RESET)
status, data = bv.read_response()
time.sleep(0.2)
logging.debug("Received ACK")
if bv.req_status()[0] == id003.INITIALIZE:
denom = get_denoms()
sec = get_security()
dir = get_directions()
opt = get_optional()
logging.info("Initializing bill validator")
bv.initialize(denom, sec, dir, opt)
bv.bv_status = None
elif opt == b'p':
print("Not implemented yet")
def poll_loop(bv, stdout_lock, bv_lock, interval=0.2):
denom = get_denoms()
sec = get_security()
dir = get_directions()
opt = get_optional()
print("Please connect bill validator.")
bv.power_on(denom, sec, dir, opt)
if bv.init_status == id003.POW_UP:
logging.info("BV powered up normally.")
elif bv.init_status == id003.POW_UP_BIA:
logging.info("BV powered up with bill in acceptor.")
elif bv.init_status == id003.POW_UP_BIS:
logging.info("BV powered up with bill in stacker.")
while True:
poll_start = time.time()
if not bv.bv_on:
return
with bv_lock:
status, data = bv.req_status()
if (status, data) != bv.bv_status and status in bv.bv_events:
if stdout_lock.acquire(timeout=0.5):
bv.bv_events[status](data)
stdout_lock.release()
bv.bv_status = (status, data)
wait = interval - (time.time() - poll_start)
if wait > 0.0:
time.sleep(wait)
def display_header(text):
t.set_pos(0, 0)
print(text.center(X_SIZE), end='')
print('=' * X_SIZE, end='')
def display_menu(menu, prompt='>>>', header='', info=''):
if len(menu) > Y_SIZE - 5:
raise ValueError("Too many menu options")
# print the header
t.wipe()
display_header(header)
# print the menu items
for k, v in menu.items():
print("{}) {}".format(k, v))
# print prompt and info
print(prompt, end=' ')
x, y = t.get_pos()
print('\n\n' + info)
t.set_pos(x, y)
# get user's choice
k = None
while k not in menu:
k = input('')
t.set_pos(x, y)
print(' ' * (X_SIZE - x), end='')
t.set_pos(x, y)
return k
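# Added usage sketch, not part of the original source:
#   menu = OrderedDict([('a', 'Accept bills'), ('q', 'Quit')])
#   choice = display_menu(menu, '>>>', header='Main menu')
# The call wipes the screen, prints the numbered options and prompt, and blocks
# until one of the menu keys is entered, returning that key.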
def settings():
global CONFIG
t.wipe()
settings_menu = OrderedDict()
settings_menu['e'] = "Denomination enable/inhibit"
settings_menu['s'] = "Denomination security"
settings_menu['d'] = "Direction enable/inhibit"
settings_menu['o'] = "Optional functions"
settings_menu['b'] = "Bar code ticket options"
settings_menu['q'] = "Back"
choice = display_menu(settings_menu, '>>>', "Settings",
"Changes will take effect next time bill validator is initialized")
if choice == 'e':
denom_settings()
elif choice == 's':
security_settings()
elif choice == 'd':
direction_settings()
elif choice == 'o':
opt_settings()
elif choice == 'b':
t.wipe()
print("Barcode settings not available.")
input("Press enter to go back")
return
def opt_settings():
global CONFIG
t.wipe()
display_header("Optional function settings")
opts = dict()
set_opts = OrderedDict()
opt_txt = {
'power_recovery': "Power recovery:\t\t\t\t",
        'auto_retry': "Auto-retry operation:\t\t\t",
'24_char_barcode': "Accept 24-character barcodes:\t\t",
'near_full': "Stacker nearly full event:\t\t",
'entrance_event': "Entrance sensor event:\t\t\t",
'encryption': "Encryption:\t\t\t\t",
}
for i, k in enumerate(CONFIG['bv.optional'].keys()):
opt_enabled = CONFIG['bv.optional'].getboolean(k)
opts[i] = k
set_opts[k] = opt_enabled
print(opt_txt[k], end='')
start_x, start_y = t.get_pos()
if opt_enabled:
print('X')
else:
print('_')
print("\n\n_ = disabled, X = enabled")
print("\nPress Enter to save and go back, or Esc to go back without saving")
t.set_pos(start_x, 3)
max_opt = len(CONFIG['bv.optional']) - 1
cur_opt = 0
while True:
x, y = t.get_pos()
c = t.getch()
if c == b'\xe0H' and cur_opt > 0:
# up
t.set_pos(x, y-1)
cur_opt -= 1
elif c == b'\xe0P' and cur_opt < max_opt:
# down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'\t' and cur_opt == max_opt:
# wrap around to first option
t.set_pos(x, 3)
cur_opt = 0
elif c == b'\t':
# next option, same as down
t.set_pos(x, y+1)
cur_opt += 1
elif c == b'X' or c == b'x':
set_opts[opts[cur_opt]] = True
print('X', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b' ':
set_opts[opts[cur_opt]] = False
print('_', end='')
if cur_opt < max_opt:
t.set_pos(x, y+1)
cur_opt += 1
else:
t.set_pos(x, y)
elif c == b'\r':
# save and go back
CONFIG['bv.optional'] = set_opts
return
elif c == b'\x1b':
            # escape, go back without saving
return
def direction_settin
|
mryanlam/f5-ansible
|
library/bigip_user.py
|
Python
|
gpl-3.0
| 18,876
| 0.000371
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_user
short_description: Manage user accounts and user attributes on a BIG-IP.
description:
- Manage user accounts and user attributes on a BIG-IP.
version_added: "2.4"
options:
full_name:
description:
- Full name of the user.
username_credential:
description:
- Name of the user to create, remove or modify.
required: True
aliases:
- name
password_credential:
description:
- Set the users password to this unencrypted value.
C(password_credential) is required when creating a new account.
shell:
description:
- Optionally set the users shell.
choices:
- bash
- none
- tmsh
partition_access:
description:
- Specifies the administrative partition to which the user has access.
C(partition_access) is required when creating a new account.
Should be in the form "partition:role". Valid roles include
C(acceleration-policy-editor), C(admin), C(application-editor), C(auditor)
C(certificate-manager), C(guest), C(irule-manager), C(manager), C(no-access)
C(operator), C(resource-admin), C(user-manager), C(web-application-security-administrator),
and C(web-application-security-editor). Partition portion of tuple should
be an existing partition or the value 'all'.
state:
description:
- Whether the account should exist or not, taking action if the state is
different from what is stated.
default: present
choices:
- present
- absent
update_password:
description:
- C(always) will allow to update passwords if the user chooses to do so.
C(on_create) will only set the password for newly created users.
default: on_create
choices:
- always
- on_create
notes:
- Requires the requests Python package on the host. This is as easy as
pip install requests
- Requires BIG-IP versions >= 12.0.0
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = '''
- name: Add the user 'johnd' as an admin
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
username_credential: "johnd"
password_credential: "password"
full_name: "John Doe"
partition_access: "all:admin"
update_password: "on_create"
state: "present"
delegate_to: localhost
- name: Change the user "johnd's" role and shell
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
username_credential: "johnd"
partition_access: "NewPartition:manager"
shell: "tmsh"
state: "present"
delegate_to: localhost
- name: Make the user 'johnd' an admin and set to advanced shell
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "johnd"
partition_access: "all:admin"
shell: "bash"
state: "present"
delegate_to: localhost
- name: Remove the user 'johnd'
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
name: "johnd"
state: "absent"
delegate_to: localhost
- name: Update password
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
username_credential: "johnd"
password_credential: "newsupersecretpassword"
delegate_to: localhost
# Note that the second time this task runs, it would fail because
# The password has been changed. Therefore, it is recommended that
# you either,
#
# * Put this in its own playbook that you run when you need to
# * Put this task in a `block`
# * Include `ignore_errors` on this task
- name: Change the Admin password
bigip_user:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
username_credential: "admin"
password_credential: "NewSecretPassword"
delegate_to: localhost
'''
RETURN = '''
full_name:
description: Full name of the user
returned: changed and success
type: string
sample: "John Doe"
partition_access:
description:
- List of strings containing the user's roles and which partitions they
are applied to. They are specified in the form "partition:role".
returned: changed and success
type: list
sample: "['all:admin']"
shell:
description: The shell assigned to the user account
returned: changed and success
type: string
sample: "tmsh"
'''
from distutils.version import LooseVersion
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'partitionAccess': 'partition_access',
'description': 'full_name',
}
updatables = [
'partition_access', 'full_name', 'shell', 'password_credential'
]
returnables = [
'shell', 'partition_access', 'full_name', 'username_credential'
]
api_attributes = [
'shell', 'partitionAccess', 'description', 'name', 'password'
]
@property
def partition_access(self):
"""Partition access values will require some transformation.
This operates on both user and device returned values.
        Check if the element is a string from user input in the format
        name:role; if it is, split it and create a dictionary out of it.
        If the access value is a dictionary (returned from the device,
        or already processed) and contains a nameReference
        key, delete it and append the remaining dictionary element into
        a list.
        If there is no nameReference key to remove, just append the
        dictionary into the list.
:returns list of dictionaries
"""
if self._values['partition_access'] is None:
return
result = []
part_access = self._values['partition_access']
for access in part_access:
if isinstance(access, dict):
if 'nameReference' in access:
del access['nameReference']
result.append(access)
else:
result.append(access)
if isinstance(access, str):
acl = access.split(':')
if acl[0].lower() == 'all':
acl[0] = 'all-partitions'
value = dict(
name=acl[0],
role=acl[1]
)
result.append(value)
return result
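    # Added sketch, not part of the original source: a user-supplied value of
    #   ["all:admin"]
    # is normalised by the property above into
    #   [{'name': 'all-partitions', 'role': 'admin'}]
    # which matches the partitionAccess structure used by the API mapping.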
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if api_attribute in self.api_map:
result[api_attribute] = getattr(
self, self.api_map[api_attribute])
elif api_attribute == 'password':
result[api_attribute] = self._values['password_credential']
else:
result[api_attribute] = getattr(self, api
|
ict-felix/stack
|
modules/resource/orchestrator/src/credentials/cred_util.py
|
Python
|
apache-2.0
| 16,348
| 0.00312
|
# ----------------------------------------------------------------------
# Copyright (c) 2010-2014 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
# ----------------------------------------------------------------------
from extensions.sfa.util.xrn import hrn_authfor_hrn
'''
Credential creation and verification utilities.
'''
import os
import sys
import datetime
import dateutil
from extensions.sfa.trust import credential as cred
from extensions.sfa.trust import gid
from extensions.sfa.trust import rights
from extensions.sfa.trust.certificate import Certificate
from extensions.sfa.trust.credential_factory import CredentialFactory
from extensions.sfa.trust.abac_credential import ABACCredential
from extensions.sfa.trust.speaksfor_util import determine_speaks_for_ex
def naiveUTC(dt):
"""Converts dt to a naive datetime in UTC.
if 'dt' has a timezone then
convert to UTC
strip off timezone (make it "naive" in Python parlance)
"""
if dt.tzinfo:
tz_utc = dateutil.tz.tzutc()
dt = dt.astimezone(tz_utc)
dt = dt.replace(tzinfo=None)
return dt
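# Added usage sketch, not part of the original source (assumes dateutil.tz is
# importable in this context):
#   eastern = dateutil.tz.gettz('US/Eastern')
#   aware = datetime.datetime(2014, 6, 1, 12, 0, tzinfo=eastern)
#   naiveUTC(aware)   # -> datetime.datetime(2014, 6, 1, 16, 0), tzinfo stripped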
class CredentialVerifier(object):
"""Utilities to verify signed credentials from a given set of
root certificates. Will compare target and source URNs, and privileges.
See verify and verify_from_strings methods in particular."""
CATEDCERTSFNAME = 'CATedCACerts.pem'
# root_cert_fileordir is a trusted root cert file or directory of
# trusted roots for verifying credentials
def __init__(self, root_cert_fileordir):
if root_cert_fileordir is None:
raise Exception("Missing Root certs argument")
elif os.path.isdir(root_cert_fileordir):
files = os.listdir(root_cert_fileordir)
self.root_cert_files = []
for file in files:
# FIXME: exclude files that aren't cert files?
#print file == CredentialVerifier.CATEDCERTSFNAME
if file == CredentialVerifier.CATEDCERTSFNAME:
continue
self.root_cert_files.append(os.path.expanduser(os.path.join(root_cert_fileordir, file)))
#self.root_cert_files = [root_cert_fileordir]
else:
raise Exception("Couldn't find Root certs in %s" % root_cert_fileordir)
@classmethod
def getCAsFileFromDir(cls, caCerts):
        '''Take a directory of CA certificates and concatenate them into a single
file suitable for use by the Python SSL library to validate client
credentials. Existing file is replaced.'''
if caCerts is None:
raise Exception ('Missing caCerts argument')
if os.path.isfile(os.path.expanduser(caCerts)):
return caCerts
if not os.path.isdir(os.path.expanduser(caCerts)):
raise Exception ('caCerts arg Not a file or a dir: %s' % caCerts)
# Now we have a dir of caCerts files
# For each file in the dir (isfile), concatenate them into a new file
comboFullPath = os.path.join(caCerts, CredentialVerifier.CATEDCERTSFNAME)
caFiles = os.listdir(caCerts)
#logger.debug('Got %d potential caCert files in the dir', len(caFiles))
outfile = open(comboFullPath, "w")
okFileCount = 0
for filename in caFiles:
filepath = os.path.join(caCerts, filename)
# Confirm it's a CA file?
# if not file.endswith('.pem'):
# continue
if not os.path.isfile(os.path.expanduser(filepath)):
continue
if filename == CredentialVerifier.CATEDCERTSFNAME:
# logger.debug('Skipping previous cated certs file')
continue
okFileCount += 1
certfile = open(filepath)
for line in certfile:
outfile.write(line)
certfile.close()
outfile.close()
if okFileCount == 0:
sys.exit('Found NO trusted certs in %s!' % caCerts)
return comboFullPath
def verify_from_strings(self, gid_string, cred_strings, target_urn,
privileges, options=None):
'''Create Credential and GID objects from the given strings,
and then verify the GID has the right privileges according
to the given credentials on the given target.'''
def make_cred(cred_string):
credO = None
try:
credO = CredentialFactory.createCred(credString=cred_string)
except Exception, e:
print(e)
return credO
root_certs = \
[Certificate(filename=root_cert_file) \
for root_cert_file in self.root_cert_files]
caller_gid = gid.GID(string=gid_string)
# Potentially, change gid_string to be the cert of the actual user
# if this is a 'speaks-for' invocation
speaksfor_gid = \
determine_speaks_for_ex(None, \
cred_strings, # May include ABAC speaks_for credential
caller_gid, # Caller cert (may be the tool 'speaking for' user)
options, # May include 'geni_speaking_for' option with user URN
root_certs
)
if caller_gid.get_subject() != speaksfor_gid.get_subject():
# speaksfor_urn = speaksfor_gid.get_urn()
caller_gid = speaksfor_gid
# Remove the abac credentials
cred_strings = [cred_string for cred_string in cred_strings
if CredentialFactory.getType(cred_string) == cred.Credential.SFA_CREDENTIAL_TYPE]
return self.verify(caller_gid,
map(make_cred, cred_strings),
target_urn,
privileges)
def verify_source(self, source_gid, credential):
'''Ensure the credential is giving privileges to the caller/client.
Return True iff the given source (client) GID's URN
is == the given credential's Caller (Owner) URN'''
source_urn = source_gid.get_urn()
cred_source_urn = credential.get_gid_caller().get_urn()
#self.logger.debug('Verifying source %r against credential source %r (cred target %s)',
# source_urn, cred_source_urn, credential.get_gid_object().get_urn())
result = (cred_source_urn == source_urn)
if result:
# self.logger.debug('Source URNs match')
pass
return result
def verify_target(self, target_urn, credential):
'''Ensure the credential is giving privileges on the right subject/target.
Return True if no target is specified, or the target URN
matches the credential's Object's (target's) URN, else return False.
No target is required, for example, to ListResources.'''
if not target_urn:
# self.logger.debug('No target specified, considering it a match.')
return True
else:
cred_target_urn = credential.get_gid_object().get_urn()
# self.logger.debug('Verifying target %r against credential target %r',
#
|
sysbot/trigger
|
trigger/utils/cli.py
|
Python
|
bsd-3-clause
| 9,769
| 0.001843
|
#coding=utf-8
"""
Command-line interface utilities for Trigger tools. Intended for re-usable
pieces of code like user prompts, that don't fit in other utils modules.
"""
__author__ = 'Jathan McCollum'
__maintainer__ = 'Jathan McCollum'
__email__ = 'jathan.mccollum@teamaol.com'
__copyright__ = 'Copyright 2006-2012, AOL Inc.'
import datetime
from fcntl import ioctl
import os
import pwd
from pytz import timezone
import struct
import sys
import termios
import time
import tty
# Exports
__all__ = ('yesno', 'get_terminal_width', 'get_terminal_size', 'Whirlygig',
'NullDevice', 'print_severed_head', 'min_sec', 'pretty_time',
'proceed', 'get_user')
# Functions
def yesno(prompt, default=False, autoyes=False):
"""
Present a yes-or-no prompt, get input, and return a boolean.
The ``default`` argument is ignored if ``autoyes`` is set.
:param prompt:
Prompt text
:param default:
Yes if True; No if False
:param autoyes:
Automatically return True
Default behavior (hitting "enter" returns ``False``)::
>>> yesno('Blow up the moon?')
Blow up the moon? (y/N)
False
Reversed behavior (hitting "enter" returns ``True``)::
>>> yesno('Blow up the moon?', default=True)
Blow up the moon? (Y/n)
True
Automatically return ``True`` with ``autoyes``; no prompt is displayed::
>>> yesno('Blow up the moon?', autoyes=True)
True
"""
if autoyes:
return True
sys.stdout.write(prompt)
if default:
sys.stdout.write(' (Y/n) ')
else:
sys.stdout.write(' (y/N) ')
sys.stdout.flush()
fd = sys.stdin.fileno()
attr = termios.tcgetattr(fd)
try:
tty.setraw(fd)
yn = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSANOW, attr)
print ''
if yn in ('y', 'Y'):
return True
elif yn in ('n', 'N'):
return False
else:
return default
def proceed():
"""Present a proceed prompt. Return ``True`` if Y, else ``False``"""
return raw_input('\nDo you wish to proceed? [y/N] ').lower().startswith('y')
def get_terminal_width():
"""Find and return stdout's terminal width, if applicable."""
try:
width = struct.unpack("hhhh", ioctl(1, termios.TIOCGWINSZ, ' '*8))[1]
except IOError:
width = sys.maxint
return width
def get_terminal_size():
"""Find and return stdouts terminal size as (height, width)"""
rows, cols = os.popen('stty size', 'r').read().split()
return rows, cols
def get_user():
"""Return the name of the current user."""
return pwd.getpwuid(os.getuid())[0]
def print_severed_head():
"""
Prints a demon holding a severed head. Best used when things go wrong, like
production-impacting network outages caused by fat-fingered ACL changes.
Thanks to Jeff Sullivan for this best error message ever.
"""
print r"""
_( (~\
_ _ / ( \> > \
-/~/ / ~\ :; \ _ > /(~\/
|| | | /\ ;\ |l _____ |; ( \/ > >
_\\)\)\)/ ;;; `8o __-~ ~\ d| \ //
///(())(__/~;;\ "88p;. -. _\_;.oP (_._/ /
(((__ __ \\ \ `>,% (\ (\./)8" ;:' i
)))--`.'-- (( ;,8 \ ,;%%%: ./V^^^V' ;. ;.
((\ | /)) .,88 `: ..,,;;;;,-::::::'_::\ ||\ ;[8: ;
)| ~-~ |(|(888; ..``'::::8888oooooo. :\`^^^/,,~--._ |88:: |
|\ -===- /| \8;; ``:. oo.8888888888:`((( o.ooo8888Oo;:;:' |
|_~-___-~_| `-\. ` `o`88888888b` )) 888b88888P""' ;
; ~~~~;~~ "`--_`. b`888888888;(.,"888b888" ..::;-'
; ; ~"-.... b`8888888:::::.`8888. .:;;;''
; ; `:::. `:::OOO:::::::.`OO' ;;;''
: ; `. "``::::::'' .'
; `. \_ /
; ; +: ~~-- `:' -'; ACL LOADS FAILED
`: : .::/
; ;;+_ :::. :..;;; YOU LOSE
;;;;;;,;;;;;;;;,;
"""
def pretty_time(t):
"""
Print a pretty version of timestamp, including timezone info. Expects
the incoming datetime object to have proper tzinfo.
:param t:
A ``datetime.datetime`` object
>>> import datetime
>>> from pytz import timezone
>>> localzone = timezone('US/Eastern')
<DstTzInfo 'US/Eastern' EST-1 day, 19:00:00 STD>
>>> t = datetime.datetime.now(localzone)
>>> print t
2011-07-19 12:40:30.820920-04:00
>>> print pretty_time(t)
09:40 PDT
>>> t = datetime.datetime(2011,07,20,04,13,tzinfo=localzone)
>>> print t
2011-07-20 04:13:00-05:00
>>> print pretty_time(t)
tomorrow 02:13 PDT
"""
from trigger.conf import settings
localzone = timezone(os.environ.get('TZ', settings.BOUNCE_DEFAULT_TZ))
t = t.astimezone(localzone)
midnight = datetime.datetime.combine(datetime.datetime.now(), datetime.time(tzinfo=localzone))
midnight += datetime.timedelta(1)
if t < midnight:
return t.strftime('%H:%M %Z')
elif t < midnight + datetime.timedelta(1):
return t.strftime('tomorrow %H:%M %Z')
elif t < midnight + datetime.timedelta(6):
return t.strftime('%A %H:%M %Z')
else:
return t.strftime('%Y-%m-%d %H:%M %Z')
def min_sec(secs):
"""
Takes an epoch timestamp and returns string of minutes:seconds.
:param secs:
Timestamp (in seconds)
>>> import time
>>> start = time.time() # Wait a few seconds
>>> finish = time.time()
>>> min_sec(finish - start)
'0:11'
"""
secs = int(secs)
return '%d:%02d' % (secs / 60, secs % 60)
def setup_tty_for_pty(func):
"""
Sets up tty for raw mode while retaining original tty settings and then
starts the reactor to connect to the pty. Upon exiting pty, restores
original tty settings.
:param func:
The callable to run after the tty is ready, such as ``reactor.run``
"""
# Preserve original tty settings
stdin_fileno = sys.stdin.fileno()
old_ttyattr = tty.tcgetattr(stdin_fileno)
try:
# Enter raw mode on the local tty.
tty.setraw(stdin_fileno)
raw_ta = tty.tcgetattr(stdin_fileno)
raw_ta[tty.LFLAG] |= tty.ISIG
raw_ta[tty.OFLAG] |= tty.OPOST | tty.ONLCR
# Pass ^C through so we can abort traceroute, etc.
raw_ta[tty.CC][tty.VINTR] = '\x18' # ^X is the new ^C
# Ctrl-Z is used by a lot of vendors to exit config mode
raw_ta[tty.CC][tty.VSUSP] = 0 # disable ^Z
tty.tcsetattr(stdin_fileno, tty.TCSANOW, raw_ta)
# Execute our callable here
func()
finally:
# Restore original tty settings
tty.tcsetattr(stdin_fileno, tty.TCSANOW, old_ttyattr)
def update_password_and_reconnect(hostname):
"""
Prompts the user to update their password and reconnect to the target
device
:param hostname: Hostname of the device to connect to.
"""
if yesno('Authentication failed, would you like to update your password?',
default=True):
from trigger import tacacsrc
tacacsrc.update_credentials(hostname)
if yesno('\nReconnect to %s?' % hostname, default=True):
# Replaces the current process w/ same pid
os.execl(sys.executable, sys.executable, *sys.argv)
# Classes
class NullDevice(object):
"""
    Used to suppress output to ``sys.stdout`` (aka ``print``).
Example::
>>> from trigger.utils.cli import NullDevice
>>> import sys
>>> print "1 - this will print to STDOUT"
1 - this will print to STDOUT
>>> original_stdout = sys.stdout # keep a reference to STDOUT
>>> sys.stdout = NullDevice() # redirect the real STDOUT
>>> print "2 -
|
cidadania/e-cidadania
|
src/apps/thirdparty/smart_selects/urls.py
|
Python
|
apache-2.0
| 1,198
| 0.003339
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 Clione Software
# Copyright (c) 2010-2013 Cidadania S. Coop. Galega
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf.urls.defaults import *
urlpatterns = patterns('apps.thirdparty.smart_selects.views',
url(r'^all/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain_all', name='chained_filter_all'),
url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'),
    url(r'^filter/(?P<app>[\w\-]+)/(?P<model>[\w\-]+)/(?P<manager>[\w\-]+)/(?P<field>[\w\-]+)/(?P<value>[\w\-]+)/$', 'filterchain', name='chained_filter'),
)
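# Added sketch, not part of the original source: a request such as
#   /filter/places/city/country/3/
# matches the second pattern above and dispatches to the 'filterchain' view with
# app='places', model='city', field='country', value='3'.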
|
lispc/Paddle
|
python/paddle/trainer_config_helpers/tests/configs/test_cost_layers.py
|
Python
|
apache-2.0
| 1,402
| 0
|
from paddle.trainer_config_helpers import *
settings(learning_rate=1e-4, batch_size=1000)
seq_in = data_layer(name='input', size=200)
labels = data_layer(name='labels', size=5000)
probs = data_layer(name='probs', size=10)
xe_label = data_layer(name='xe-label', size=10)
hidden = fc_layer(input=seq_in, size=4)
outputs(
ctc_layer(
input=seq_in, label=labels),
warp_ctc_layer(
input=seq_in, label=labels, blank=0),
crf_layer(
input=hidden, label=data_layer(
name='crf_label', size=4)),
rank_cost(
left=data_layer(
name='left', size=1),
right=data_layer(
name='right', size=1),
label=data_layer(
name='label', size=1)),
lambda_cost(
input=data_layer(
name='list_feature', size=100),
score=data_layer(
name='list_scores', size=1)),
cross_entropy(
input=probs, label=xe_label),
cross_entropy_with_selfnorm(
        input=probs, label=xe_label),
huber_regression_cost(
input=seq_in, label=labels),
huber_classification_cost(
input=data_layer(
            name='huber_probs', size=1),
label=data_layer(
name='huber_label', size=1)),
multi_binary_label_cross_entropy(
input=probs, label=xe_label),
sum_cost(input=hidden),
nce_layer(
input=hidden, label=labels))
|
sentriz/steely
|
steely/plugins/bible/main.py
|
Python
|
gpl-3.0
| 3,384
| 0.001182
|
import json
import random
import requests
from plugin import create_plugin
from message import SteelyMessage
HELP_STR = """
Request your favourite bible quotes, right to the chat.
Usage:
/bible - Random quote
/bible Genesis 1:3 - Specific verse
/bible help - This help text
Verses are specified in the format {book} {chapter}:{verse}
TODO: Book acronyms, e.g. Gen -> Genesis
TODO: Verse ranges, e.g. Genesis 1:1-3
"""
BIBLE_FILE = "plugins/bible/en_kjv.json"
BIBLE_URL = 'https://raw.githubusercontent.com/thiagobodruk/bible/master/json/en_kjv.json'
plugin = create_plugin(name='bible', author='CianLR', help=HELP_STR)
bible = None
book_to_index = {}
def make_book_to_index(bible):
btoi = {}
for i, book in enumerate(bible):
btoi[book['name'].lower()] = i
return btoi
@plugin.setup()
def plugin_setup():
global bible, book_to_index
try:
bible = json.loads(open(BIBLE_FILE, encoding='utf-8-sig').read())
book_to_index = make_book_to_index(bible)
return
except BaseException as e:
pass
# We've tried nothing and we're all out of ideas, download a new bible.
try:
bible = json.loads(
requests.get(BIBLE_URL).content.decode('utf-8-sig'))
except BaseException as e:
return "Error loading bible: " + str(e)
book_to_index = make_book_to_index(bible)
with open(BIBLE_FILE, 'w') as f:
json.dump(bible, f)
@plugin.listen(command='bible help')
def help_command(bot, message: SteelyMessage, **kwargs):
bot.sendMessage(
HELP_STR,
thread_id=message.thread_id, thread_type=message.thread_type)
def is_valid_quote(book, chapter, verse):
return (0 <= book < len(bible) and
0 <= chapter < len(bible[book]['chapters']) and
0 <= verse < len(bible[book]['chapters'][chapter]))
def get_quote(book, chapter, verse):
return "{}\n - {} {}:{}".format(
bible[book]["chapters"][chapter][verse],
bible[book]["name"], chapter + 1, verse + 1)
def get_quote_from_ref(book_name, ref):
if book_name.lower() not in book_to_index:
return "Could not find book name: " + book_name
book_i = book_to_index[book_name.lower()]
if len(ref.split(':')) != 2:
return 'Reference not in form "Book Chapter:Passage"'
chapter, verse = ref.split(':')
if not chapter.isnumeric():
return "Chapter must be an int"
chapter_i = int(chapter) - 1
if not verse.isnumeric():
return "Passage must be an int"
verse_i = int(verse) - 1
if not is_valid_quote(book_i, chapter_i, verse_i):
return "Verse or chapter out of range"
return get_quote(book_i, chapter_i, verse_i)
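# Added usage sketch, not part of the original source:
#   get_quote_from_ref("Genesis", "1:3")
# looks the verse up via book_to_index and returns it formatted by get_quote(),
# e.g. "<verse text>\n - Genesis 1:3"; malformed references yield an error string.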
@plugin.listen(command='bible [book] [passage]')
def passage_command(bot, message: SteelyMessage, **kwargs):
if 'passage' not in kwargs:
book = random.randrange(len(bible))
chapter = random.randrange(len(bible[book]["chapters"]))
        verse = random.randrange(len(bible[book]["chapters"][chapter]))
bot.sendMessage(
get_quote(book, chapter, verse),
thread_id=message.thread_id, thread_type=message.thread_type)
else:
bot.sendMessage(
get_quote_from_ref(kwargs['book'], kwargs['passage']),
thread_id=message.thread_id, thread_type=message.thread_type)
|
sujith7c/py-system-tools
|
en_mod_rw.py
|
Python
|
gpl-3.0
| 636
| 0.031447
|
#!/usr/bin/python
import os,sys,re
#Check the OS Version
RELEASE_FILE = "/etc/redhat-release"
RWM_FILE = "/etc/httpd/conf.modules.d/00-base.conf"
if os.path.isfile(RELEASE_FILE):
f=open(RELEASE_FILE,"r")
    rel_list = f.read().split()
if rel_list[2] == "release" and tuple(rel_list[3].split(".")) < ('8','5'):
print("so far good")
else:
raise("Unable to find the OS version")
#Check Apache installed
#TODO
#
#Test if the rewrite module file present
if os.path.isfile(RWM_FILE):
print("re write")
##print sys.version_info
##if sys.version_info < (2,7):
## print "This programm works only with the Python 2.7"###
| |
akayunov/amqpsfw
|
lib/amqpsfw/application.py
|
Python
|
mit
| 6,761
| 0.002367
|
import logging
import select
import socket
from collections import deque
from amqpsfw import amqp_spec
from amqpsfw.exceptions import SfwException
from amqpsfw.configuration import Configuration
amqpsfw_logger = logging.getLogger('amqpsfw')
log_handler = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s.py.%(funcName)s:%(lineno)d - %(message)s')
log_handler.setFormatter(formatter)
amqpsfw_logger.addHandler(log_handler)
amqpsfw_logger.setLevel(logging.ERROR)
log = logging.getLogger(__name__)
class BufferOut:
def __init__(self):
self.frame_queue = deque()
self.current_frame_bytes = b''
def clear(self):
self.current_frame_bytes = b''
def append_frame(self, x):
self.frame_queue.append(x)
class BufferIn:
def __init__(self):
self.frame_bytes = b''
self.parsed_data_size = 0
def clear(self):
self.__init__()
class Application:
STOPPED = 'STOPPPED'
RUNNING = 'RUNNING'
READ = select.EPOLLIN
WRITE = select.EPOLLOUT
ERROR = (select.EPOLLERR | select.EPOLLPRI | select.EPOLLHUP | select.EPOLLRDHUP | select.EPOLLRDBAND)
PRIWRITE = select.EPOLLWRBAND # doesn't use
def __init__(self, ioloop, app_socket=None):
self.buffer_out = BufferOut()
self.buffer_in = BufferIn()
self.ioloop = ioloop
self.status = 'RUNNING'
self.socket = app_socket
self.expected_response_frames = []
self.app_gen = self.processor()
self.config = Configuration()
def start(self):
raise NotImplementedError
def modify_to_read(self):
events = self.READ | self.ERROR
self.ioloop.update_handler(self.socket.fileno(), events)
def modify_to_write(self):
events = self.WRITE | self.READ | self.ERROR
self.ioloop.update_handler(self.socket.fileno(), events)
def write(self, value):
if self.status == self.STOPPED:
raise SfwException('Internal', 'Aplication is stopped')
self.buffer_out.append_frame(value)
self.modify_to_write()
def handler(self, fd, event):
# TODO why == RUNNING is here?
if event & self.WRITE and self.status == 'RUNNING':
self.handle_write()
if event & self.READ and self.status == 'RUNNING':
self.handle_read()
if event & self.ERROR:
self.handle_error(fd)
def handle_error(self, fd):
log.error('Get error on socket: %s', fd)
self.stop()
def handle_read(self):
        # We cannot parse the whole buffer in a single call: with a lot of queued data we
        # would keep looping here until the buffer drained, yet frames such as Basic.Ack need
        # a response written immediately after the frame arrives. So the data is peeked at
        # without being removed from the socket buffer (so read events keep firing), and the
        # parsed bytes are consumed from the socket buffer only once every complete frame in
        # the application buffer has been parsed.
payload_size, frame, self.buffer_in.frame_bytes = amqp_spec.decode_frame(self.buffer_in.frame_bytes)
if not frame:
self.buffer_in.frame_bytes = self.socket.recv(4096, socket.MSG_PEEK)
if not self.buffer_in.frame_bytes:
self.stop()
payload_size, frame, self.buffer_in.frame_bytes = amqp_spec.decode_frame(self.buffer_in.frame_bytes)
if frame:
self.buffer_in.parsed_data_size += (payload_size + 8)
log.debug('IN {}: {}'.format(self.socket.fileno(), frame))
if self.expected_response_frames and not issubclass(type(frame), tuple(self.expected_response_frames)):
log.error('Unexpected frame type: %s', str(frame))
self.stop()
else:
self.expected_response_frames = []
response = self.method_handler(frame)
_, next_frame, _ = amqp_spec.decode_frame(self.buffer_in.frame_bytes)
if not next_frame:
# "frame" was last frame in buffer_in so remove already parsed data, do second read without flag
self.socket.recv(self.buffer_in.parsed_data_size)
self.buffer_in.clear()
if response:
# TODO why this try here?
try:
self.app_gen.send(response)
except StopIteration:
pass
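    # Added sketch of the peek-then-consume pattern used above, not part of the
    # original source:
    #   peeked = sock.recv(4096, socket.MSG_PEEK)  # data stays in the kernel buffer
    #   ...parse complete frames out of `peeked`...
    #   sock.recv(parsed_size)                     # consume only the parsed bytes
    # Unconsumed partial frames remain in the socket, so epoll keeps reporting the
    # fd as readable and the remainder is handled on a later handle_read() call.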
def handle_write(self):
if len(self.buffer_out.frame_queue) > 0 and not self.buffer_out.current_frame_bytes:
self.expected_response_frames = self.buffer_out.frame_queue[-1].expected_response_frames
for frame in self.buffer_out.frame_queue:
log.debug('OUT {}: {}'.format(self.socket.fileno(), frame))
self.buffer_out.current_frame_bytes += frame.encoded
self.buffer_out.frame_queue.clear()
if self.buffer_out.current_frame_bytes:
writed_bytes = self.socket.send(self.buffer_out.current_frame_bytes)
self.buffer_out.current_frame_bytes = self.buffer_out.current_frame_bytes[writed_bytes:]
if not self.buffer_out.current_frame_bytes and not len(self.buffer_out.frame_queue):
self.modify_to_read()
if self.expected_response_frames is None:
# TODO why this try here?
try:
self.app_gen.send(None)
except StopIteration:
pass
self.buffer_out.clear()
def sleep(self, duration):
self.modify_to_write()
self.ioloop.current().call_later(duration, next, self.app_gen)
return
    def processor(self):
yield
raise NotImplementedError
def stop(self):
log.debug('Stop application')
self.buffer_in.clear()
self.buffer_out.clear()
self.status = self.STOPPED
self.ioloop.stop()
self.socket.close()
def on_hearbeat(self, method):
self.write(amqp_spec.Heartbeat())
def on_connection_close(self, method):
self.write(amqp_spec.Connection.CloseOk())
self.stop()
def on_channel_flow(self, method):
# TODO if active=0 stop sending data
self.write(amqp_spec.Channel.FlowOk(method.active))
def on_channel_close(self, method):
self.write(amqp_spec.Channel.CloseOk())
method_mapper = {
amqp_spec.Heartbeat: on_hearbeat,
amqp_spec.Connection.Close: on_connection_close,
amqp_spec.Channel.Close: on_channel_close,
amqp_spec.Channel.Flow: on_channel_flow
}
def method_handler(self, method):
if type(method) in self.method_mapper:
return self.method_mapper[type(method)](self, method)
else:
return method
|
JamesKBowler/networking_scripts
|
cisco/cisco_telnet_recon.py
|
Python
|
mit
| 1,465
| 0.006826
|
import telnetlib
from time import sleep
import re
import os
HOST_IPs = [
"172.16.1.253", "172.16.1.254"
]
telnet_password = b"pass_here"
enable_password = b"pass_here"
show_commands_list = [
b"show run",
b"show ip arp",
b"show vlan",
b"show cdp neighbors",
b"show ip interface brief"
b"show interface status",
b"show interface description",
b"show etherchannel summary"
]
for HOST_IP in HOST_IPs:
# Telnet to the device and login
tn = telnetlib.Telnet(HOST_IP)
tn.read_until(b"Password: ")
    tn.write(telnet_password + b"\r\n")
sleep(0.5)
# Get host name from prompt and make a directory
    host_name = re.sub(
'\r\n',"",tn.read_very_eager().decode('ascii'))[:-1]
if not os.path.exists(host_name):
os.makedirs(host_name)
# Log into enable mode
tn.write(b"enable\r\n")
tn.write(enable_password + b"\r\n")
# Set terminal output to 0
tn.write(b"terminal length 0\r\n")
tn.read_very_eager().decode('ascii')
sleep(1)
# execute show commands and save in the directory created
for show_command in show_commands_list:
tn.write(show_command+b"\r\n")
sleep(1)
out = tn.read_very_eager().decode('ascii')
file_name = re.sub(' ', '_', show_command.decode('ascii'))
with open(host_name+'/'+file_name+'.txt','w') as f:
f.write(out)
# Close telnet connection
tn.close()
|
jessekl/flixr
|
venv/lib/python2.7/site-packages/fabric/sftp.py
|
Python
|
mit
| 12,958
| 0.000772
|
from __future__ import with_statement
import hashlib
import os
import posixpath
import stat
import re
from fnmatch import filter as fnfilter
from fabric.state import output, connections, env
from fabric.utils import warn
from fabric.context_managers import settings
def _format_local(local_path, local_is_path):
"""Format a path for log output"""
if local_is_path:
return local_path
else:
# This allows users to set a name attr on their StringIO objects
# just like an open file object would have
return getattr(local_path, 'name', '<file obj>')
class SFTP(object):
"""
SFTP helper class, which is also a facade for ssh.SFTPClient.
"""
def __init__(self, host_string):
self.ftp = connections[host_string].open_sftp()
# Recall that __getattr__ is the "fallback" attribute getter, and is thus
# pretty safe to use for facade-like behavior as we're doing here.
def __getattr__(self, attr):
return getattr(self.ftp, attr)
def isdir(self, path):
try:
return stat.S_ISDIR(self.ftp.stat(path).st_mode)
except IOError:
return False
def islink(self, path):
try:
return stat.S_ISLNK(self.ftp.lstat(path).st_mode)
except IOError:
return False
def exists(self, path):
try:
self.ftp.lstat(path).st_mode
except IOError:
return False
return True
def glob(self, path):
from fabric.state import win32
dirpart, pattern = os.path.split(path)
rlist = self.ftp.listdir(dirpart)
names = fnfilter([f for f in rlist if not f[0] == '.'], pattern)
ret = [path]
if len(names):
s = '/'
ret = [dirpart.rstrip(s) + s + name.lstrip(s) for name in names]
if not win32:
ret = [posixpath.join(dirpart, name) for name in names]
return ret
def walk(self, top, topdown=True, onerror=None, followlinks=False):
from os.path import join
# We may not have read permission for top, in which case we can't get a
# list of the files the directory contains. os.path.walk always
# suppressed the exception then, rather than blow up for a minor reason
# when (say) a thousand readable directories are still left to visit.
# That logic is copied here.
try:
# Note that listdir and error are globals in this module due to
# earlier import-*.
names = self.ftp.listdir(top)
except Exception, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if self.isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not self.islink(path):
for x in self.walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
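    # Added usage sketch, not part of the original source:
    #   ftp = SFTP(env.host_string)
    #   for dirpath, dirs, files in ftp.walk('/var/log'):
    #       print dirpath, files
    # Mirrors os.walk(), but walks the remote host over the open SFTP session.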
def mkdir(self, path, use_sudo):
from fabric.api import sudo, hide
if use_sudo:
with hide('everything'):
sudo('mkdir "%s"' % path)
else:
self.ftp.mkdir(path)
def get(self, remote_path, local_path, use_sudo, local_is_path, rremote=None, temp_dir=""):
from fabric.api import sudo, hide
# rremote => relative remote path, so get(/var/log) would result in
# this function being called with
# remote_path=/var/log/apache2/access.log and
# rremote=apache2/access.log
rremote = rremote if rremote is not None else remote_path
# Handle format string interpolation (e.g. %(dirname)s)
path_vars = {
'host': env.host_string.replace(':', '-'),
'basename': os.path.basename(rremote),
'dirname': os.path.dirname(rremote),
'path': rremote
}
if local_is_path:
# Naive fix to issue #711
escaped_path = re.sub(r'(%[^()]*\w)', r'%\1', local_path)
local_path = os.path.abspath(escaped_path % path_vars )
# Ensure we give ssh.SFTPCLient a file by prepending and/or
# creating local directories as appropriate.
dirpath, filepath = os.path.split(local_path)
if dirpath and not os.path.exists(dirpath):
os.makedirs(dirpath)
if os.path.isdir(local_path):
local_path = os.path.join(local_path, path_vars['basename'])
if output.running:
print("[%s] download: %s <- %s" % (
env.host_string,
_format_local(local_path, local_is_path),
remote_path
))
# Warn about overwrites, but keep going
if local_is_path and os.path.exists(local_path):
msg = "Local file %s already exists and is being overwritten."
warn(msg % local_path)
# When using sudo, "bounce" the file through a guaranteed-unique file
# path in the default remote CWD (which, typically, the login user will
# have write permissions on) in order to sudo(cp) it.
if use_sudo:
target_path = remote_path
hasher = hashlib.sha1()
hasher.update(env.host_string)
hasher.update(target_path)
target_path = posixpath.join(temp_dir, hasher.hexdigest())
# Temporarily nuke 'cwd' so sudo() doesn't "cd" its mv command.
# (The target path has already been cwd-ified elsewhere.)
            with settings(hide('everything'), cwd=""):
sudo('cp -p "%s" "%s"' % (remote_path, target_path))
# The user should always own the copied file.
sudo('chown %s "%s"' % (env.user, target_path))
                # Only root and the user have the right to read the file
sudo('chmod %o "%s"' % (0400, target_path))
remote_path = target_path
try:
            # File-like objects: reset to file seek 0 (to ensure full overwrite)
# and then use Paramiko's getfo() directly
getter = self.ftp.get
if not local_is_path:
local_path.seek(0)
getter = self.ftp.getfo
getter(remote_path, local_path)
finally:
# try to remove the temporary file after the download
if use_sudo:
with settings(hide('everything'), cwd=""):
sudo('rm -f "%s"' % remote_path)
# Return local_path object for posterity. (If mutated, caller will want
# to know.)
return local_path
def get_dir(self, remote_path, local_path, use_sudo, temp_dir):
# Decide what needs to be stripped from remote paths so they're all
# relative to the given remote_path
if os.path.basename(remote_path):
strip = os.path.dirname(remote_path)
else:
strip = os.path.dirname(os.path.dirname(remote_path))
# Store all paths gotten so we can return them when done
result = []
# Use our facsimile of os.walk to find all files within remote_path
for context, dirs, files in self.walk(remote_path):
# Normalize current directory to be relative
# E.g. remote_path of /var/log and current dir of /var/log/apache2
# would be turned into just 'apache2'
lcontext = rcontext = context.replace(strip, '', 1).lstrip('/')
# Prepend local path to that to arrive at the local mirrored
# version of this directory. So if local_path was 'mylogs', we'd
# end up with 'mylogs/apache2'
lcontext = os.path.join(local_path, lcontext)
# Download any files in current directory
for f in files:
# Construct full and relative remote paths to this file
rpath = posixpath.join(cont
PyQuake/earthquakemodels | code/cocobbob/coco/code-preprocessing/archive-update/extract_extremes.py | Python | bsd-3-clause | 4,277 | 0.006079
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
from cocoprep.archive_load_data import get_file_name_list, parse_archive_file_name, get_key_value, parse_range
from cocoprep.archive_exceptions import PreprocessingException, PreprocessingWarning
def extract_extremes(input_paths, output_file, functions, instances, dimensions):
"""
Extracts the extreme points from the archives contained in input_paths and outputs them to the output_file in
the following format:
[problem_name] [extreme_point_1] [extreme_point_2]
Assumes the two extreme points are contained in the first two lines of every instance archive. If not, that
instance is skipped.
    Performs no sorting or filtering of the problems; therefore, if multiple copies of one problem are present
    in the input, multiple lines for that problem will also be present in the output.
"""
# Check whether input paths exist
input_files = get_file_name_list(input_paths, ".adat")
if len(input_files) == 0:
raise PreprocessingException('Folder {} does not exist or is empty'.format(input_paths))
# Read the input files one by one and save the result in the output_file
with open(output_file, 'a') as f_out:
for input_file in input_files:
try:
(suite_name, function, instance, dimension) = parse_archive_file_name(input_file)
if (function not in functions) or (instance not in instances) or (dimension not in dimensions):
continue
except PreprocessingWarning as warning:
print('Skipping file {}\n{}'.format(input_file, warning))
continue
print(input_file)
with open(input_file, 'r') as f_in:
extreme1 = None
count = 0
for line in f_in:
                    if line[0] == '%' and 'instance' in line:
                        instance = int(get_key_value(line[1:], 'instance').strip(' \t\n\r'))
count = 0
elif count > 1 or (len(line) == 0) or line[0] == '%':
continue
elif count == 0:
extreme1 = line.split()[1:3]
count = 1
elif count == 1:
extreme2 = line.split()[1:3]
count = 2
try:
string = '{}_f{:02d}_i{:02d}_d{:02d}\t'.format(suite_name, function, instance, dimension)
string = string + '\t'.join(extreme1) + '\t' + '\t'.join(extreme2) + '\n'
f_out.write(string)
except ValueError:
print('Skipping instance {} in file {}'.format(instance, input_file))
f_in.close()
f_out.flush()
f_out.close()
if __name__ == '__main__':
"""Extracts information on the two extreme points from the archives of solutions. Results are stored into an output
file.
"""
import timing
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--functions', type=parse_range, default=range(1, 56),
help='function numbers to be included in the processing of archives')
parser.add_argument('-i', '--instances', type=parse_range, default=range(1, 11),
help='instance numbers to be included in the processing of archives')
parser.add_argument('-d', '--dimensions', type=parse_range, default=[2, 3, 5, 10, 20, 40],
help='dimensions to be included in the processing of archives')
parser.add_argument('output', help='path to the output file')
parser.add_argument('input', default=[], nargs='+', help='path(s) to the input folder(s)')
args = parser.parse_args()
print('Program called with arguments: \ninput folders = {}\noutput file = {}'.format(args.input, args.output))
print('functions = {} \ninstances = {}\ndimensions = {}\n'.format(args.functions, args.instances, args.dimensions))
extract_extremes(args.input, args.output, args.functions, args.instances, args.dimensions)
trivoldus28/pulsarch-verilog | tools/local/bas-release/bas,3.9-SunOS-i386/lib/python/lib/python2.4/distutils/file_util.py | Python | gpl-2.0 | 8,320 | 0.002404
"""distutils.file_util
Utility functions for operating on single files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: file_util.py,v 1.17 2004/11/10 22:23:14 loewis Exp $"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents (src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not open '%s': %s" % (src, errstr)
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not delete '%s': %s" % (dst, errstr)
try:
fdst = open(dst, 'wb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not create '%s': %s" % (dst, errstr)
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not read from '%s': %s" % (src, errstr)
if not buf:
break
try:
fdst.write(buf)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not write to '%s': %s" % (dst, errstr)
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
# _copy_file_contents()
def copy_file (src, dst,
               preserve_mode=1,
preserve_times=1,
update=0,
link=None,
verbose=0,
dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
    copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError, \
"can't copy '%s': doesn't exist or not a regular file" % src
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError, \
"invalid value '%s' for 'link' argument" % link
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# On Mac OS, use the native file copy routine
if os.name == 'mac':
import macostools
try:
macostools.copy(src, dst, 0, preserve_times)
except os.error, exc:
raise DistutilsFileError, \
"could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <da@ski.org>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode:
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# copy_file ()
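# Illustrative usage sketch for copy_file() (not part of the original module;
# the paths below are hypothetical):
#
#     from distutils.file_util import copy_file
#     # Copies build/readme.txt into dist/, preserving mode and times, and
#     # skipping the copy when dist/readme.txt is already up to date.
#     dest_name, copied = copy_file('build/readme.txt', 'dist', update=1)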
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=0,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError, \
"can't move '%s': not a regular file" % src
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError, \
"can't move '%s': destination '%s' already exists" % \
(src, dst)
if not isdir(dirname(dst)):
raise DistutilsFileError, \
"can't move '%s': destination '%s' not a valid path" % \
(src, dst)
copy_it = 0
try:
os.rename(src, dst)
except os.error, (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError, \
"couldn't move '%s' to '%s': %s" % (src, dst, msg)
if copy_it:
copy_file(src, dst)
try:
os.unlink(src)
except os.error, (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError, \
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") % \
(src, dst, src, msg)
return dst
# move_file ()
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of string
sxjscience/tvm | python/tvm/contrib/clang.py | Python | apache-2.0 | 3,361 | 0.000595
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Util to invoke clang in the system."""
# pylint: disable=invalid-name
import subprocess
from tvm._ffi.base import py_str
import tvm.target
from . import util
def find_clang(required=True):
"""Find clang in system.
Parameters
----------
required : bool
        Whether it is required,
runtime error will be raised if the compiler is required.
Returns
-------
valid_list : list of str
List of possible paths.
Note
----
    This function will first search for the clang that
    matches the major llvm version that tvm was built with
"""
cc_list = []
major = tvm.target.codegen.llvm_version_major(allow_none=True)
if major is not None:
cc_list += ["clang-%d.0" % major]
cc_list += ["clang-%d" % major]
cc_list += ["clang"]
cc_list += ["clang.exe"]
valid_list = [util.which(x) for x in cc_list]
valid_list = [x for x in valid_list if x]
if not valid_list and required:
raise RuntimeError("cannot find clang, candidates are: " + str(cc_list))
return valid_list
def create_llvm(inputs, output=None, options=None, cc=None):
"""Create llvm text ir.
Parameters
----------
inputs : list of str
List of input files name or code source.
output : str, optional
Output file, if it is none
a temporary file is created
options : list
The list of additional options string.
cc : str, optional
The clang compiler, if not specified,
we will try to guess the matched clang version.
Returns
-------
code : str
The generated llvm text IR.
"""
cc = cc if cc else find_clang()[0]
cmd = [cc]
cmd += ["-S", "-emit-llvm"]
temp = util.tempdir()
output = output if output else temp.relpath("output.ll")
inputs = [inputs] if isinstance(inputs, str) else inputs
input_files = []
for i, code in enumerate(inputs):
if util.is_source_path(code):
input_files.append(code)
else:
temp_path = temp.relpath("input%d.cc" % i)
with open(temp_path, "w") as output_file:
output_file.write(code)
input_files.append(temp_path)
if options:
cmd += options
cmd += ["-o", output]
cmd += input_files
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
(out, _) = proc.communicate()
if proc.returncode != 0:
msg = "Compilation error:\n"
msg += py_str(out)
raise RuntimeError(msg)
return open(output).read()
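# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a system clang that find_clang() can locate; the C snippet below is
# made up purely for demonstration.
if __name__ == "__main__":
    example_src = "int add(int a, int b) { return a + b; }"
    # Prints the LLVM text IR emitted for the snippet at -O2.
    print(create_llvm([example_src], options=["-O2"]))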
Serulab/Py4Bio | code/ch14/basiccircle.py | Python | mit | 157 | 0
from bokeh.plotting import figure, output_file, show
p = figure(width=400, height=400)
p.circle(2, 3, radius=.5, alpha=0.5)
output_file('out.html')
show(p)
danielwpz/soybean | src/util/optimizer.py | Python | mit | 1,090 | 0.000917
class Optimizer:
    def __init__(self, model, params=None):
self.model = model
if params:
self.model.set_params(**params)
self.params = self.model.get_params()
self.__chain = list()
def step(self, name, values, skipped=False):
if not skipped:
self.__chain.append({
'pname': name,
'pvalues': values
})
return self
def solve(self, evaluator):
score = -1
for param in self.__chain:
self.model.set_params(**self.params) # set previous best param
results = [(evaluator(self.model.set_params(**{param['pname']: value})), value)
for value in param['pvalues']]
results = sorted(results, lambda a, b: -1 if a[0] < b[0] else 1)
print param['pname']
for result in results:
print result[1], ' : ', result[0]
# update best params
self.params[param['pname']] = results[0][1]
score = results[0][0]
return score
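# --- Illustrative usage sketch (not part of the original module) ---
# Assumes scikit-learn is available; the model, parameter values and evaluator
# below are made up purely for demonstration. solve() keeps the value with the
# lowest evaluator score, so the evaluator returns a negated accuracy here.
if __name__ == '__main__':
    from sklearn.datasets import load_iris
    from sklearn.model_selection import cross_val_score
    from sklearn.tree import DecisionTreeClassifier
    X, y = load_iris(return_X_y=True)
    def evaluator(model):
        return -cross_val_score(model, X, y, cv=3).mean()
    opt = Optimizer(DecisionTreeClassifier())
    best_score = opt.step('max_depth', [2, 3, 5]) \
                    .step('min_samples_leaf', [1, 5]) \
                    .solve(evaluator)
    print best_score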
kreatorkodi/repository.torrentbr | plugin.video.yatp/site-packages/hachoir_core/field/field.py | Python | gpl-2.0 | 8,646 | 0.002429
"""
Parent of all (field) classes in Hachoir: Field.
"""
from hachoir_core.compatibility import reversed
from hachoir_core.stream import InputFieldStream
from hachoir_core.error import HachoirError, HACHOIR_ERRORS
from hachoir_core.log import Logger
from hachoir_core.i18n import _
from hachoir_core.tools import makePrintable
from weakref import ref as weakref_ref
class FieldError(HachoirError):
"""
Error raised by a L{Field}.
@see: L{HachoirError}
"""
pass
def joinPath(path, name):
if path != "/":
return "/".join((path, name))
else:
return "/%s" % name
class MissingField(KeyError, FieldError):
def __init__(self, field, key):
KeyError.__init__(self)
self.field = field
self.key = key
def __str__(self):
return 'Can\'t get field "%s" from %s' % (self.key, self.field.path)
def __unicode__(self):
return u'Can\'t get field "%s" from %s' % (self.key, self.field.path)
class Field(Logger):
    # static size can have three different values: None (no static size), an
# integer (number of bits), or a function which returns an integer.
#
    # This function receives exactly the same arguments as the constructor
    # except the first one. Example of function:
# static_size = staticmethod(lambda *args, **kw: args[1])
static_size = None
# Indicate if this field contains other fields (is a field set) or not
is_field_set = False
def __init__(self, parent, name, size=None, description=None):
"""
Set default class attributes, set right address if None address is
given.
@param parent: Parent field of this field
@type parent: L{Field}|None
        @param name: Name of the field, has to be unique in parent. If it ends
with "[]", end will be replaced with "[new_id]" (eg. "raw[]"
becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.)
@type name: str
@param size: Size of the field in bit (can be None, so it
will be computed later)
@type size: int|None
@param address: Address in bit relative to the parent absolute address
@type address: int|None
@param description: Optional string description
@type description: str|None
"""
assert issubclass(parent.__class__, Field)
assert (size is None) or (0 <= size)
self._parent = parent
if not name:
raise ValueError("empty field name")
self._name = name
self._address = parent.nextFieldAddress()
self._size = size
self._description = description
def _logger(self):
return self.path
def createDescription(self):
return ""
def _getDescription(self):
if self._description is None:
try:
self._description = self.createDescription()
if isinstance(self._description, str):
self._description = makePrintable(
self._description, "ISO-8859-1", to_unicode=True)
except HACHOIR_ERRORS, err:
self.error("Error getting description: " + unicode(err))
self._description = ""
return self._description
description = property(_getDescription,
doc="Description of the field (string)")
def __str__(self):
return self.display
def __unicode__(self):
return self.display
def __repr__(self):
return "<%s path=%r, address=%s, size=%s>" % (
self.__class__.__name__, self.path, self._address, self._size)
def hasValue(self):
return self._getValue() is not None
def createValue(self):
raise NotImplementedError()
def _getValue(self):
try:
value = self.createValue()
except HACHOIR_ERRORS, err:
self.error(_("Unable to create value: %s") % unicode(err))
value = None
self._getValue = lambda: value
return value
value = property(lambda self: self._getValue(), doc="Value of field")
def _getParent(self):
return self._parent
parent = property(_getParent, doc="Parent of this field")
def createDisplay(self):
return unicode(self.value)
def _getDisplay(self):
if not hasattr(self, "_Field__display"):
try:
self.__display = self.createDisplay()
except HACHOIR_ERRORS, err:
self.error("Unable to create display: %s" % err)
self.__display = u""
return self.__display
display = property(lambda self: self._getDisplay(),
doc="Short (unicode) string which represents field content")
def createRawDisplay(self):
value = self.value
if isinstance(value, str):
return makePrintable(value, "ASCII", to_unicode=True)
else:
return unicode(value)
def _getRawDisplay(self):
if not hasattr(self, "_Field__raw_display"):
try:
self.__raw_display = self.createRawDisplay()
except HACHOIR_ERRORS, err:
self.error("Unable to create raw display: %s" % err)
self.__raw_display = u""
return self.__raw_display
raw_display = property(lambda self: self._getRawDisplay(),
doc="(Unicode) string which represents raw field content")
def _getName(self):
return self._name
name = property(_getName,
doc="Field name (unique in its parent field set list)")
def _getIndex(self):
if not self._parent:
return None
return self._parent.getFieldIndex(self)
index = property(_getIndex)
def _getPath(self):
if not self._parent:
return '/'
names = []
field = self
while field is not None:
names.append(field._name)
field = field._parent
names[-1] = ''
return '/'.join(reversed(names))
path = property(_getPath,
doc="Full path of the field starting at root field")
def _getAddress(self):
return self._address
address = property(_getAddress,
doc="Relative address in bit to parent address")
def _getAbsoluteAddress(self):
address = self._address
current = self._parent
while current:
address += current._address
current = current._parent
return address
absolute_address = property(_getAbsoluteAddress,
doc="Absolute address (from stream beginning) in bit")
def _getSize(self):
return self._size
size = property(_getSize, doc="Content size in bit")
def _getField(self, name, const):
if name.strip("."):
return None
field = self
for index in xrange(1, len(name)):
field = field._parent
if field is None:
break
return field
def getField(self, key, const=True):
if key:
if key[0] == "/":
if self._parent:
current = self._parent.root
else:
current = self
if len(key) == 1:
return current
key = key[1:]
else:
current = self
for part in key.split("/"):
                field = current._getField(part, const)
if field is None:
raise MissingField(current, part)
current = field
return current
raise KeyError("Key must not be an empty string!")
def __getitem__(self, key):
return self.getField(key, False)
def __contains__(self, key):
try:
return self.getField(key, False) is not None
except FieldError:
return False
def _createInputStream(self, **args):
assert self._parent
return InputFieldStream(self, **args)
def getSubIStream(self):
if hasattr(self, "_sub_istream"):
stream = self._sub_istream()
else:
stream = None
if stream is None:
stream = self._createInputStream
ssdi-drive/nuxeo-drive | nuxeo-drive-client/tests/test_long_path.py | Python | lgpl-2.1 | 3,664 | 0.000819
# coding: utf-8
import os
import sys
from nxdrive.logging_config import get_logger
from nxdrive.utils import safe_long_path
from tests.common_unit_test import UnitTestCase
if sys.platform == 'win32':
import win32api
log = get_logger(__name__)
# Number of chars in path c://.../Nuxeo.. is approx 96 chars
FOLDER_A = 'A' * 90
FOLDER_B = 'B' * 90
FOLDER_C = 'C' * 90
FOLDER_D = 'D' * 50
class TestLongPath(UnitTestCase):
def setUp(self):
UnitTestCase.setUp(self)
self.local_1 = self.local_client_1
self.remote_1 = self.remote_document_client_1
log.info("Create a folder AAAA... (90 chars) in server")
self.folder_a = self.remote_1.make_folder("/", FOLDER_A)
self.folder_b = self.remote_1.make_folder(self.folder_a, FOLDER_B)
self.folder_c = self.remote_1.make_folder(self.folder_b, FOLDER_C)
self.remote_1.make_file(self.folder_c, "File1.txt", "Sample Content")
def tearDown(self):
log.info("Delete the folder AAA... in server")
self.remote_1.delete(self.folder_a, use_trash=False)
UnitTestCase.tearDown(self)
def test_long_path(self):
self.engine_1.start()
self.wait_sync(wait_for_async=True)
parent_path = os.path.join(self.local_1.abspath('/'),
FOLDER_A, FOLDER_B, FOLDER_C, FOLDER_D)
log.info("Creating folder with path: %s", parent_path)
if sys.platform == 'win32' and not os.path.exists(parent_path):
log.debug('Add \\\\?\\ prefix to path %r', parent_path)
parent_path = safe_long_path(parent_path)
os.makedirs(parent_path)
if sys.platform == 'win32':
log.info("Convert path of FOLDER_D\File2.txt to short path format")
parent_path = win32api.GetShortPathName(parent_path)
new_file = os.path.join(parent_path, "File2.txt")
log.info("Creating file with path: %s", new_file)
with open(new_file, "w") as f:
f.write("Hello world")
self.wait_sync(wait_for_async=True, timeout=45, fail_if_timeout=False)
remote_children_of_c = self.remote_1.get_children_info(self.folder_c)
children_names = [item.name for item in remote_children_of_c]
log.warn("Verify if FOLDER_D is uploaded to server")
self.assertIn(FOLDER_D, children_names)
folder_d = [item.uid for item in remote_children_of_c if item.name == FOLDER_D][0]
remote_children_of_d = self.remote_1.get_children_info(folder_d)
children_names = [item.name for item in remote_children_of_d]
log.warn("Verify if FOLDER_D\File2.txt is uploaded to server")
self.assertIn('File2.txt', children_names)
def test_setup_on_long_path(self):
""" NXDRIVE 689: Fix error when adding a new account when installation
path is greater than 245 characters.
"""
self.engine_1.stop()
self.engine_1.reinit()
# On Mac, avoid permission denied error
self.engine_1.get_local_client().clean_xattr_root()
test_folder_len = 245 - len(str(self.local_nxdrive_folder_1))
test_folder = 'A' * test_folder_len
        self.local_nxdrive_folder_1 = os.path.join(self.local_nxdrive_folder_1,
                                                   test_folder)
self.assertTrue(len(self.local_nxdrive_folder_1) > 245)
self.manager_1.unbind_all()
self.engine_1 = self.manager_1.bind_server(
self.local_nxdrive_folder_1, self.nuxeo_url, self.user_2,
self.password_2, start_engine=False)
self.engine_1.start()
self.engine_1.stop()
ameyjadiye/nxweb | sample_config/python/hello.py | Python | lgpl-3.0 | 2,478 | 0.01937
from cgi import parse_qs, escape, FieldStorage
import time
import shutil
def ping_app(environ, start_response):
status = '200 OK'
output = 'Pong!'
response_headers = [('Content-type', 'text/plain'),
('Content-Length', str(len(output)))]
start_response(status, response_headers)
return [output]
def hello_world_app(environ, start_response):
parameters=parse_qs(environ.get('QUERY_STRING', ''))
if 'sleep' in parameters:
time.sleep(5)
if 'subject' in parameters:
subject=escape(parameters['subject'][0])
else:
subject='World'
start_response('200 OK', [('Content-Type', 'text/html;charset=utf-8')])
result=u'<p>Hello, %(subject)s!</p>\n' % {'subject': subject}
for key, value in iter(sorted(environ.iteritems())):
result+='<p>'+html_escape(key)+'='+html_escape(value)+'</p>\n'
    content_length=int(environ.get('CONTENT_LENGTH', 0) or 0)
    if content_length and content_length<100:
result+='bytes read='+environ['wsgi.input'].read()
return [result.encode('utf-8')]
def file_upload_app(environ, start_response):
result=''
if environ['REQUEST_METHOD'].upper()=='POST':
start_response('200 OK', [('Content-Type', 'text/plain;charset=utf-8')])
try:
fs=FieldStorage(fp=environ['wsgi.input'], environ=environ, keep_blank_values=True, strict_parsing=True)
if fs.list:
count=0
for item in fs.list:
if item.filename:
count+=1
result+='%s: file; %s, %s, %s, %r\n' % (item.name, item.filename, item.type, item.disposition, item.file)
with open('fupl-'+str(count), 'w') as fdst:
                            shutil.copyfileobj(item.file, fdst, 8192)
if hasattr(item.file, 'close'):
                                item.file.close()
else:
result+='%s: value; %s\n' % (item.name, item.value)
except Exception as e:
result='multipart data parse failure: '+repr(e)
else:
start_response('200 OK', [('Content-Type', 'text/html;charset=utf-8')])
result='''
<form action="/py" method="post" enctype="multipart/form-data">
Category: <input type="text" name="category" />
Select file(s): <input type="file" name="upload" multiple />
<input type="submit" value="Start upload!" />
</form>'''
return [result]
def html_escape(s):
if not s: return ''
return unicode(s).replace('&', '&').replace('<', '<').replace('>', '>').replace('"', '"').replace('\'', ''')
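# --- Illustrative usage sketch (not part of the original sample) ---
# Serves hello_world_app with the stdlib WSGI reference server so the handler
# can be exercised without nxweb; the host and port below are arbitrary.
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    httpd = make_server('127.0.0.1', 8080, hello_world_app)
    httpd.serve_forever()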
linvictor88/vse-lbaas-driver | quantum/plugins/nicira/QuantumPlugin.py | Python | apache-2.0 | 104,734 | 0.000134
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Aaron Rosen, Nicira Networks, Inc.
import hashlib
import logging
import os
from oslo.config import cfg
from sqlalchemy.orm import exc as sa_exc
import webob.exc
from quantum.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from quantum.api.v2 import attributes as attr
from quantum.api.v2 import base
from quantum.common import constants
from quantum.common import exceptions as q_exc
from quantum.common import rpc as q_rpc
from quantum.common import topics
from quantum.common import utils
from quantum import context as q_context
from quantum.db import agents_db
from quantum.db import agentschedulers_db
from quantum.db import api as db
from quantum.db import db_base_plugin_v2
from quantum.db import dhcp_rpc_base
from quantum.db import l3_db
from quantum.db import models_v2
from quantum.db import portsecurity_db
from quantum.db import quota_db # noqa
from quantum.db import securitygroups_db
from quantum.extensions import l3
from quantum.extensions import portsecurity as psec
from quantum.extensions import providernet as pnet
from quantum.extensions import securitygroup as ext_sg
from quantum.openstack.common import importutils
from quantum.openstack.common import rpc
from quantum.plugins.nicira.common import config # noqa
from quantum.plugins.nicira.common import exceptions as nvp_exc
from quantum.plugins.nicira.common import metadata_access as nvp_meta
from quantum.plugins.nicira.common import securitygroups as nvp_sec
from quantum.plugins.nicira.extensions import nvp_networkgw as networkgw
from quantum.plugins.nicira.extensions import nvp_qos as ext_qos
from quantum.plugins.nicira import nicira_db
from quantum.plugins.nicira import nicira_networkgw_db as networkgw_db
from quantum.plugins.nicira import nicira_qos_db as qos_db
from quantum.plugins.nicira import nvp_cluster
from quantum.plugins.nicira.nvp_plugin_version import PLUGIN_VERSION
from quantum.plugins.nicira import NvpApiClient
from quantum.plugins.nicira import nvplib
LOG = logging.getLogger("QuantumPlugin")
NVP_NOSNAT_RULES_ORDER = 10
NVP_FLOATINGIP_NAT_RULES_ORDER = 224
NVP_EXTGW_NAT_RULES_ORDER = 255
NVP_EXT_PATH = os.path.join(os.path.dirname(__file__), 'extensions')
# Provider network extension - allowed network types for the NVP Plugin
class NetworkTypes:
"""Allowed provider network types for the NVP Plugin."""
L3_EXT = 'l3_ext'
STT = 'stt'
GRE = 'gre'
FLAT = 'flat'
VLAN = 'vlan'
def create_nvp_cluster(cluster_opts, concurrent_connections,
nvp_gen_timeout):
# NOTE(armando-migliaccio): remove this block once we no longer
# want to support deprecated options in the nvp config file
# ### BEGIN
config.register_deprecated(cfg.CONF)
# ### END
cluster = nvp_cluster.NVPCluster(**cluster_opts)
api_providers = [ctrl.split(':') + [True]
for ctrl in cluster.nvp_controllers]
cluster.api_client = NvpApiClient.NVPApiHelper(
api_providers, cluster.nvp_user, cluster.nvp_password,
request_timeout=cluster.req_timeout,
http_timeout=cluster.http_timeout,
retries=cluster.retries,
redirects=cluster.redirects,
concurrent_connections=concurrent_connections,
nvp_gen_timeout=nvp_gen_timeout)
return cluster
class NVPRpcCallbacks(dhcp_rpc_base.DhcpRpcCallbackMixin):
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def create_rpc_dispatcher(self):
'''Get the rpc dispatcher for this manager.
If a manager would like to set an rpc API version, or support more than
one class as the target of rpc messages, override this method.
'''
return q_rpc.PluginRpcDispatcher([self,
agents_db.AgentExtRpcCallback()])
class NvpPluginV2(db_base_plugin_v2.QuantumDbPluginV2,
l3_db.L3_NAT_db_mixin,
portsecurity_db.PortSecurityDbMixin,
securitygroups_db.SecurityGroupDbMixin,
networkgw_db.NetworkGatewayMixin,
qos_db.NVPQoSDbMixin,
nvp_sec.NVPSecurityGroups,
nvp_meta.NvpMetadataAccess,
agentschedulers_db.AgentSchedulerDbMixin):
"""L2 Virtual network plugin.
NvpPluginV2 is a Quantum plugin that provides L2 Virtual Network
functionality using NVP.
"""
supported_extension_aliases = ["provider", "quotas", "port-security",
"router", "security-group", "nvp-qos",
"network-gateway"]
__native_bulk_support = True
# Map nova zones to cluster for easy retrieval
novazone_cluster_map = {}
port_security_enabled_update = "update_port:port_security_enabled"
def __init__(self, loglevel=None):
if loglevel:
logging.basicConfig(level=loglevel)
nvplib.LOG.setLevel(loglevel)
NvpApiClient.LOG.setLevel(loglevel)
# Routines for managing logical ports in NVP
self._port_drivers = {
'create': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_create_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_create_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_create_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_create_router_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_create_l2_gw_port,
'default': self._nvp_create_port},
'delete': {l3_db.DEVICE_OWNER_ROUTER_GW:
self._nvp_delete_ext_gw_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_delete_router_port,
l3_db.DEVICE_OWNER_FLOATINGIP:
self._nvp_delete_fip_port,
l3_db.DEVICE_OWNER_ROUTER_INTF:
self._nvp_delete_port,
networkgw_db.DEVICE_OWNER_NET_GW_INTF:
self._nvp_delete_port,
'default': self._nvp_delete_port}
}
# If no api_extensions_path is provided set the following
if not cfg.CONF.api_extensions_path:
cfg.CONF.set_override('api_extensions_path', NVP_EXT_PATH)
self.nvp_opts = cfg.CONF.NVP
self.cluster = create_nvp_cluster(cfg.CONF,
self.nvp_opts.concurrent_connections,
self.nvp_opts.nvp_gen_timeout)
db.configure_db()
# Extend the fault map
self._extend_fault_map()
# Set up RPC interface for DHCP agent
self.setup_rpc()
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver)
# Set this flag to false as the default gateway has not
# been yet updated from the config file
self._is_default_net_gw_in_sync = False
def _ensure_default_network_gateway(self):
if self._is_default_net_gw_in_sync:
return
# Add the gw in the db as default, and unset any previous default
def_l2_gw_uuid = self.cluster.default_l2_gw_service_uuid
try:
ctx = q_context.get_admin_context()
tangentstorm/scenetool | spike/vue2svg.py | Python | mit | 5,403 | 0.004442
"""
vue2svg : spike/prototype for scenetool.
generates an svg scene from VUE files specified on command line.
usage:
python3.2 vue2svg.py ../test/vue/*.vue
https://github.com/tangentstorm/scenetool
copyright (c) 2013 michal j wallace.
available to the public under the MIT/x11 license. (see ../LICENSE)
"""
import os, sys, io, itertools as it
from collections import namedtuple
import sqlite3
from lxml import etree
DB_PATH = "vuedata.sdb" # note: will be wiped out on each run!
nsmap = {
'xsi':"http://www.w3.org/2001/XMLSchema-instance"
}
def xp(tree, path):
match = tree.xpath(path, namespaces=nsmap)
return match[0] if match else ''
VueData = namedtuple('VueData',
('parent ntype shape id ts x y w h text layer autosized'
' fill strokewidth strokecolor strokestyle textcolor'
' font id1 id2 p0x p0y p1x p1y ctrlcount arrowstate'
' c0x c0y c1x c1y').split( ))
def walk(tree, parent=0):
"""
    walk the tree recursively, extracting node data
"""
children = tree.xpath('child')
for child in children:
row = VueData(*([parent] +
[xp(child, path) for path in [
'@xsi:type',
'shape/@xsi:type',
'@ID',
'@created',
'@x',
'@y',
'@width',
'@height',
'@label',
'@layerID',
'@autoSized',
'fillColor/text()',
'strokeWidth/text()',
'strokeColor/text()',
'strokeStyle/text()',
'textColor/text()',
'font/text()',
'ID1/text()',
'ID2/text()',
'point1/@x',
'point1/@y',
'point2/@x',
'point2/@y',
'@controlCount',
'@arrowState',
'ctrlPoint0/@x',
'ctrlPoint0/@y',
'ctrlPoint1/@x',
'ctrlPoint1/@y' ]]))
yield row
for item in walk(child, row.id): yield item
def load(dbc, filename):
"""
load data from the vue file into the database
"""
# vue files are not valid xml because the doctype is misplaced.
# so to fix, just strip off the opening comments:
data = open(filename, 'r').read()
data = data[data.find('<?xml'):]
vue = etree.parse(io.BytesIO(bytes(data, 'ascii')))
cur = dbc.cursor()
cur.execute('insert into file values (?)', [filename])
fid = cur.lastrowid
for row in walk(vue, 0):
sql = 'insert into vuedata values (? %s)' \
% (', ? ' * len(VueData._fields))
cur.execute(sql, [fid] + list(row))
def connect():
return sqlite3.connect(DB_PATH, isolation_level=None) # autocommit
def main(filenames):
if os.path.exists(DB_PATH): os.unlink(DB_PATH)
dbc = connect()
cur = dbc.cursor()
cur.execute('create table if not exists file (filename string)')
sql = 'create table if not exists vuedata (fid integer, %s)' \
% ', '.join('%s data' % col for col in VueData._fields)
cur.execute(sql)
for filename in filenames:
load(dbc,filename)
dbc.close()
# run the scripts and check for error (non-zero exit code)
if ( os.system("sqlite3 %s < schema.sql" % DB_PATH)
+ os.system("sqlite3 %s < vue2elem.sql" % DB_PATH)
+ os.system("sqlite3 %s < views.sql" % DB_PATH)
) > 0: sys.exit()
dbc = connect()
cur = dbc.cursor()
def fetch_ntups(cur):
cols = [tup[0] for tup in cur.description]
ntup = namedtuple('row', cols)
for row in cur.fetchall():
yield ntup(*row)
def fetch_dicts(cur):
cols = [tup[0] for tup in cur.description]
for row in cur.fetchall():
yield dict(zip(cols, row))
print('<!doctype html>')
print('<html><head><title>vue2svg</title>')
print('<style type="text/css">')
cur.execute(
"""
SELECT s.rowid AS id, fg, bg, sc, sw, f.font
FROM style s LEFT JOIN font f ON s.font = f.rowid
""")
for row in fetch_dicts(cur):
print(' '.join(
"""
svg .style-{id} text {{
fill: {fg};
}}
svg .style-{id} {{
stroke: {sc};
stroke-width: {sw};
fill: {bg};
}}
""".format(**row).split()))
print('</style>')
print('</head>')
cur.execute("select * from scenes")
cols = [tup[0] for tup in cur.description]
ntup = namedtuple('rec', cols)
templates = {
'node':
'<rect x="{x}" y="{y}" class="style-{style}" '
' width="{w}" height="{h}" />',
'edge':
'<line x1="{x0}" y1="{y0}" class="style-{style}"'
' x2="{x1}" y2="{y1}" />',
}
print('<body>')
for filename, rows in it.groupby(cur.fetchall(), lambda r: r[0]):
print(' <h1>%s</h1>' % filename)
print(' <svg>')
for row in rows:
rec = ntup(*row)
print(' ',templates.get(rec.tag, rec.tag or '')
.format(**rec.__dict__))
print(' </svg>')
print('</body>')
print('</html>')
if __name__=="__main__":
if len(sys.argv) > 1: main(sys.argv[1:])
else: print(__doc__)
EmreAtes/spack | lib/spack/spack/test/cmd/find.py | Python | lgpl-2.1 | 3,528 | 0
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import argparse
import pytest
import spack.cmd.find
from spack.util.pattern import Bunch
@pytest.fixture(scope='module')
def parser():
"""Returns the parser for the module command"""
prs = argparse.ArgumentParser()
spack.cmd.find.setup_parser(prs)
return prs
@pytest.fixture()
def specs():
s = []
return s
@pytest.fixture()
def mock_display(monkeypatch, specs):
"""Monkeypatches the display function to return its first argument"""
def display(x, *args, **kwargs):
specs.extend(x)
monkeypatch.setattr(spack.cmd.find, 'display_specs', display)
def test_query_arguments():
query_arguments = spack.cmd.find.query_arguments
# Default arguments
args = Bunch(
only_missing=False,
missing=False,
unknown=False,
explicit=False,
implicit=False,
start_date="2018-02-23",
end_date=None
)
q_args = query_arguments(args)
assert 'installed' in q_args
assert 'known' in q_args
assert 'explicit' in q_args
assert q_args['installed'] is True
assert q_args['known'] is any
assert q_args['explicit'] is any
assert 'start_date' in q_args
assert 'end_date' not in q_args
# Check that explicit works correctly
    args.explicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is True
args.explicit = False
args.implicit = True
q_args = query_arguments(args)
assert q_args['explicit'] is False
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag1(parser, specs):
args = parser.parse_args(['--tags', 'tag1'])
spack.cmd.find.find(parser, args)
assert len(specs) == 2
assert 'mpich' in [x.name for x in specs]
assert 'mpich2' in [x.name for x in specs]
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag2(parser, specs):
args = parser.parse_args(['--tags', 'tag2'])
spack.cmd.find.find(parser, args)
assert len(specs) == 1
assert 'mpich' in [x.name for x in specs]
@pytest.mark.db
@pytest.mark.usefixtures('database', 'mock_display')
def test_tag2_tag3(parser, specs):
args = parser.parse_args(['--tags', 'tag2', '--tags', 'tag3'])
spack.cmd.find.find(parser, args)
assert len(specs) == 0
vvladych/forecastmgmt | src/forecastmgmt/model/person.py | Python | unlicense | 2,819 | 0.025186
__author__="vvladych"
__date__ ="$09.10.2014 23:01:15$"
from forecastmgmt.dao.db_connection import get_db_connection
import psycopg2.extras
from MDO import MDO
from person_name import PersonName
class Person(MDO):
sql_dict={"get_all":"SELECT sid, common_name, birth_date, birth_place, person_uuid FROM fc_person",
"insert":"INSERT INTO fc_person(common_name, birth_date, birth_place) VALUES(%s,%s,%s) RETURNING sid",
"delete":"DELETE FROM fc_person WHERE sid=%s",
"load":"SELECT sid, common_name, birth_date, birth_place, person_uuid FROM fc_person WHERE sid=%s",
"update_person":"UPDATE fc_person SET common_name=%s, birth_date=%s, birth_place=%s WHERE sid=%s"
}
def __init__(self, sid=None, common_name=None, birth_date=None, birth_place=None, person_uuid=None):
        super(Person, self).__init__(Person.sql_dict,sid,person_uuid)
self.common_name=common_name
self.birth_date=birth_date
self.birth_place=birth_place
if sid!=None:
self.names=PersonName().get_all_for_foreign_key(self.sid)
else:
self.names=[]
def load_object_from_db(self,rec):
self.common_name=rec.common_name
self.birth_date=rec.birth_date
self.birth_place=rec.birth_place
self.uuid=rec.person_uuid
self.names=PersonName().get_all_for_foreign_key(self.sid)
def get_insert_data(self):
return (self.common_name,self.birth_date,self.birth_place)
def insert(self):
super(Person, self).insert()
for name in self.names:
name.person_sid=self.sid
name.insert()
get_db_connection().commit()
def add_name(self, person_name_sid, person_name_role, person_sid, namepart_list):
self.names.append(PersonName(person_name_sid, person_name_role, person_sid, namepart_list))
def fabric_method(self,rec):
return Person(rec.sid, rec.common_name, rec.birth_date, rec.birth_place, rec.person_uuid)
def update(self, other):
cur=get_db_connection().cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
data=(other.common_name, other.birth_date, other.birth_place, self.sid)
cur.execute(Person.sql_dict["update_person"],data)
cur.close()
# update person_names
# delete outdated person_names
for person_name in self.names:
if person_name not in other.names:
person_name.delete()
for person_name in other.names:
if person_name not in self.names:
person_name.insert()
get_db_connection().commit()
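# --- Illustrative usage sketch (not part of the original module) ---
# Assumes a configured fc_person schema behind get_db_connection(); the values
# below are made up purely for demonstration.
#
#     person = Person(common_name='Ada Lovelace', birth_date='1815-12-10',
#                     birth_place='London')
#     person.insert()  # persists the person (and any attached names), then commits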
OPM/opm-common | python/tests/test_field_props.py | Python | gpl-3.0 | 2,742 | 0.006929
import unittest
import opm.io
import numpy as np
from opm.io.parser import Parser
from opm.io.deck import DeckKeyword
from opm.io.ecl_state import EclipseState
try:
from tests.utils import test_path
except ImportError:
from utils import test_path
class TestFieldProps(unittest.TestCase):
def assertClose(self, expected, observed, epsilon=1e-08):
diff = abs(expected - observed)
err_msg = '|%g - %g| = %g > %g' % (expected, observed, diff, epsilon)
self.assertTrue(diff <= epsilon, msg=err_msg)
def setUp(self):
parser = Parser()
deck = parser.parse(test_path('spe3/SPE3CASE1.DATA'))
        int_array = np.ones(324)
actnum_kw = DeckKeyword( parser["ACTNUM"], int_array)
deck.add(actnum_kw)
self.spe3 = EclipseState(deck)
self.props = self.spe3.field_props()
def test_contains(self):
p = self.props
self.assertTrue('PORO' in p)
self.assertFalse('NONO' in p)
        self.assertTrue('PORV' in p)
self.assertTrue('ACTNUM' in p)
def test_getitem(self):
p = self.props
poro = p.get_double_array('PORO')
self.assertEqual(324, len(poro))
self.assertEqual(0.13, poro[0])
self.assertTrue( 'PERMX' in p )
px = p.get_double_array('PERMX')
print(len(px))
self.assertEqual(324, len(px))
self.assertEqual(324, len(p.get_int_array('ACTNUM')))
def test_permx_values(self):
def md2si(md):
#millidarcy->SI
return md * 1e-3 * 9.869233e-13
field_props = self.props
grid = self.spe3.grid()
permx = field_props.get_double_array('PERMX')
print('set(PERMX) = %s' % set(permx))
# 130mD, 40mD, 20mD, and 150mD, respectively, top to bottom
darcys = {0:md2si(130), 1:md2si(40), 2:md2si(20), 3:md2si(150)}
for i in range(grid.nx):
for j in range(grid.ny):
for k in range(grid.nz):
g_idx = grid.globalIndex(i,j,k)
perm = permx[g_idx]
darcy = darcys[k]
self.assertClose(darcy, perm)
def test_volume(self):
grid = self.spe3.grid()
for i in range(grid.nx):
for j in range(grid.ny):
for k in range(grid.nz):
g_idx = grid.globalIndex(i,j,k)
exp = 293.3 * 293.3 * 30 # cubicfeet = 73 078.6084 cubic meter
exp *= (12*0.0254)**3 # cubic feet to cubic meter
if k == 0:
self.assertClose(exp, grid.getCellVolume(g_idx))
self.assertEqual(grid.getCellVolume(g_idx), grid.getCellVolume(i, j, k))
mmahnic/trac-tickethistory | tickethistory/test/workdays_t.py | Python | mit | 4,805 | 0.028512
from ..workdays import *
from datetime import datetime, timedelta
from time import strptime
import math
import traceback
tests=[]
def test( fn ):
tests.append(fn)
return fn
def runTests():
for t in tests:
print t
try: t()
except Exception as e:
print e
traceback.print_exc()
print
def _parse_date( datestr ):
return datetime(*strptime(datestr, "%Y-%m-%d")[0:5]).date()
def _parse_datetime( datestr ):
if type(datestr) == type(""):
return datetime(*strptime(datestr, "%Y-%m-%d %H:%M")[0:5])
elif type(datestr) == type((1,2)):
return datetime(*datestr)
elif type(datestr) == type(dt.datetime(1900,1,1)):
return datestr
return None
def _is_same_dt( d1, d2, numParts=5 ):
return d1.timetuple()[:numParts] == d2.timetuple()[:numParts]
@test
def shouldEstimateEnd():
def test( d1, d2, total, remaining, dexp ):
d1 = _parse_datetime(d1)
d2 = _parse_datetime(d2)
dexp = _parse_date(dexp)
dres = estimate_end( d1, d2, total, remaining )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 3 ) )
# Monday 2017-03-06
test( "2017-03-06 00:00", "2017-03-07 00:00", 2, 1, "2017-03-08" )
test( "2017-03-06 00:00", "2017-03-08 00:00", 2, 1, "2017-03-10" )
test( "2017-03-06 00:00", "2017-03-09 00:00", 2, 1, "2017-03-12" )
test( "2017-03-06 00:00", "2017-03-10 00:00", 2, 1, "2017-03-14" )
test( "2017-03-06 00:00", "2017-03-13 00:00", 2, 1, "2017-03-20" )
@test
def shouldAdjustStart():
def test( d1, dexp ):
dexp = _parse_datetime(dexp)
dres = adjusted_start( _parse_datetime( d1 ) )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 5 ) )
# Monday 2017-03-06
test( "2017-03-06 08:00", "2017-03-06 08:00" )
test( "2017-03-07 08:00", "2017-03-07 08:00" )
test( "2017-03-08 08:00", "2017-03-08 08:00" )
test( "2017-03-09 08:00", "2017-03-09 08:00" )
test( "2017-03-10 08:00", "2017-03-10 08:00" )
test( "2017-03-11 08:00", "2017-03-13 00:00" )
test( "2017-03-12 08:00", "2017-03-13 00:00" )
@test
def shouldAdjustEnd():
def test( d1, dexp ):
dexp = _parse_datetime(dexp)
dres = adjusted_end( _parse_datetime( d1 ) )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 5 ) )
# Monday 2017-03-06
test( "2017-03-06 08:00", "2017-03-06 08:00" )
test( "2017-03-07 08:00", "2017-03-07 08:00" )
test( "2017-03-08 08:00", "2017-03-08 08:00" )
test( "2017-03-09 08:00", "2017-03-09 08:00" )
test( "2017-03-10 08:00", "2017-03-10 08:00" )
test( "2017-03-11 08:00", "2017-03-10 23:59" )
test( "2017-03-12 08:00", "2017-03-10 23:59" )
@test
def shouldEstimateEndWorkdays():
def test( d1, d2, total, remaining, dexp ):
d1 = _parse_datetime(d1)
d2 = _parse_datetime(d2)
dexp = _parse_datetime(dexp)
dres = estimate_end_workdays( d1, d2, total, remaining )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 3 ) )
# Monday 2017-03-06
# same week
test( "2017-03-06 08:00", "2017-03-07 08:00", 2, 1, "2017-03-08 08:00" )
test( "2017-03-06 08:00", "2017-03-08 08:00", 2, 1, "2017-03-10 08:00" )
# projection spans weekends
test( "2017-03-06 08:00", "2017-03-09 08:00", 2, 1, "2017-03-14 08:00" )
test( "2017-03-06 08:00", "2017-03-10 08:00", 2, 1, "2017-03-16 08:00" )
# a weekend is in the completed time, estimate falls on weekend
# 06 07 08 09 10 w11 w12 13 14 15 16 17 w18 w19 20
test( "2017-03-06 08:00", "2017-03-13 08:00", 2, 1, "2017-03-20 08:00" )
# Start on weekend
test( "2017-03-05 08:00", "2017-03-10 08:00", 2, 1, "2017-03-16 08:00" )
test( "2017-03-04 08:00", "2017-03-10 08:00", 2, 1, "2017-03-16 08:00" )
# Start and now on weekend
test( "2017-03-05 08:00", "2017-03-11 08:00", 2, 1, "2017-03-17 23:59" )
test( "2017-03-04 08:00", "2017-03-12 08:00", 2, 1, "2017-03-17 23:59" )
@test
def shouldEstimateEndWorkdays2():
def test( d1, d2, total, remaining, dexp ):
d1 = _parse_datetime(d1)
d2 = _parse_datetime(d2)
dexp = _parse_datetime(dexp)
        dres = estimate_end_workdays( d1, d2, total, remaining )
print "expected: %s, actual %s, %s" % (dexp, dres, _is_same_dt( dres, dexp, 3 ) )
if not _is_same_dt( dres, dexp ):
print " diff:", dres - dexp
# Monday 2017-03-06
    d1 = dt.datetime(2017, 03, 06, 8 )
d2 = dt.datetime(2017, 03, 13, 8 )
for done in xrange(1, 22, 5):
dexp = d2 + dt.timedelta( weeks=done )
print done, dt.timedelta( weeks=done ),
test( d1, d2, done+1, done, dexp )
runTests()
fasaxc/felix | calico/felix/test/test_frules.py | Python | apache-2.0 | 13,792 | 0.008338
# -*- coding: utf-8 -*-
# Copyright 2014 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.test.test_frules
~~~~~~~~~~~
Tests for fiptables. Much of this module is tested in test_felix, but this covers
some parts that are not.
"""
from copy import copy
import logging
import mock
import unittest
import calico.felix.frules as frules
from calico.felix.futils import IPV4, IPV6, FailedSystemCall
import calico.felix.ipsets
import calico.felix.test.stub_ipsets as stub_ipsets
# Expected state
expected_ipsets = stub_ipsets.IpsetState()
# Logger
log = logging.getLogger(__name__)
class TestUpdateIpsets(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Completely replace the ipsets modules.
cls.real_ipsets = calico.felix.ipsets
        frules.ipsets = stub_ipsets
@classmethod
def tearDownClass(cls):
# Reinstate the modules we overwrote
frules.ipsets = cls.real_ipsets
def setUp(self):
stub_ipsets.reset()
        # Set the expected IP tables state to be clean.
expected_ipsets.reset()
def create_ipsets(self, family):
stub_ipsets.create("ipset_port", "hash:net,port", family)
stub_ipsets.create("ipset_addr", "hash:net", family)
stub_ipsets.create("ipset_icmp", "hash:net", family)
expected_ipsets.create("ipset_port", "hash:net,port", family)
expected_ipsets.create("ipset_addr", "hash:net", family)
expected_ipsets.create("ipset_icmp", "hash:net", family)
stub_ipsets.create("tmp_ipset_port", "hash:net,port", family)
stub_ipsets.create("tmp_ipset_addr", "hash:net", family)
stub_ipsets.create("tmp_ipset_icmp", "hash:net", family)
expected_ipsets.create("tmp_ipset_port", "hash:net,port", family)
expected_ipsets.create("tmp_ipset_addr", "hash:net", family)
expected_ipsets.create("tmp_ipset_icmp", "hash:net", family)
if family == "inet":
addr = "9.8.7.6/24"
else:
addr = "9:8:7::6/64"
# Shove some junk into ipsets that will be tidied away.
stub_ipsets.add("ipset_addr", addr)
stub_ipsets.add("ipset_port", addr + ",tcp:123")
stub_ipsets.add("ipset_icmp", addr)
def tearDown(self):
pass
def test_empty_ipsets(self):
"""
Empty ipsets.
"""
description = "Description : blah"
suffix = "whatever"
rule_list = []
self.create_ipsets("inet")
frules.update_ipsets(IPV4,
description,
suffix,
rule_list,
"ipset_addr",
"ipset_port",
"ipset_icmp",
"tmp_ipset_addr",
"tmp_ipset_port",
"tmp_ipset_icmp")
stub_ipsets.check_state(expected_ipsets)
def test_ipv4_ipsets(self):
"""
IPv4 ipsets
"""
description = "description"
suffix = "suffix"
rule_list = []
default_cidr = "1.2.3.4/24"
self.create_ipsets("inet")
# Ignored rules
rule_list.append({ 'blah': "junk" }) # no CIDR
rule_list.append({ 'cidr': "junk" }) # junk CIDR
rule_list.append({ 'cidr': "::/64" }) # IPv6, not v4
rule_list.append({ 'cidr': default_cidr,
'port': 123 }) # port, no protocol
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': "blah" }) # bad port
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': ["blah", "bloop"] }) # bad port range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [0, 123] }) # bad port in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [1, 2, 3] }) # not two in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "tcp",
'port': [1] }) # not two in range
rule_list.append({ 'cidr': default_cidr,
'protocol': "icmp",
'port': "1" }) # port not allowed
rule_list.append({ 'cidr': default_cidr,
'protocol': "ipv6-icmp",
'port': "1" }) # port not allowed
rule_list.append({ 'cidr': default_cidr,
'protocol': "icmp",
'icmp_code': "1" }) # code without type
rule_list.append({ 'cidr': default_cidr,
'protocol': "blah",
'port': "1" }) # port not allowed for protocol
# Better rules
rule_list.append({ 'cidr': "1.2.3.4/24" })
expected_ipsets.add("ipset_addr", "1.2.3.4/24")
rule_list.append({ 'cidr': "10.0.10.0/0",
'protocol': "tcp"})
expected_ipsets.add("ipset_port", "0.0.0.0/1,tcp:1-65535")
expected_ipsets.add("ipset_port", "128.0.0.0/1,tcp:1-65535")
rule_list.append({ 'cidr': "1.0.0.1/8",
'protocol': "udp",
'port': [2,10]})
expected_ipsets.add("ipset_port", "1.0.0.1/8,udp:2-10")
rule_list.append({ 'cidr': "1.0.0.2/8",
'protocol': "sctp",
'port': "2"})
expected_ipsets.add("ipset_port", "1.0.0.2/8,sctp:2")
rule_list.append({ 'cidr': "1.0.0.3/8",
'protocol': "udplite",
'port': [2,10]})
expected_ipsets.add("ipset_port", "1.0.0.3/8,udplite:2-10")
rule_list.append({ 'cidr': "1.0.0.4/8",
'protocol': "icmp" })
expected_ipsets.add("ipset_icmp", "1.0.0.4/8")
rule_list.append({ 'cidr': "1.0.0.5/8",
'protocol': "icmp",
'icmp_type': 123})
expected_ipsets.add("ipset_port", "1.0.0.5/8,icmp:123/0")
rule_list.append({ 'cidr': "1.0.0.6/8",
'protocol': "icmp",
'icmp_type': "type"})
expected_ipsets.add("ipset_port", "1.0.0.6/8,icmp:type")
rule_list.append({ 'cidr': "1.0.0.7/8",
'protocol': "icmp",
'icmp_type': 123,
'icmp_code': "code"})
expected_ipsets.add("ipset_port", "1.0.0.7/8,icmp:123/code")
rule_list.append({ 'cidr': "1.0.0.8/8",
'protocol': "icmp",
'icmp_type': "type",
'icmp_code': "code"}) # code ignored
expected_ipsets.add("ipset_port", "1.0.0.8/8,icmp:type")
rule_list.append({ 'cidr': "1.0.0.9/8",
'protocol': "blah" })
expected_ipsets.add("ipset_port", "1.0.0.9/8,blah:0")
frules.update_ipsets(IPV4,
description,
suffix,
rule_list,
"ipset_addr",
"ipset_port",
"ipset_icmp",
"tmp_ipset_addr",
"tmp_ipset_port",
"tmp_ipset_icmp")
        stub_ipsets.check_state(expected_ipsets)
rgayon/l2tdevtools | tests/update.py | Python | apache-2.0 | 2,954 | 0.008463
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the update tool."""
from __future__ import unicode_literals
import os
import sys
import unittest
from tools import update
from tests import test_lib
@unittest.skipIf(
os.environ.get('TRAVIS_OS_NAME') == 'osx',
'TLS 1.2 not supported by macOS on Travis')
class GithubRepoDownloadHelperTest(test_lib.BaseTestCase):
"""Tests for the GitHub repo download helper class."""
_DOWNLOAD_URL = 'https://github.com/ForensicArtifacts/artifacts/releases'
_PROJECT_NAME = 'artifacts'
_PROJECT_VERSION = '20180628'
def testGetPackageDownloadURLs(self):
"""Tests the GetPackageDownloadURLs function."""
download_helper = update.GithubRepoDownloadHelper(self._DOWNLOAD_URL)
package_download_urls = download_helper.GetPackageDownloadURLs(
preferred_machine_type='x86', preferred_operating_system='Windows')
if (sys.version_info[0] not in (2, 3) or
(sys.version_info[0] == 2 and sys.version_info[1] != 7) or
(sys.version_info[0] == 3 and sys.version_info[1] != 6)):
# Python versions other than 2.7 and 3.6 are not supported.
self.assertIsNone(package_download_urls)
else:
self.assertIsNotNone(package_download_urls)
expected_url = (
'https://github.com/log2timeline/l2tbinaries/raw/master/win32/'
'{0:s}-{1:s}.1.win32.msi').format(
self._PROJECT_NAME, self._PROJECT_VERSION)
self.assertIn(expected_url, package_download_urls)
@unittest.skipIf(
os.environ.get('TRAVIS_OS_NAME') == 'osx',
'TLS 1.2 not supported by macOS on Travis')
class DependencyUpdaterTest(test_lib.BaseTestCase):
"""Tests for the dependency updater class."""
# pylint: disable=protected-access
_PROJECT_NAME = 'dfvfs'
_PROJECT_VERSION = '20180510'
def testGetPackageFilenamesAndVersions(self):
"""Tests the GetPackageFilenamesAndVersions function."""
dependency_updater = update.DependencyUpdater(
preferred_machine_type='x86', preferred_operating_system='Windows')
package_filenames, package_versions = (
dependency_updater._GetPackageFilenamesAndVersions([]))
if (sys.version_info[0] not in (2, 3) or
(sys.version_info[0] == 2 and sys.version_info[1] != 7) or
(sys.version_info[0] == 3 and sys.version_info[1] != 6)):
# Python versions other than 2.7 and 3.6 are not supported.
self.assertIsNone(package_filenames)
self.assertIsNone(package_versions)
else:
self.assertIsNotNone(package_filenames)
self.assertIsNotNone(package_versions)
self.assertEqual(
package_filenames.get(self._PROJECT_NAME, None),
'{0:s}-{1:s}.1.win32.msi'.format(
self._PROJECT_NAME, self._PROJECT_VERSION))
self.assertEqual(
          package_versions.get(self._PROJECT_NAME, None),
[self._PROJECT_VERSION, '1'])
if __name__ == '__main__':
unittest.main()
flying-sheep/omnitool | plugins/tombstone.py | Python | mit | 1,209 | 0.01737
config = {
"name": "Tombstone counter", # plugin name
"type": "receiver", #plugin type
"description": ["counts tombstones in a world"] #description
}
import database as db # import terraria database
class Receiver(): # required class to be called by plugin manager
def __init__(self): #do any initialization stuff
self.tile_id = db.tiles.index("Tombstone") #we grab the ID of tombstone from database
def rec_header(self, header): #this is called by plugin manager when the header is read
print("Counting Tombstones for %s" % header["name"]) #so we print the name from header
    def rec_tiles(self, tiles): #called when tiles are ready
x = 0 #our counter variable
for column in tiles: # tiles come as 2D list
for tile in column: #so we need to get tiles like this
if tile[0] == self.tile_id: #tile[0] is the tile_id
x += 1 #increment counter for each found tombstone tile
print("Found %d Tombstones" % (x // 4)) #divide counter by 4, because each tombstone consists
|
of 4 "sub tiles"
        return False #signal plugin manager we are done and don't need any further data
nextBillyonair/compVision | AMX/Crystal/loop.py | Python | mit | 1,215 | 0.015638
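# Track the crystal loop across 24 images taken at 15-degree increments: threshold each
# frame, keep the larger contour, record its centroid Y coordinate, then fit a sine curve.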
import cvlib
angle = 0
angles = []
center = []
for i in range(24): #24
img = cvlib.load("findloop_%d.jpg" % angle)
angles.append(angle)
rng = cvlib.inRangeThresh(img, (20,30,20), (200,130,120))
rng = cvlib.bitNot(rng)
cnt = cvlib.findContours(rng, thresh=250)
if cvlib.area(cnt[0]) > cvlib.area(cnt[1]):
crystal = cnt[0]
else:
crystal = cnt[1]
centroid = cvlib.centroid(crystal)
    center.append(centroid[1])
cvlib.drawContour(img, crystal, thickness=10)
cvlib.plotCentroid(img, crystal, radius=7)
cvlib.display(img)
cvlib.save(img, "found%d.jpg" % angle)
angle += 15
cvlib.saveGraph(angles, center, "Y Coord Per Angle", "Angles in Degrees", "Original Data Coord", [0,360,0,400], filename="graph.png")
d = cvlib.approxSinCurve(center)
print d["amplitude"], d["phase shift"], d["vertical shift"]
cvlib.saveGraph(angles, d["data"], "Y Coord Per Angle", "Angles in Degrees", "Y Coord Centroid Best Fit", [0,360,0,400], style="--", filename="fit.png")
cvlib.makeGraph(angles, d["data"], "Y Coord Per Angle", "Angles in Degrees", "Y Coord Centroid", [0,360,0,400], style="r--")
# X = - (MC/PEL) * A * sin(phase)
# Y = - (MC/PEL) * A * cos(phase)
Hackfmi/Diaphanum | positions/migrations/0002_auto__del_positions__add_position.py | Python | mit | 1,927 | 0.006227
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Positions'
db.delete_table(u'positions_positions')
# Adding model 'Position'
db.create_table(u'positions_position', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 8, 21, 0, 0))),
('content', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal(u'positions', ['Position'])
def backwards(self, orm):
# Adding model 'Positions'
        db.create_table(u'positions_positions', (
('content', self.gf('django.db.models.fields.TextField')()),
('date', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(2013, 8, 21, 0, 0))),
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=100)),
))
db.send_create_signal(u'positions', ['Positions'])
# Deleting model 'Position'
db.delete_table(u'positions_position')
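    # Frozen ORM definitions that South uses to build the fake orm for this migration.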
models = {
u'positions.position': {
'Meta': {'object_name': 'Position'},
'content': ('django.db.models.fields.TextField', [], {}),
'date': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime(2013, 8, 21, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['positions']