| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable) |
|---|---|---|---|---|
Esmidth/Biligrab
|
refs/heads/master
|
biligrablite.py
|
2
|
'''
Biligrab Lite 0.21
Beining@ACICFG
cnbeining[at]gmail.com
http://www.cnbeining.com
MIT licence
'''
import sys
import os
from StringIO import StringIO
import gzip
import urllib2
import hashlib
from xml.dom.minidom import parseString
global vid
global cid
global partname
global title
global videourl
global part_now
global appkey
global secretkey
appkey = 'c1b107428d337928'
secretkey = 'ea85624dfcf12d7cc7b2b3a94fac1f2c'
def list_del_repeat(items):
"""Remove duplicate items from a list while preserving order.
http://www.cnblogs.com/infim/archive/2011/03/10/1979615.html"""
l2 = []
[l2.append(i) for i in items if i not in l2]
return l2
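# Illustrative example (not part of the original script): list_del_repeat keeps
# only the first occurrence of each item, so list_del_repeat([1, 2, 1, 3, 2])
# returns [1, 2, 3].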
#----------------------------------------------------------------------
def find_cid_api(vid, p):
"""find cid and print video detail"""
global cid
global partname
global title
global videourl
cookiepath = './bilicookies'
try:
cookies = open(cookiepath, 'r').readline()
#print(cookies)
except:
print('Cannot read cookie, may affect some videos...')
cookies = ''
cid = 0
title = ''
partname = ''
if str(p) == '0' or str(p) == '1':
str2Hash = 'appkey=85eb6835b0a1034e&id=' + str(vid) + '&type=xml2ad42749773c441109bdc0191257a664'
sign_this = hashlib.md5(str2Hash.encode('utf-8')).hexdigest()
biliurl = 'https://api.bilibili.com/view?appkey=85eb6835b0a1034e&id=' + str(vid) + '&type=xml&sign=' + sign_this
else:
str2Hash = 'appkey=85eb6835b0a1034e&id=' + str(vid) + '&page=' + str(p) + '&type=xml2ad42749773c441109bdc0191257a664'
sign_this = hashlib.md5(str2Hash.encode('utf-8')).hexdigest()
biliurl = 'https://api.bilibili.com/view?appkey=85eb6835b0a1034e&id=' + str(vid) + '&page=' + str(p) + '&type=xml&sign=' + sign_this
#print(biliurl)
videourl = 'http://www.bilibili.tv/video/av'+ str(vid)+'/index_'+ str(p)+'.html'
print('Fetching webpage...')
print(biliurl)
try:
request = urllib2.Request(biliurl, headers={ 'User-Agent' : 'Biligrab /0.8 (cnbeining@gmail.com)', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' , 'Cookie': cookies})
response = urllib2.urlopen(request)
data = response.read()
dom = parseString(data)
for node in dom.getElementsByTagName('cid'):
if node.parentNode.tagName == "info":
cid = node.toxml()[5:-6]
print('cid is ' + cid)
break
for node in dom.getElementsByTagName('partname'):
if node.parentNode.tagName == "info":
partname = node.toxml()[10:-11].strip()
print('partname is ' + partname)
break
for node in dom.getElementsByTagName('title'):
if node.parentNode.tagName == "info":
title = node.toxml()[7:-8].strip()
print('Title is ' + title.decode("utf-8"))
except: #If API failed
print('ERROR: Cannot connect to API server!')
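#----------------------------------------------------------------------
# Illustrative sketch (not part of the original script): find_cid_api() signs
# its requests by MD5-hashing the query string with a secret appended to it,
# as in the str2Hash lines above. The exact signing rules of the live API are
# an assumption based on that code.
def sign_query_example(query, secret):
    """Return the MD5 hex digest of query + secret, e.g. as the '&sign=' value."""
    return hashlib.md5((query + secret).encode('utf-8')).hexdigest()
# Example: sign_query_example('appkey=85eb6835b0a1034e&id=12345&type=xml', '2ad42749773c441109bdc0191257a664')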
#----------------------------------------------------------------------
def find_cid_flvcd(videourl):
""""""
global vid
global cid
global partname
global title
print('Fetching webpage via Flvcd...')
request = urllib2.Request(videourl, headers={ 'User-Agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.63 Safari/537.36', 'Cache-Control': 'no-cache', 'Pragma': 'no-cache' })
request.add_header('Accept-encoding', 'gzip')
response = urllib2.urlopen(request)
if response.info().get('Content-Encoding') == 'gzip':
buf = StringIO(response.read())
f = gzip.GzipFile(fileobj=buf)
data = f.read()
else:
data = response.read()
data_list = data.split('\n')
#Todo: read title
for lines in data_list:
if 'cid=' in lines:
cid = lines.split('&')
cid = cid[0].split('=')
cid = cid[-1]
print('cid is ' + str(cid))
break
#----------------------------------------------------------------------
def main(vid, p, oversea):
global cid
global partname
global title
global videourl
global is_first_run
videourl = 'http://www.bilibili.tv/video/av'+ str(vid)+'/index_'+ str(p)+'.html'
find_cid_api(vid, p)
global cid
if cid == 0:
print('Cannot find cid, trying a brute-force fallback...')
find_cid_flvcd(videourl)
if cid == 0:
is_black3 = str(raw_input('Strange, still cannot find cid... Type y to try the unpredictable way, enter the cid yourself, or press ENTER to quit.'))
if 'y' in str(is_black3):
vid = int(vid) - 1
p = 1
find_cid_api(vid, p)
cid = str(int(cid) + 1)
elif str(is_black3) == '':
print('Cannot get cid anyway! Quit.')
exit()
else:
cid = str(is_black3)
if len(partname) != 0:
filename = partname
elif title != '':
filename = title
else:
filename = cid
print('Fetching XML...')
os.system((u'curl -o "'+filename+u'.xml" --compressed http://comment.bilibili.cn/'+cid+u'.xml').encode(sys.stdout.encoding))
# curl --compressed already writes a decompressed file, so no separate gzip -d step is needed here.
print(u'The XML file, ' + filename + u'.xml should be ready...enjoy!')
#try api
#
vid = str(raw_input('av'))
p_raw = str(raw_input('P'))
if p_raw == '':
p_raw = '1'
oversea = '0'
p_list = []
p_raw = p_raw.split(',')
for item in p_raw:
if '~' in item:
#print(item)
lower = 0
higher = 0
item = item.split('~')
try:
lower = int(item[0])
except:
print('Cannot read lower!')
try:
higher = int(item[1])
except:
print('Cannot read higher!')
if lower == 0 or higher == 0:
if lower == 0 and higher != 0:
lower = higher
elif lower != 0 and higher == 0:
higher = lower
else:
print('Cannot find any higher or lower, ignoring...')
#break
mid = 0
if higher < lower:
mid = higher
higher = lower
lower = mid
p_list.append(lower)
while lower < higher:
lower = lower + 1
p_list.append(lower)
#break
else:
try:
p_list.append(int(item))
except:
print('Cannot read "'+str(item)+'", abandon it.')
#break
p_list = list_del_repeat(p_list)
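# Illustrative example (not part of the original script): for an input of
# '1~3,5' the loop above expands the range and the dedup yields p_list == [1, 2, 3, 5].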
global is_first_run
is_first_run = 0
part_now = '0'
print(p_list)
for p in p_list:
print(part_now)
part_now = str(p)
main(vid, p, oversea)
exit()
|
LiveZenLK/CeygateERP
|
refs/heads/master
|
addons/stock_dropshipping/__openerp__.py
|
27
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Drop Shipping',
'version': '1.0',
'category': 'Warehouse Management',
'summary': 'Drop Shipping',
'description': """
Manage drop shipping orders
===========================
This module adds a pre-configured Drop Shipping picking type
as well as a procurement route that allows configuring Drop
Shipping products and orders.
When drop shipping is used, the goods are transferred directly
from vendors to customers (direct delivery) without
going through the retailer's warehouse. In this case, no
internal transfer document is needed.
""",
'website': 'https://www.odoo.com/page/warehouse',
'depends': ['purchase', 'sale_stock'],
'data': ['stock_dropshipping.xml'],
'test': [
'../account/test/account_minimal_test.xml',
'../stock_account/test/stock_valuation_account.xml',
'test/cancellation_propagated.yml',
'test/crossdock.yml',
'test/dropship.yml',
'test/procurementexception.yml',
'test/lifo_price.yml'
],
'installable': True,
'auto_install': False,
}
|
gwwfps/boxrps
|
refs/heads/master
|
jinja2/sandbox.py
|
284
|
# -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.runtime import Undefined
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \
FrameType, GeneratorType
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
_mutable_spec = (
(_mutable_set_types, frozenset([
'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
'symmetric_difference_update', 'update'
])),
(_mutable_mapping_types, frozenset([
'clear', 'pop', 'popitem', 'setdefault', 'update'
])),
(_mutable_sequence_types, frozenset([
'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
])),
(deque, frozenset([
'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
'popleft', 'remove', 'rotate'
]))
)
def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = xrange(*args)
if len(rng) > MAX_RANGE:
raise OverflowError('range too big, maximum size for range is %d' %
MAX_RANGE)
return rng
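# Illustrative usage (not part of the original module): safe_range(10) behaves
# like xrange(10), while safe_range(MAX_RANGE + 1) raises OverflowError because
# the resulting range would exceed MAX_RANGE items.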
def unsafe(f):
"""
Mark a function or method as unsafe::
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True
return f
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(lambda: None, "func_code")
True
>>> is_internal_attribute((lambda x:x).func_code, 'co_code')
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or \
attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == 'mro':
return True
elif isinstance(obj, (CodeType, TracebackType, FrameType)):
return True
elif isinstance(obj, GeneratorType):
if attr == 'gi_frame':
return True
return attr.startswith('__')
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) would modify it if called. It also supports
the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
with Python 2.6 onwards the abstract base classes `MutableSet`,
`MutableMapping`, and `MutableSequence`.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object (such as unicode) `False` is
returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code, a :exc:`SecurityError` is
raised. However, other exceptions may also occur during rendering, so
the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals['range'] = safe_range
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith('_') or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (getattr(obj, 'unsafe_callable', False) or \
getattr(obj, 'alters_data', False))
def getitem(self, obj, argument):
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, basestring):
try:
attr = str(argument)
except:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined('access to attribute %r of %r '
'object is unsafe.' % (
attribute,
obj.__class__.__name__
), name=attribute, obj=obj, exc=SecurityError)
def call(__self, __context, __obj, *args, **kwargs):
"""Call an object from sandboxed code."""
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError('%r is not safely callable' % (__obj,))
return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj, attr, value):
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
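# Illustrative usage (a minimal sketch, not part of the original module): the
# immutable sandbox turns mutating calls on builtin containers into a
# SecurityError at render time.
#
#     env = ImmutableSandboxedEnvironment()
#     template = env.from_string(u'{{ seq.append(42) }}')
#     try:
#         template.render(seq=[1, 2, 3])
#     except SecurityError:
#         pass  # 'append' is rejected by modifies_known_mutable()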
|
shsingh/ansible
|
refs/heads/devel
|
test/units/modules/remote_management/oneview/test_oneview_datacenter_info.py
|
21
|
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
import pytest
from ansible.modules.remote_management.oneview.oneview_datacenter_info import DatacenterInfoModule
from hpe_test_utils import FactsParamsTest
PARAMS_GET_CONNECTED = dict(
config='config.json',
name="MyDatacenter",
options=['visualContent']
)
class TestDatacenterInfoModule(FactsParamsTest):
@pytest.fixture(autouse=True)
def setUp(self, mock_ansible_module, mock_ov_client):
self.resource = mock_ov_client.datacenters
self.mock_ansible_module = mock_ansible_module
self.mock_ov_client = mock_ov_client
def test_should_get_all_datacenters(self):
self.resource.get_all.return_value = {"name": "Data Center Name"}
self.mock_ansible_module.params = dict(config='config.json',)
DatacenterInfoModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
datacenters=({"name": "Data Center Name"})
)
def test_should_get_datacenter_by_name(self):
self.resource.get_by.return_value = [{"name": "Data Center Name"}]
self.mock_ansible_module.params = dict(config='config.json', name="MyDatacenter")
DatacenterInfoModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
datacenters=([{"name": "Data Center Name"}])
)
def test_should_get_datacenter_visual_content(self):
self.resource.get_by.return_value = [{"name": "Data Center Name", "uri": "/rest/datacenter/id"}]
self.resource.get_visual_content.return_value = {
"name": "Visual Content"}
self.mock_ansible_module.params = PARAMS_GET_CONNECTED
DatacenterInfoModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
datacenter_visual_content={'name': 'Visual Content'},
datacenters=[{'name': 'Data Center Name', 'uri': '/rest/datacenter/id'}]
)
def test_should_get_none_datacenter_visual_content(self):
self.resource.get_by.return_value = []
self.mock_ansible_module.params = PARAMS_GET_CONNECTED
DatacenterInfoModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
datacenter_visual_content=None,
datacenters=[]
)
|
obi-two/Rebelion
|
refs/heads/master
|
data/scripts/templates/object/draft_schematic/structure/shared_generic_house_player_small.py
|
2
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/structure/shared_generic_house_player_small.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
hack4impact/women-veterans-rock
|
refs/heads/master
|
app/admin/__init__.py
|
13
|
from flask import Blueprint
admin = Blueprint('admin', __name__)
from . import views # noqa
|
kelseyoo14/Wander
|
refs/heads/master
|
venv_2_7/lib/python2.7/site-packages/Django-1.9-py2.7.egg/django/contrib/gis/db/backends/oracle/adapter.py
|
273
|
from cx_Oracle import CLOB
from django.contrib.gis.db.backends.base.adapter import WKTAdapter
from django.contrib.gis.geos import GeometryCollection, Polygon
from django.utils.six.moves import range
class OracleSpatialAdapter(WKTAdapter):
input_size = CLOB
def __init__(self, geom):
"""
Oracle requires that polygon rings are in proper orientation. This
affects spatial operations and an invalid orientation may cause
failures. Correct orientations are:
* Outer ring - counter clockwise
* Inner ring(s) - clockwise
"""
if isinstance(geom, Polygon):
self._fix_polygon(geom)
elif isinstance(geom, GeometryCollection):
self._fix_geometry_collection(geom)
self.wkt = geom.wkt
self.srid = geom.srid
def _fix_polygon(self, poly):
# Fix single polygon orientation as described in __init__()
if self._isClockwise(poly.exterior_ring):
poly.exterior_ring = list(reversed(poly.exterior_ring))
for i in range(1, len(poly)):
if not self._isClockwise(poly[i]):
poly[i] = list(reversed(poly[i]))
return poly
def _fix_geometry_collection(self, coll):
# Fix polygon orientations in geometry collections as described in
# __init__()
for i, geom in enumerate(coll):
if isinstance(geom, Polygon):
coll[i] = self._fix_polygon(geom)
def _isClockwise(self, coords):
# A modified shoelace algorithm to determine polygon orientation.
# See https://en.wikipedia.org/wiki/Shoelace_formula
n = len(coords)
area = 0.0
for i in range(n):
j = (i + 1) % n
area += coords[i][0] * coords[j][1]
area -= coords[j][0] * coords[i][1]
return area < 0.0
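# Illustrative check (not part of the original module): for a unit square
# traversed counter-clockwise, coords = [(0, 0), (1, 0), (1, 1), (0, 1)], the
# shoelace sum above is 0 + 1 + 1 + 0 = 2, so area > 0 and _isClockwise()
# returns False; reversing the ring gives a negative sum and it returns True.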
|
couchbase/perfrunner
|
refs/heads/master
|
cbagent/collectors/kvstore_stats.py
|
1
|
import json
from cbagent.collectors.collector import Collector
from perfrunner.helpers.local import extract_cb_any, get_cbstats
class KVStoreStats(Collector):
COLLECTOR = "kvstore_stats"
CB_STATS_PORT = 11209
METRICS_ACROSS_SHARDS = (
"BlockCacheQuota",
"WriteCacheQuota",
"BlockCacheMemUsed",
"BlockCacheHits",
"BlockCacheMisses",
"BloomFilterMemUsed",
"BytesIncoming",
"BytesOutgoing",
"BytesPerRead",
"IndexBlocksSize",
"MemoryQuota",
"NCommitBatches",
"NDeletes",
"NGets",
"NInserts",
"NReadBytes",
"NReadBytesCompact",
"NReadBytesGet",
"NReadIOs",
"NReadIOsGet",
"NSets",
"NSyncs",
"NTablesCreated",
"NTablesDeleted",
"NTableFiles",
"NFileCountCompacts",
"TableMetaMemUsed",
"ActiveBloomFilterMemUsed",
"TotalBloomFilterMemUsed",
"NWriteBytes",
"NWriteBytesCompact",
"NWriteIOs",
"TotalMemUsed",
"BufferMemUsed",
"WALMemUsed",
"WriteCacheMemUsed",
"NCompacts",
"ReadAmp",
"ReadAmpGet",
"ReadIOAmp",
"WriteAmp",
"TxnSizeEstimate",
"NFlushes",
"NGetsPerSec",
"NSetsPerSec",
"NDeletesPerSec",
"NCommitBatchesPerSec",
"NFlushesPerSec",
"NCompactsPerSec",
"NSyncsPerSec",
"NReadBytesPerSec",
"NReadBytesGetPerSec",
"NReadBytesCompactPerSec",
"BytesOutgoingPerSec",
"NReadIOsPerSec",
"NReadIOsGetPerSec",
"BytesIncomingPerSec",
"NWriteBytesPerSec",
"NWriteIOsPerSec",
"NWriteBytesCompactPerSec",
"RecentWriteAmp",
"RecentReadAmp",
"RecentReadAmpGet",
"RecentReadIOAmp",
"RecentBytesPerRead",
"NGetStatsPerSec",
"NGetStatsComputedPerSec",
"FlushQueueSize",
"CompactQueueSize",
"NBloomFilterHits",
"NBloomFilterMisses",
"BloomFilterFPR",
"NumNormalFlushes",
"NumPersistentFlushes",
"NumSyncFlushes"
)
METRICS_AVERAGE_PER_NODE_PER_SHARD = (
"ReadAmp",
"ReadAmpGet",
"ReadIOAmp",
"WriteAmp",
"TxnSizeEstimate",
"RecentWriteAmp",
"RecentReadAmp",
"RecentReadAmpGet",
"RecentReadIOAmp",
"RecentBytesPerRead",
"FlushQueueSize",
"CompactQueueSize",
"BloomFilterFPR"
)
NO_CAP = (
"TxnSizeEstimate",
"RecentBytesPerRead",
"FlushQueueSize",
"CompactQueueSize",
"BloomFilterFPR"
)
def __init__(self, settings, test):
super().__init__(settings)
extract_cb_any(filename='couchbase')
self.collect_per_server_stats = test.collect_per_server_stats
self.cluster_spec = test.cluster_spec
def _get_stats_from_server(self, bucket: str, server: str):
stats = {}
try:
result = get_cbstats(server, self.CB_STATS_PORT, "kvstore", self.cluster_spec)
buckets_data = list(filter(lambda a: a != "", result.split("*")))
for data in buckets_data:
data = data.strip()
if data.startswith(bucket):
data = data.split("\n", 1)[1]
data = data.replace("\"{", "{")
data = data.replace("}\"", "}")
data = data.replace("\\", "")
data = json.loads(data)
for (shard, metrics) in data.items():
if not shard.endswith(":magma"):
continue
for metric in self.METRICS_ACROSS_SHARDS:
if metric in metrics.keys():
if metric in stats:
stats[metric] += metrics[metric]
else:
stats[metric] = metrics[metric]
if metric == "TxnSizeEstimate" and "walStats" in metrics.keys():
if metric in stats:
stats[metric] += metrics["walStats"][metric]
else:
stats[metric] = metrics["walStats"][metric]
break
except Exception:
pass
return stats
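# Illustrative note (not part of the original collector): the cbstats "kvstore"
# output is assumed to look roughly like '*<bucket>\n"{ ...escaped json... }"'
# per bucket, which is why the replace() calls above strip the surrounding
# quotes and backslashes before json.loads() is applied.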
def _get_kvstore_stats(self, bucket: str, server: str):
node_stats = self._get_stats_from_server(bucket, server=server)
return node_stats
def _get_num_shards(self, bucket: str, server: str):
result = get_cbstats(server, self.CB_STATS_PORT, "workload", self.cluster_spec)
buckets_data = list(filter(lambda a: a != "", result.split("*")))
for data in buckets_data:
data = data.strip()
if data.startswith(bucket):
data = data.split("\n", 1)[1]
data = data.replace("\"{", "{")
data = data.replace("}\"", "}")
data = data.replace("\\", "")
data = json.loads(data)
return data["ep_workload:num_shards"]
return 1
def sample(self):
if self.collect_per_server_stats:
for node in self.nodes:
for bucket in self.get_buckets():
num_shards = self._get_num_shards(bucket, self.master_node)
stats = self._get_kvstore_stats(bucket, node)
for metric in self.METRICS_AVERAGE_PER_NODE_PER_SHARD:
if metric in stats:
if stats[metric] / num_shards >= 50 and metric not in self.NO_CAP:
stats[metric] = 50
else:
stats[metric] /= num_shards
if stats:
self.update_metric_metadata(stats.keys(), server=node, bucket=bucket)
self.store.append(stats, cluster=self.cluster,
bucket=bucket, server=node,
collector=self.COLLECTOR)
for bucket in self.get_buckets():
stats = {}
num_shards = self._get_num_shards(bucket, self.master_node)
num_nodes = len(self.nodes)
for node in self.nodes:
temp_stats = self._get_kvstore_stats(bucket, node)
for st in temp_stats:
if st in stats:
stats[st] += temp_stats[st]
else:
stats[st] = temp_stats[st]
for metric in self.METRICS_AVERAGE_PER_NODE_PER_SHARD:
if metric in stats:
if stats[metric]/(num_shards * num_nodes) >= 50 and metric not in self.NO_CAP:
stats[metric] = 50
else:
stats[metric] /= (num_shards * num_nodes)
if stats:
self.update_metric_metadata(stats.keys(), bucket=bucket)
self.store.append(stats, cluster=self.cluster,
bucket=bucket,
collector=self.COLLECTOR)
def update_metadata(self):
self.mc.add_cluster()
for bucket in self.get_buckets():
self.mc.add_bucket(bucket)
for node in self.nodes:
self.mc.add_server(node)
|
robovm/robovm-studio
|
refs/heads/master
|
python/lib/Lib/site-packages/django/contrib/gis/tests/__init__.py
|
229
|
from django.conf import settings
from django.test.simple import build_suite, DjangoTestSuiteRunner
from django.utils import unittest
def run_tests(*args, **kwargs):
from django.test.simple import run_tests as base_run_tests
return base_run_tests(*args, **kwargs)
def run_gis_tests(test_labels, verbosity=1, interactive=True, failfast=False, extra_tests=None):
import warnings
warnings.warn(
'The run_gis_tests() test runner has been deprecated in favor of GeoDjangoTestSuiteRunner.',
DeprecationWarning
)
test_runner = GeoDjangoTestSuiteRunner(verbosity=verbosity, interactive=interactive, failfast=failfast)
return test_runner.run_tests(test_labels, extra_tests=extra_tests)
def geo_apps(namespace=True, runtests=False):
"""
Returns a list of GeoDjango test applications that reside in
`django.contrib.gis.tests` that can be used with the current
database and the spatial libraries that are installed.
"""
from django.db import connection
from django.contrib.gis.geos import GEOS_PREPARE
from django.contrib.gis.gdal import HAS_GDAL
apps = ['geoapp', 'relatedapp']
# No distance queries on MySQL.
if not connection.ops.mysql:
apps.append('distapp')
# Test geography support with PostGIS 1.5+.
if connection.ops.postgis and connection.ops.geography:
apps.append('geogapp')
# The following GeoDjango test apps depend on GDAL support.
if HAS_GDAL:
# 3D apps use LayerMapping, which uses GDAL.
if connection.ops.postgis and GEOS_PREPARE:
apps.append('geo3d')
apps.append('layermap')
if runtests:
return [('django.contrib.gis.tests', app) for app in apps]
elif namespace:
return ['django.contrib.gis.tests.%s' % app
for app in apps]
else:
return apps
def geodjango_suite(apps=True):
"""
Returns a TestSuite consisting only of GeoDjango tests that can be run.
"""
import sys
from django.db.models import get_app
suite = unittest.TestSuite()
# Adding the GEOS tests.
from django.contrib.gis.geos import tests as geos_tests
suite.addTest(geos_tests.suite())
# Adding the measurement tests.
from django.contrib.gis.tests import test_measure
suite.addTest(test_measure.suite())
# Adding GDAL tests, and any test suite that depends on GDAL, to the
# suite if GDAL is available.
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import tests as gdal_tests
suite.addTest(gdal_tests.suite())
from django.contrib.gis.tests import test_spatialrefsys, test_geoforms
suite.addTest(test_spatialrefsys.suite())
suite.addTest(test_geoforms.suite())
else:
sys.stderr.write('GDAL not available - no tests requiring GDAL will be run.\n')
# Add GeoIP tests to the suite, if the library and data is available.
from django.contrib.gis.utils import HAS_GEOIP
if HAS_GEOIP and hasattr(settings, 'GEOIP_PATH'):
from django.contrib.gis.tests import test_geoip
suite.addTest(test_geoip.suite())
# Finally, adding the suites for each of the GeoDjango test apps.
if apps:
for app_name in geo_apps(namespace=False):
suite.addTest(build_suite(get_app(app_name)))
return suite
class GeoDjangoTestSuiteRunner(DjangoTestSuiteRunner):
def setup_test_environment(self, **kwargs):
super(GeoDjangoTestSuiteRunner, self).setup_test_environment(**kwargs)
# Saving original values of INSTALLED_APPS, ROOT_URLCONF, and SITE_ID.
self.old_installed = getattr(settings, 'INSTALLED_APPS', None)
self.old_root_urlconf = getattr(settings, 'ROOT_URLCONF', '')
self.old_site_id = getattr(settings, 'SITE_ID', None)
# Constructing the new INSTALLED_APPS, and including applications
# within the GeoDjango test namespace.
new_installed = ['django.contrib.sites',
'django.contrib.sitemaps',
'django.contrib.gis',
]
# Calling out to `geo_apps` to get GeoDjango applications supported
# for testing.
new_installed.extend(geo_apps())
settings.INSTALLED_APPS = new_installed
# SITE_ID needs to be set
settings.SITE_ID = 1
# ROOT_URLCONF needs to be set, else `AttributeErrors` are raised
# when TestCases are torn down that have `urls` defined.
settings.ROOT_URLCONF = ''
def teardown_test_environment(self, **kwargs):
super(GeoDjangoTestSuiteRunner, self).teardown_test_environment(**kwargs)
settings.INSTALLED_APPS = self.old_installed
settings.ROOT_URLCONF = self.old_root_urlconf
settings.SITE_ID = self.old_site_id
def build_suite(self, test_labels, extra_tests=None, **kwargs):
return geodjango_suite()
|
rahul-c1/scikit-learn
|
refs/heads/master
|
sklearn/metrics/cluster/unsupervised.py
|
10
|
""" Unsupervised evaluation metrics. """
# Authors: Robert Layton <robertlayton@gmail.com>
#
# License: BSD 3 clause
import numpy as np
from ...utils import check_random_state
from ..pairwise import pairwise_distances
def silhouette_score(X, labels, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that the Silhouette Coefficient is only defined if the number of labels
satisfies 2 <= n_labels <= n_samples - 1.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
Predicted labels for each sample.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance
array itself, use ``metric="precomputed"``.
sample_size : int or None
The size of the sample to use when computing the Silhouette
Coefficient. If ``sample_size is None``, no sampling is used.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
n_labels = len(np.unique(labels))
n_samples = X.shape[0]
if not 2 <= n_labels <= n_samples - 1:
raise ValueError("Number of labels is %d "
"but should be between 2 and "
"n_samples - 1 (inclusive)" % n_labels)
if sample_size is not None:
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
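# Illustrative usage (a minimal sketch, not part of the original module); it
# assumes KMeans (with fit_predict) is importable from sklearn.cluster.
def _silhouette_score_example():
    """Two well separated blobs should score close to 1."""
    from ...cluster import KMeans
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(50, 2), rng.randn(50, 2) + 5])
    labels = KMeans(n_clusters=2, random_state=0).fit_predict(X)
    return silhouette_score(X, labels)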
def silhouette_samples(X, labels, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that the Silhouette Coefficient is only defined if the number of labels
satisfies 2 <= n_labels <= n_samples - 1.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
labels : array, shape = [n_samples]
label values for each sample
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is
the distance array itself, use "precomputed" as the metric.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array, shape = [n_samples]
Silhouette Coefficient for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<http://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<http://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
distances = pairwise_distances(X, metric=metric, **kwds)
n = labels.shape[0]
A = np.array([_intra_cluster_distance(distances[i], labels, i)
for i in range(n)])
B = np.array([_nearest_cluster_distance(distances[i], labels, i)
for i in range(n)])
sil_samples = (B - A) / np.maximum(A, B)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def _intra_cluster_distance(distances_row, labels, i):
"""Calculate the mean intra-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is excluded from calculation and
used to determine the current label
Returns
-------
a : float
Mean intra-cluster distance for sample i
"""
mask = labels == labels[i]
mask[i] = False
a = np.mean(distances_row[mask])
return a
def _nearest_cluster_distance(distances_row, labels, i):
"""Calculate the mean nearest-cluster distance for sample i.
Parameters
----------
distances_row : array, shape = [n_samples]
Pairwise distance matrix between sample i and each sample.
labels : array, shape = [n_samples]
label values for each sample
i : int
Sample index being calculated. It is used to determine the current
label.
Returns
-------
b : float
Mean nearest-cluster distance for sample i
"""
label = labels[i]
b = np.min([np.mean(distances_row[labels == cur_label])
for cur_label in set(labels) if not cur_label == label])
return b
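# Illustrative arithmetic (not part of the original module): for a sample with
# mean intra-cluster distance a = 1.0 and mean nearest-cluster distance b = 3.0,
# the Silhouette Coefficient is (b - a) / max(a, b) = 2.0 / 3.0, i.e. about 0.67.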
|
Chilledheart/gyp
|
refs/heads/master
|
test/ios/gyptest-app-ios.py
|
39
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that iOS app bundles are built correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
test = TestGyp.TestGyp(formats=['xcode', 'ninja'])
test.run_gyp('test.gyp', chdir='app-bundle')
test.build('test.gyp', test.ALL, chdir='app-bundle')
# Test that the app binary exists inside the built .app bundle
test.built_file_must_exist('Test App Gyp.app/Test App Gyp',
chdir='app-bundle')
# Info.plist
info_plist = test.built_file_path('Test App Gyp.app/Info.plist',
chdir='app-bundle')
# Resources
test.built_file_must_exist(
'Test App Gyp.app/English.lproj/InfoPlist.strings',
chdir='app-bundle')
test.built_file_must_exist(
'Test App Gyp.app/English.lproj/MainMenu.nib',
chdir='app-bundle')
test.built_file_must_exist(
'Test App Gyp.app/English.lproj/Main_iPhone.storyboardc',
chdir='app-bundle')
# Packaging
test.built_file_must_exist('Test App Gyp.app/PkgInfo',
chdir='app-bundle')
test.built_file_must_match('Test App Gyp.app/PkgInfo', 'APPLause',
chdir='app-bundle')
test.pass_test()
|
gauribhoite/personfinder
|
refs/heads/master
|
env/google_appengine/google/appengine/ext/mapreduce/file_format_root.py
|
6
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Define file format root."""
from __future__ import with_statement
__all__ = ['FileFormatRoot',
'split']
import copy
from google.appengine.api.files import file as files
from google.appengine.ext.mapreduce import file_formats
from google.appengine.ext.mapreduce import json_util
import google.appengine.ext.mapreduce.file_format_parser as parser
def split(filenames, format_string, shards):
"""Get a FileFormatRoot for each shard.
This method creates a list of FileFormatRoot and assigns each root
some input files. The number of roots is less than or equal to shards.
Args:
filenames: input filenames
format_string: format string from user.
shards: number of shards to split inputs.
Returns:
A list of FileFormatRoot or None if all input files have zero bytes.
"""
parsed_formats = parser.parse(format_string)
sizes = [files.stat(filename).st_size for filename in filenames]
size_per_shard = float(sum(sizes)) / shards
if not size_per_shard:
return
if parsed_formats[0].can_split():
return _deep_split(filenames, size_per_shard, parsed_formats)
else:
return _shallow_split(filenames, size_per_shard, parsed_formats, sizes)
def _shallow_split(filenames, size_per_shard, parsed_formats, sizes):
"""Split files into roots only based on top level file sizes.
This split does not cross file boundary.
"""
roots = []
inputs = []
shard_size = 0
for i, size in enumerate(sizes):
shard_size += size
inputs.append(_FileRange(filenames[i], None))
if shard_size >= size_per_shard:
roots.append(FileFormatRoot(copy.deepcopy(parsed_formats), inputs))
inputs = []
shard_size = 0
if inputs:
roots.append(FileFormatRoot(copy.deepcopy(parsed_formats), inputs))
return roots
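# Illustrative example (not part of the original module): with four files of
# sizes [30, 30, 40, 100] and shards=2, size_per_shard is 100.0, so
# _shallow_split puts the first three files (30 + 30 + 40 >= 100) in one root
# and the last file in a second root; no file is split across roots here.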
def _deep_split(filenames, size_per_shard, parsed_formats):
"""Split files into roots using the first FileFormat.
Deep split can split within a file. It tells the first format how big
a split it wants and the first format will do the actual splitting
because only the first format knows how to operate on this particular
format.
Args:
filenames: a list of input filenames.
size_per_shard: size per shard.
parsed_formats: the parsed FileFormats.
Returns:
A list of FileFormatRoot.
"""
roots = []
inputs = []
size_left = size_per_shard
for filename in filenames:
index = 0
with files.open(filename) as f:
cache_for_split = {}
while True:
if size_left <= 0:
roots.append(FileFormatRoot(copy.deepcopy(parsed_formats), inputs))
size_left = size_per_shard
inputs = []
start_index = index
size_left, index = parsed_formats[0].split(size_left,
start_index,
f,
cache_for_split)
if start_index == index:
break
inputs.append(_FileRange(filename, (start_index, index)))
if inputs:
roots.append(FileFormatRoot(copy.deepcopy(parsed_formats), inputs))
return roots
class _FileRange(json_util.JsonMixin):
"""Describe a range of a file to read.
FileFormatRootFactory creates instances of this class and
feeds them to different roots.
"""
FILENAME = 'filename'
RANGE = 'range'
def __init__(self, filename, file_range=None):
"""Init.
Args:
filename: filename in str.
file_range: [start_index, end_index) tuple. This only makes sense for
_FileFormats that support splitting within a file.
It specifies the range of this file to read.
None means reading the entire file. When defined, what it means
differs for each format. For example, if a file is of zip format,
index specifies the member files to read. If a file is of record
format, index specifies the records to read.
"""
self.filename = filename
self.range = file_range
def to_json(self):
return {self.FILENAME: self.filename,
self.RANGE: self.range}
@classmethod
def from_json(cls, json):
return cls(json[cls.FILENAME], json[cls.RANGE])
class FileFormatRoot(json_util.JsonMixin):
"""FileFormatRoot.
FileFormatRoot takes a list of FileFormats as processing units and
a list of _FileRanges as inputs. It provides an interface to
iterate through all the inputs. All inputs will be processed by each
processing unit in a cascaded manner before being emitted.
The order of the list of FileFormats matters. The last
FileFormat's output is returned by FileFormatRoot.
Each FileFormat asks FileFormatRoot for inputs, which are either outputs
from its previous FileFormat or, in the case of the first FileFormat,
outputs directly from FileFormatRoot.
FileFormats don't know each other. FileFormatRoot coordinates all
their initializations, (de)serialization, and communications.
"""
_INPUTS = 'inputs'
_FORMATS = 'formats'
_FILES_STREAMS = 'files_streams'
def __init__(self, formats, inputs, files_streams_json=None):
"""Init.
Args:
formats: A list of _FileFormats.
inputs: A list of _FileRanges.
files_streams_json: serialized files streams to restore from, or None to
initialize the streams to their default state.
"""
self._inputs = inputs
self._formats = formats
for i, file_format in enumerate(self._formats):
stream_cls = _RootFilesStream if i == 0 else _FilesStream
if files_streams_json:
file_format._input_files_stream = stream_cls.from_json(
files_streams_json[i], self)
else:
file_format._input_files_stream = stream_cls(i, self)
def __repr__(self):
return str(self.to_json())
def __iter__(self):
return self
def to_json(self):
return {self._INPUTS: [_.to_json() for _ in self._inputs],
self._FORMATS: [_.to_json() for _ in self._formats],
self._FILES_STREAMS:
[_._input_files_stream.to_json() for _ in self._formats]}
@classmethod
def from_json(cls, json):
formats = [file_formats.FORMATS[_json[file_formats.FileFormat._FORMAT]].
from_json(_json) for _json in json[cls._FORMATS]]
root = cls(formats,
[_FileRange.from_json(_) for _ in json[cls._INPUTS]],
json[cls._FILES_STREAMS])
return root
def next(self):
"""Iterate over inputs."""
result = self._formats[-1].next()
self._formats[-1]._input_files_stream.checkpoint()
self._formats[-1].checkpoint()
return result
class _FilesStream(object):
"""Provide FileFormat with a stream of file-like objects as inputs.
Attributes:
current: the current file-like object to read from.
"""
PREVIOUS_OFFSET = 'previous'
INDEX = 'index'
def __init__(self,
index,
file_format_root,
offset=0,
next_func=None):
"""Init.
Args:
file_format_root: the FileFormatRoot this stream should talk to.
index: the index of this stream within the FileFormatRoot.
offset: the offset to start reading current file.
next_func: a function that gives back the next file from the stream.
"""
self._next_file = next_func or file_format_root._formats[index-1].next
self._preprocess = file_format_root._formats[index].preprocess
self._previous_offset = offset
self._index = index
self._current = self._preprocess(self._next_file())
self._current.seek(offset)
def advance(self):
"""Advance _current to the next file-like object.
Callers should invoke this after the current file-like object has been fully consumed.
"""
self._previous_offset = 0
self._current.close()
self._current = self._preprocess(self._next_file())
@property
def current(self):
return self._current
def checkpoint(self):
self._previous_offset = self._current.tell()
def to_json(self):
return {self.PREVIOUS_OFFSET: self._previous_offset,
self.INDEX: self._index}
@classmethod
def from_json(cls, json, file_format_root):
return cls(json[cls.INDEX], file_format_root, json[cls.PREVIOUS_OFFSET])
class _RootFilesStream(_FilesStream):
"""Special FilesStream for the first FileFormat"""
PREVIOUS_INPUT_INDEX = 'input_index'
def __init__(self,
index,
file_format_root,
offset=0,
input_index=0):
"""Init.
Args:
index: the index of this stream within the FileFormatRoot.
file_format_root: the FileFormatRoot this stream should talk to.
offset: the offset to start reading current file.
input_index: index of the next input file to read.
"""
self.__inputs = file_format_root._inputs
self.__input_index = input_index
self.__previous_input_index = input_index
self.__file_format_root = file_format_root
super(_RootFilesStream, self).__init__(index,
file_format_root,
offset,
self.next_file)
def next_file(self):
if self.__input_index == len(self.__inputs):
raise StopIteration()
file_input = self.__inputs[self.__input_index]
if file_input.range:
first_format = self.__file_format_root._formats[0]
if not first_format.can_split():
raise ValueError('Input range specified for a non-splittable format %s'
% first_format.NAME)
first_format._range = file_input.range
self.__previous_input_index = self.__input_index
self.__input_index += 1
return files.open(file_input.filename, 'r', buffering=-1)
def to_json(self):
result = super(_RootFilesStream, self).to_json()
result[self.PREVIOUS_INPUT_INDEX] = self.__previous_input_index
return result
@classmethod
def from_json(cls, json, file_format_root):
return cls(json[cls.INDEX],
file_format_root,
json[cls.PREVIOUS_OFFSET],
json[cls.PREVIOUS_INPUT_INDEX])
|
JTCunning/sentry
|
refs/heads/master
|
src/sentry/migrations/0121_auto__add_unique_grouprulestatus_rule_group.py
|
36
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'GroupRuleStatus', fields ['rule', 'group']
db.create_unique('sentry_grouprulestatus', ['rule_id', 'group_id'])
def backwards(self, orm):
# Removing unique constraint on 'GroupRuleStatus', fields ['rule', 'group']
db.delete_unique('sentry_grouprulestatus', ['rule_id', 'group_id'])
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
        },
        'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
|
Hakuba/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/myspass.py
|
105
|
from __future__ import unicode_literals
import os.path
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
)
from ..utils import (
ExtractorError,
)
class MySpassIE(InfoExtractor):
_VALID_URL = r'http://www\.myspass\.de/.*'
_TEST = {
'url': 'http://www.myspass.de/myspass/shows/tvshows/absolute-mehrheit/Absolute-Mehrheit-vom-17022013-Die-Highlights-Teil-2--/11741/',
'md5': '0b49f4844a068f8b33f4b7c88405862b',
'info_dict': {
'id': '11741',
'ext': 'mp4',
"description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
"title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
},
}
def _real_extract(self, url):
META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'
# video id is the last path element of the URL
# usually there is a trailing slash, so also try the second but last
url_path = compat_urllib_parse_urlparse(url).path
url_parent_path, video_id = os.path.split(url_path)
if not video_id:
_, video_id = os.path.split(url_parent_path)
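        # e.g. an illustrative path '/myspass/shows/.../11741/' splits into
        # ('/myspass/shows/.../11741', ''), so the id is then taken from the
        # parent path by the fallback above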
# get metadata
metadata_url = META_DATA_URL_TEMPLATE % video_id
metadata = self._download_xml(
metadata_url, video_id, transform_source=lambda s: s.strip())
# extract values from metadata
url_flv_el = metadata.find('url_flv')
if url_flv_el is None:
raise ExtractorError('Unable to extract download url')
video_url = url_flv_el.text
title_el = metadata.find('title')
if title_el is None:
raise ExtractorError('Unable to extract title')
title = title_el.text
format_id_el = metadata.find('format_id')
if format_id_el is None:
format = 'mp4'
else:
format = format_id_el.text
description_el = metadata.find('description')
if description_el is not None:
description = description_el.text
else:
description = None
imagePreview_el = metadata.find('imagePreview')
if imagePreview_el is not None:
thumbnail = imagePreview_el.text
else:
thumbnail = None
return {
'id': video_id,
'url': video_url,
'title': title,
'format': format,
'thumbnail': thumbnail,
'description': description,
}
|
ns950/calibre
|
refs/heads/master
|
src/odf/userfield.py
|
13
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2009 Søren Roug, European Environment Agency
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s): Michael Howitz, gocept gmbh & co. kg
#
# $Id: userfield.py 447 2008-07-10 20:01:30Z roug $
"""Class to show and manipulate user fields in odf documents."""
import sys
import zipfile
from odf.text import UserFieldDecl
from odf.namespaces import OFFICENS
from odf.opendocument import load
OUTENCODING = "utf-8"
# OpenDocument v.1.0 section 6.7.1
VALUE_TYPES = {
'float': (OFFICENS, u'value'),
'percentage': (OFFICENS, u'value'),
'currency': (OFFICENS, u'value'),
'date': (OFFICENS, u'date-value'),
'time': (OFFICENS, u'time-value'),
'boolean': (OFFICENS, u'boolean-value'),
'string': (OFFICENS, u'string-value'),
}
class UserFields(object):
"""List, view and manipulate user fields."""
# these attributes can be a filename or a file like object
src_file = None
dest_file = None
def __init__(self, src=None, dest=None):
"""Constructor
src ... source document name, file like object or None for stdin
dest ... destination document name, file like object or None for stdout
"""
self.src_file = src
self.dest_file = dest
self.document = None
def loaddoc(self):
if isinstance(self.src_file, basestring):
# src_file is a filename, check if it is a zip-file
if not zipfile.is_zipfile(self.src_file):
raise TypeError("%s is no odt file." % self.src_file)
elif self.src_file is None:
# use stdin if no file given
self.src_file = sys.stdin
self.document = load(self.src_file)
def savedoc(self):
# write output
if self.dest_file is None:
# use stdout if no filename given
self.document.save('-')
else:
self.document.save(self.dest_file)
def list_fields(self):
"""List (extract) all known user-fields.
Returns list of user-field names.
"""
return [x[0] for x in self.list_fields_and_values()]
def list_fields_and_values(self, field_names=None):
"""List (extract) user-fields with type and value.
field_names ... list of field names to show or None for all.
Returns list of tuples (<field name>, <field type>, <value>).
"""
self.loaddoc()
found_fields = []
all_fields = self.document.getElementsByType(UserFieldDecl)
for f in all_fields:
value_type = f.getAttribute('valuetype')
if value_type == 'string':
value = f.getAttribute('stringvalue')
else:
value = f.getAttribute('value')
field_name = f.getAttribute('name')
if field_names is None or field_name in field_names:
found_fields.append((field_name.encode(OUTENCODING),
value_type.encode(OUTENCODING),
value.encode(OUTENCODING)))
return found_fields
def list_values(self, field_names):
"""Extract the contents of given field names from the file.
field_names ... list of field names
Returns list of field values.
"""
return [x[2] for x in self.list_fields_and_values(field_names)]
def get(self, field_name):
"""Extract the contents of this field from the file.
Returns field value or None if field does not exist.
"""
values = self.list_values([field_name])
if not values:
return None
return values[0]
def get_type_and_value(self, field_name):
"""Extract the type and contents of this field from the file.
Returns tuple (<type>, <field-value>) or None if field does not exist.
"""
fields = self.list_fields_and_values([field_name])
if not fields:
return None
field_name, value_type, value = fields[0]
return value_type, value
def update(self, data):
"""Set the value of user fields. The field types will be the same.
data ... dict, with field name as key, field value as value
Returns None
"""
self.loaddoc()
all_fields = self.document.getElementsByType(UserFieldDecl)
for f in all_fields:
field_name = f.getAttribute('name')
            if field_name in data:
value_type = f.getAttribute('valuetype')
value = data.get(field_name)
if value_type == 'string':
f.setAttribute('stringvalue', value)
else:
f.setAttribute('value', value)
self.savedoc()
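# Minimal usage sketch of the class above (file names are illustrative, not
# from the original module):
#
#     fields = UserFields('report.odt', 'report-out.odt')
#     print fields.list_fields()              # all declared user-field names
#     print fields.get('author')              # value of one field, or None
#     fields.update({'author': 'Jane Doe'})   # rewrites the fields and saves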
|
thnee/ansible
|
refs/heads/devel
|
test/units/modules/network/check_point/test_cp_mgmt_time.py
|
19
|
# Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_time
OBJECT = {
"name": "timeObject1",
"end": {
"date": "24-Nov-2014",
"time": "21:22"
},
"recurrence": {
"pattern": "Daily",
"month": "Any",
"weekdays": [
"Sun",
"Mon"
],
"days": [
"1"
]
},
"start_now": True,
"end_never": False,
"hours_ranges": [
{
"from": "00:00",
"to": "00:00",
"enabled": True,
"index": 1
},
{
"from": "00:00",
"to": "00:00",
"enabled": False,
"index": 2
}
]
}
CREATE_PAYLOAD = {
"name": "timeObject1",
"end": {
"date": "24-Nov-2014",
"time": "21:22"
},
"recurrence": {
"pattern": "Daily",
"month": "Any",
"weekdays": [
"Sun",
"Mon"
],
"days": [
"1"
]
},
"start_now": True,
"end_never": False,
"hours_ranges": [
{
"from": "00:00",
"to": "00:00",
"enabled": True,
"index": 1
},
{
"from": "00:00",
"to": "00:00",
"enabled": False,
"index": 2
}
]
}
UPDATE_PAYLOAD = {
"name": "timeObject1",
"recurrence": {
"pattern": "Weekly",
"weekdays": [
"Fri"
],
"month": "Any"
},
"hours_ranges": [
{
"from": "00:22",
"to": "00:33"
}
]
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
DELETE_PAYLOAD = {
"name": "timeObject1",
"state": "absent"
}
function_path = 'ansible.modules.network.check_point.cp_mgmt_time.api_call'
api_call_object = 'time'
class TestCheckpointTime(object):
module = cp_mgmt_time
@pytest.fixture(autouse=True)
def module_mock(self, mocker):
return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
return connection_class_mock.return_value
def test_create(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert result['changed']
assert OBJECT.items() == result[api_call_object].items()
def test_create_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT}
result = self._run_module(CREATE_PAYLOAD)
assert not result['changed']
def test_update(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert result['changed']
assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()
def test_update_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
result = self._run_module(UPDATE_PAYLOAD)
assert not result['changed']
def test_delete(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': True}
result = self._run_module(DELETE_PAYLOAD)
assert result['changed']
def test_delete_idempotent(self, mocker, connection_mock):
mock_function = mocker.patch(function_path)
mock_function.return_value = {'changed': False}
result = self._run_module(DELETE_PAYLOAD)
assert not result['changed']
def _run_module(self, module_args):
set_module_args(module_args)
with pytest.raises(AnsibleExitJson) as ex:
self.module.main()
return ex.value.args[0]
|
RRCKI/pilot
|
refs/heads/master
|
saga/utils/test_config.py
|
10
|
__author__ = "Andre Merzky, Ole Weidner"
__copyright__ = "Copyright 2012-2013, The SAGA Project"
__license__ = "MIT"
import copy
import radical.utils.testing as rut
import radical.utils.logger as rul
import saga.exceptions as se
import saga
# ------------------------------------------------------------------------------
#
def add_tc_params_to_jd (tc, jd):
if 'job_walltime_limit' in tc and tc['job_walltime_limit'] != "":
jd.wall_time_limit = tc['job_walltime_limit']
if 'job_project' in tc and tc['job_project'] != "":
jd.project = tc['job_project']
if 'job_queue' in tc and tc['job_queue'] != "":
jd.queue = tc['job_queue']
if 'job_total_cpu_count' in tc and tc['job_total_cpu_count'] != "":
jd.total_cpu_count = tc['job_total_cpu_count']
if 'job_spmd_variation' in tc and tc['job_spmd_variation'] != "":
jd.spmd_variation = tc['job_spmd_variation']
return jd
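# Illustrative call of the helper above (the config keys mirror the ones it
# checks; using saga.job.Description() here is an assumption about the saga
# API, not taken from this file):
#
#     tc = {'job_queue': 'normal', 'job_total_cpu_count': '16'}
#     jd = saga.job.Description()
#     jd = add_tc_params_to_jd(tc, jd)   # sets jd.queue and jd.total_cpu_count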
# ------------------------------------------------------------------------------
#
class TestConfig (rut.TestConfig):
#-----------------------------------------------------------------
#
def __init__ (self, cfg_file):
# initialize configuration. We only use the 'saga.tests' category from
# the config file.
rut.TestConfig.__init__ (self, cfg_file, 'saga.tests')
# setup a saga session for the tests
# don't populate session with default contexts...
self.session = saga.Session (default=False)
# attempt to create a context from the test config
if self.context_type :
c = saga.Context (self.context_type)
c.user_id = self.context_user_id
c.user_pass = self.context_user_pass
c.user_cert = self.context_user_cert
c.user_proxy = self.context_user_proxy
# add it to the session
self.session.add_context (c)
# ------------------------------------------------------------------------------
|
fiuba08/robotframework
|
refs/heads/master
|
src/robot/libraries/Easter.py
|
5
|
# Copyright 2008-2014 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def none_shall_pass(who):
if who is not None:
raise AssertionError('None shall pass!')
print '*HTML* <object width="480" height="385"><param name="movie" value="http://www.youtube.com/v/dhRUe-gz690&hl=en_US&fs=1&rel=0&color1=0x234900&color2=0x4e9e00"></param><param name="allowFullScreen" value="true"></param><param name="allowscriptaccess" value="always"></param><embed src="http://www.youtube.com/v/dhRUe-gz690&hl=en_US&fs=1&rel=0&color1=0x234900&color2=0x4e9e00" type="application/x-shockwave-flash" allowscriptaccess="always" allowfullscreen="true" width="480" height="385"></embed></object>'
|
popazerty/try
|
refs/heads/master
|
lib/python/Plugins/SystemPlugins/HdmiCEC/plugin.py
|
31
|
from boxbranding import getMachineBrand, getMachineName
from os import path
from Screens.Screen import Screen
from Components.ConfigList import ConfigListScreen
from Components.config import config, configfile, getConfigListEntry
from Components.Sources.StaticText import StaticText
if path.exists("/dev/hdmi_cec"):
import Components.HdmiCec
class HdmiCECSetupScreen(Screen, ConfigListScreen):
skin = """
<screen position="c-300,c-250" size="600,500" title="HDMI CEC setup">
<widget name="config" position="25,25" size="550,350" />
<widget source="current_address" render="Label" position="25,375" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
<widget source="fixed_address" render="Label" position="25,405" size="550,30" zPosition="10" font="Regular;21" halign="left" valign="center" />
<ePixmap pixmap="buttons/red.png" position="20,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/green.png" position="160,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/yellow.png" position="300,e-45" size="140,40" alphatest="on" />
<ePixmap pixmap="buttons/blue.png" position="440,e-45" size="140,40" alphatest="on" />
<widget source="key_red" render="Label" position="20,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#9f1313" transparent="1" />
<widget source="key_green" render="Label" position="160,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#1f771f" transparent="1" />
<widget source="key_yellow" render="Label" position="300,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#a08500" transparent="1" />
<widget source="key_blue" render="Label" position="440,e-45" zPosition="1" size="140,40" font="Regular;20" halign="center" valign="center" backgroundColor="#18188b" transparent="1" />
</screen>"""
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("HDMI CEC Setup"))
from Components.ActionMap import ActionMap
self["key_red"] = StaticText(_("Cancel"))
self["key_green"] = StaticText(_("OK"))
self["key_yellow"] = StaticText(_("Set fixed"))
self["key_blue"] = StaticText(_("Clear fixed"))
self["current_address"] = StaticText()
self["fixed_address"] = StaticText()
self["actions"] = ActionMap(["SetupActions", "ColorActions", "MenuActions"],
{
"ok": self.keyGo,
"save": self.keyGo,
"cancel": self.keyCancel,
"green": self.keyGo,
"red": self.keyCancel,
"yellow": self.setFixedAddress,
"blue": self.clearFixedAddress,
"menu": self.closeRecursive,
}, -2)
self.onChangedEntry = [ ]
self.list = []
ConfigListScreen.__init__(self, self.list, session = self.session, on_change = self.changedEntry)
self.createSetup()
def createSetup(self):
self.list = []
self.list.append(getConfigListEntry(_("Enabled"), config.hdmicec.enabled))
if config.hdmicec.enabled.value:
self.list.append(getConfigListEntry(_("Put TV in standby"), config.hdmicec.control_tv_standby))
self.list.append(getConfigListEntry(_("Wakeup TV from standby"), config.hdmicec.control_tv_wakeup))
self.list.append(getConfigListEntry(_("Regard deep standby as standby"), config.hdmicec.handle_deepstandby_events))
self.list.append(getConfigListEntry(_("Switch TV to correct input"), config.hdmicec.report_active_source))
self.list.append(getConfigListEntry(_("Use TV remote control"), config.hdmicec.report_active_menu))
self.list.append(getConfigListEntry(_("Handle standby from TV"), config.hdmicec.handle_tv_standby))
self.list.append(getConfigListEntry(_("Handle wakeup from TV"), config.hdmicec.handle_tv_wakeup))
self.list.append(getConfigListEntry(_("Wakeup signal from TV"), config.hdmicec.tv_wakeup_detection))
self.list.append(getConfigListEntry(_("Forward volume keys"), config.hdmicec.volume_forwarding))
self.list.append(getConfigListEntry(_("Put your %s %s in standby") % (getMachineBrand(), getMachineName()), config.hdmicec.control_receiver_standby))
self.list.append(getConfigListEntry(_("Wakeup your %s %s from standby") % (getMachineBrand(), getMachineName()), config.hdmicec.control_receiver_wakeup))
self.list.append(getConfigListEntry(_("Minimum send interval"), config.hdmicec.minimum_send_interval))
self["config"].list = self.list
self["config"].l.setList(self.list)
self.updateAddress()
# for summary:
def changedEntry(self):
if self["config"].getCurrent()[0] == _("Enabled"):
self.createSetup()
for x in self.onChangedEntry:
x()
def keyLeft(self):
ConfigListScreen.keyLeft(self)
def keyRight(self):
ConfigListScreen.keyRight(self)
def keyGo(self):
for x in self["config"].list:
x[1].save()
configfile.save()
self.close()
def keyCancel(self):
for x in self["config"].list:
x[1].cancel()
self.close()
def setFixedAddress(self):
Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress(Components.HdmiCec.hdmi_cec.getPhysicalAddress())
self.updateAddress()
def clearFixedAddress(self):
Components.HdmiCec.hdmi_cec.setFixedPhysicalAddress("0.0.0.0")
self.updateAddress()
def updateAddress(self):
self["current_address"].setText(_("Current CEC address") + ": " + Components.HdmiCec.hdmi_cec.getPhysicalAddress())
if config.hdmicec.fixed_physical_address.value == "0.0.0.0":
fixedaddresslabel = ""
else:
fixedaddresslabel = _("Using fixed address") + ": " + config.hdmicec.fixed_physical_address.value
self["fixed_address"].setText(fixedaddresslabel)
def Plugins(**kwargs):
return []
|
matteoalessiocarrara/lib-figafind
|
refs/heads/master
|
src/lib/fbfilter/src/lib/htmlfbapi/src/version.py
|
5
|
#!/usr/bin/python2
# -*- coding: utf-8 -*-
#
# Copyright 2015 - 2016 Matteo Alessio Carrara <sw.matteoac@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301, USA.
"""
Library information - contains the information usually obtainable via the
--version parameter in GNU software
"""
# Name
lib_name = "lib-htmlfbapi"
# Version
# This project adheres to Semantic Versioning (http://semver.org/)
version_major = 4
version_minor = 2
version_patch = 1
version_str = "%d.%d.%d" % (version_major, version_minor, version_patch)
# Lib
# Major version number
fbwrapper_version_required = 1
# Copyright
copyright = "Copyright (C) 2015 - 2016 Matteo Alessio Carrara"
# License
license = """License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law."""
|
lpatmo/actionify_the_news
|
refs/heads/master
|
open_connect/middleware/accept_terms.py
|
1
|
"""Middleware for verifying a user has accepted the ToS and UCoC."""
# pylint: disable=no-self-use
import re
from django.conf import settings
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
EXEMPT_URLS = [re.compile(settings.LOGIN_URL.lstrip('/'))]
if hasattr(settings, 'LOGIN_EXEMPT_URLS'):
EXEMPT_URLS += [
re.compile(exempt_url) for exempt_url in settings.LOGIN_EXEMPT_URLS]
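# Illustrative settings entry consumed above (these patterns are examples, not
# from the original project); each regex is matched against the request path
# with its leading slash stripped:
#
#     LOGIN_EXEMPT_URLS = (r'^about/$', r'^legal/', r'^static/')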
class AcceptTermsAndConductMiddleware(object):
"""Checks that users have accepted terms and conduct agreements."""
def process_request(self, request):
"""Process request and ask for an invite code if needed."""
# Find users that are logged in but haven't been verified
user = request.user
if user.is_authenticated():
if user.tos_accepted_at and user.ucoc_accepted_at:
return
path = request.path_info.lstrip('/')
# Only check the invite for paths that require login
if not any(m.match(path) for m in EXEMPT_URLS):
redirect_to = '{url}?next={next}'.format(
url=reverse('accept_terms_and_conduct'),
next=request.path_info
)
return HttpResponseRedirect(redirect_to)
|
mrpiracyPT/repository.mrpiracy
|
refs/heads/master
|
plugin.video.mrpiracy/resources/lib/definicoes.py
|
1
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os,xbmc,xbmcaddon,xbmcplugin,xbmcgui,xbmcvfs,sys,urllib,urllib2,unicodedata,re,urlparse,json,base64
import threading
from datetime import datetime
import ast
import controlo
def getQualidade():
return controlo.addon.getSetting('qualidadeFilmes')
def getCookie():
return 'username='+controlo.addon.getSetting('tokenMrpiracy')
def bruteforceconvert(str):
controlo.log("dicionario")
dicionario = {
u"\\xe1" : u"á", "\xe1": u"á",
u"\\xe9" : u"é", "\xe9" : u"é",
u"\\xe7" : u"ç", "\xe7" : u"ç",
u"\\xe3" : u"ã", "\xe3" : u"ã",
u"\\xf3" : u"ó", "\xf3" : u"ó",
u"\\xed" : u"í", "\xed" : u"í",
u"\\xea" : u"ê", "\xea" : u"é"}
for k, v in dicionario.iteritems():
        controlo.log(u"%s %s" % (k, v))
        str = str.replace(k, v)
    return str
def getListCategoria():
cat = controlo.ler_ficheiro(os.path.join(controlo.pastaDados,'categorias.mrpiracy')).replace('"', "'")
return cat
def getIdiomas():
return "[{'id':'German','label':'Alemão'},{'id':'Korean','label':'Coreano'},{'id':'Danish','label':'Dinamarquês'},{'id':'Spanish','label':'Espanhol'},{'id':'Filipino','label':'Filipino'},{'id':'Finnish','label':'Filandês'},{'id':'French','label':'Francês'},{'id':'Greek','label':'Grego'},{'id':'Dutch','label':'Holandês'},{'id':'Hungarian','label':'Húngaro'},{'id':'Hindi','label':'Indiano'},{'id':'English','label':'Inglês'},{'id':'Italian','label':'Italiano'},{'id':'Japanese','label':'Japonês'},{'id':'Mandarin','label':'Mandarim'},{'id':'Norwegian','label':'Norueguês'},{'id':'Polish','label':'Polaco'},{'id':'PT-PT','label':'Português de Portugal'},{'id':'PT-BR','label':'Português do Brasil'},{'id':'Russian','label':'Russo'},{'id':'Swedish','label':'Sueco'},{'id':'Thai','label':'Tailandês'},{'id':'Turkish','label':'Turco'},{'id':'Ukrainian','label':'Ucraniano'}]".replace('"', "'")
def getCategoria(id):
#cat = controlo.ler_ficheiro(os.path.join(controlo.pastaDados,'categorias.mrpiracy')).replace(": u", ": ").replace("'", '"').replace("{u", "{").replace(", u", ", ").replace('"', "'")
#cats = re.compile("'categorias':\s*u?'(.*?)',\s*u?'id_categoria':\s*u?'(.+?)'").findall(cat)
cat = controlo.ler_ficheiro(os.path.join(controlo.pastaDados,'categorias.mrpiracy')).replace('"', "'")
for c in ast.literal_eval(cat):
if int(id) == 0:
return ''
if int(c['id_categoria']) == int(id):
try:
cat = c['categorias'].decode('utf-8')
except:
cat = c['categorias'].encode('utf-8')
return cat
"""if int(id) == 0:
return ''
for c, i in cats:
if int(i) == int(id):
return c"""
return ''
def vista_menu():
opcao = controlo.addon.getSetting('menuView')
if opcao == '0': xbmc.executebuiltin("Container.SetViewMode(50)")
    elif opcao == '1': xbmc.executebuiltin("Container.SetViewMode(51)")
def vista_filmesSeries():
opcao = controlo.addon.getSetting('filmesSeriesView')
if opcao == '0': xbmc.executebuiltin("Container.SetViewMode(50)")
elif opcao == '1': xbmc.executebuiltin("Container.SetViewMode(51)")
elif opcao == '2': xbmc.executebuiltin("Container.SetViewMode(500)")
elif opcao == '3': xbmc.executebuiltin("Container.SetViewMode(501)")
elif opcao == '4': xbmc.executebuiltin("Container.SetViewMode(508)")
elif opcao == '5': xbmc.executebuiltin("Container.SetViewMode(504)")
elif opcao == '6': xbmc.executebuiltin("Container.SetViewMode(503)")
elif opcao == '7': xbmc.executebuiltin("Container.SetViewMode(515)")
def vista_temporadas():
opcao = controlo.addon.getSetting('temporadasView')
if opcao == '0': xbmc.executebuiltin("Container.SetViewMode(50)")
elif opcao == '1': xbmc.executebuiltin("Container.SetViewMode(51)")
elif opcao == '2': xbmc.executebuiltin("Container.SetViewMode(500)")
def vista_episodios():
opcao = controlo.addon.getSetting('episodiosView')
if opcao == '0': xbmc.executebuiltin("Container.SetViewMode(50)")
elif opcao == '1': xbmc.executebuiltin("Container.SetViewMode(51)")
elif opcao == '2': xbmc.executebuiltin("Container.SetViewMode(500)")
|
cole945/nds32-gdb
|
refs/heads/master
|
gdb/contrib/test_pubnames_and_indexes.py
|
34
|
#! /usr/bin/env python
# Copyright (C) 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GDB.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This program requires readelf, gdb and objcopy. The default values are gdb
# from the build tree and objcopy and readelf from $PATH. They may be
# overridden by setting environment variables GDB, READELF and OBJCOPY
# respectively. We assume the current directory is either $obj/gdb or
# $obj/gdb/testsuite.
#
# Example usage:
#
# bash$ cd $objdir/gdb/testsuite
# bash$ python test_pubnames_and_indexes.py <binary_name>
"""test_pubnames_and_indexes.py
Test that the gdb_index produced by gold is identical to the gdb_index
produced by gdb itself.
Further check that the pubnames and pubtypes produced by gcc are identical
to those that gdb produces.
Finally, check that all strings are canonicalized identically.
"""
__author__ = 'saugustine@google.com (Sterling Augustine)'
import os
import subprocess
import sys
OBJCOPY = None
READELF = None
GDB = None
def get_pub_info(filename, readelf_option):
"""Parse and return all the pubnames or pubtypes produced by readelf with the
given option.
"""
readelf = subprocess.Popen([READELF, '--debug-dump=' + readelf_option,
filename], stdout=subprocess.PIPE)
pubnames = []
  in_list = False
for line in readelf.stdout:
fields = line.split(None, 1)
if (len(fields) == 2 and fields[0] == 'Offset'
and fields[1].strip() == 'Name'):
in_list = True
# Either a blank-line or a new Length field terminates the current section.
elif (len(fields) == 0 or fields[0] == 'Length:'):
      in_list = False
elif (in_list):
pubnames.append(fields[1].strip())
readelf.wait()
return pubnames
def get_gdb_index(filename):
"""Use readelf to dump the gdb index and collect the types and names"""
readelf = subprocess.Popen([READELF, '--debug-dump=gdb_index',
filename], stdout=subprocess.PIPE)
index_symbols = []
symbol_table_started = False
for line in readelf.stdout:
if (line == 'Symbol table:\n'):
      symbol_table_started = True
elif (symbol_table_started):
# Readelf prints gdb-index lines formatted like so:
# [ 4] two::c2<double>::c2: 0
# So take the string between the first close bracket and the last colon.
index_symbols.append(line[line.find(']') + 2: line.rfind(':')])
readelf.wait()
return index_symbols
def CheckSets(list0, list1, name0, name1):
"""Report any setwise differences between the two lists"""
if len(list0) == 0 or len(list1) == 0:
return False
difference0 = set(list0) - set(list1)
if len(difference0) != 0:
print "Elements in " + name0 + " but not " + name1 + ": (",
print len(difference0),
print ")"
for element in difference0:
print " " + element
difference1 = set(list1) - set(list0)
if len(difference1) != 0:
print "Elements in " + name1 + " but not " + name0 + ": (",
print len(difference1),
print ")"
for element in difference1:
print " " + element
if (len(difference0) != 0 or len(difference1) != 0):
return True
print name0 + " and " + name1 + " are identical."
return False
def find_executables():
"""Find the copies of readelf, objcopy and gdb to use."""
# Executable finding logic follows cc-with-index.sh
global READELF
READELF = os.getenv('READELF')
if READELF is None:
READELF = 'readelf'
global OBJCOPY
OBJCOPY = os.getenv('OBJCOPY')
if OBJCOPY is None:
OBJCOPY = 'objcopy'
global GDB
GDB = os.getenv('GDB')
if (GDB is None):
if os.path.isfile('./gdb') and os.access('./gdb', os.X_OK):
GDB = './gdb'
elif os.path.isfile('../gdb') and os.access('../gdb', os.X_OK):
GDB = '../gdb'
elif os.path.isfile('../../gdb') and os.access('../../gdb', os.X_OK):
GDB = '../../gdb'
else:
# Punt and use the gdb in the path.
GDB = 'gdb'
def main(argv):
"""The main subprogram."""
if len(argv) != 2:
print "Usage: test_pubnames_and_indexes.py <filename>"
sys.exit(2)
  find_executables()
# Get the index produced by Gold--It should have been built into the binary.
gold_index = get_gdb_index(argv[1])
# Collect the pubnames and types list
pubs_list = get_pub_info(argv[1], "pubnames")
pubs_list = pubs_list + get_pub_info(argv[1], "pubtypes")
# Generate a .gdb_index with gdb
gdb_index_file = argv[1] + '.gdb-generated-index'
subprocess.check_call([OBJCOPY, '--remove-section', '.gdb_index',
argv[1], gdb_index_file])
subprocess.check_call([GDB, '-batch', '-nx', gdb_index_file,
'-ex', 'save gdb-index ' + os.path.dirname(argv[1]),
'-ex', 'quit'])
subprocess.check_call([OBJCOPY, '--add-section',
'.gdb_index=' + gdb_index_file + '.gdb-index',
gdb_index_file])
gdb_index = get_gdb_index(gdb_index_file)
os.remove(gdb_index_file)
os.remove(gdb_index_file + '.gdb-index')
failed = False
gdb_index.sort()
gold_index.sort()
pubs_list.sort()
# Find the differences between the various indices.
if len(gold_index) == 0:
print "Gold index is empty"
failed |= True
if len(gdb_index) == 0:
print "Gdb index is empty"
failed |= True
if len(pubs_list) == 0:
print "Pubs list is empty"
failed |= True
failed |= CheckSets(gdb_index, gold_index, "gdb index", "gold index")
failed |= CheckSets(pubs_list, gold_index, "pubs list", "gold index")
failed |= CheckSets(pubs_list, gdb_index, "pubs list", "gdb index")
if failed:
print "Test failed"
sys.exit(1)
if __name__ == '__main__':
main(sys.argv)
|
jaredkoontz/leetcode
|
refs/heads/master
|
Python/maximum-subarray.py
|
3
|
# Time: O(n)
# Space: O(1)
#
# Find the contiguous subarray within an array (containing at least one number) which has the largest sum.
#
# For example, given the array [-2,1,-3,4,-1,2,1,-5,4],
# the contiguous subarray [4,-1,2,1] has the largest sum = 6.
#
# click to show more practice.
#
# More practice:
# If you have figured out the O(n) solution, try coding another solution using the divide and conquer approach, which is more subtle.
#
class Solution(object):
def maxSubArray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
if max(nums) < 0:
return max(nums)
global_max, local_max = float("-inf"), 0
for x in nums:
local_max = max(0, local_max + x)
global_max = max(global_max, local_max)
return global_max
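# The "More practice" note above suggests a divide and conquer approach; the
# sketch below is one possible O(n log n) variant (illustrative only, not part
# of the original O(n) solution). Each recursive call returns the best
# subarray sum, best prefix sum, best suffix sum, and total sum of its range.
class Solution2(object):
    def maxSubArray(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        def helper(lo, hi):
            if lo == hi:
                return nums[lo], nums[lo], nums[lo], nums[lo]
            mid = (lo + hi) // 2
            l_best, l_pre, l_suf, l_tot = helper(lo, mid)
            r_best, r_pre, r_suf, r_tot = helper(mid + 1, hi)
            best = max(l_best, r_best, l_suf + r_pre)
            pre = max(l_pre, l_tot + r_pre)
            suf = max(r_suf, r_tot + l_suf)
            return best, pre, suf, l_tot + r_tot
        return helper(0, len(nums) - 1)[0]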
if __name__ == "__main__":
print Solution().maxSubArray([-2,1,-3,4,-1,2,1,-5,4])
|
charles-dyfis-net/bpython
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import glob
import os
import platform
import re
import sys
if platform.system() == 'FreeBSD':
man_dir = 'man'
else:
man_dir = 'share/man'
setup(
name="bpython",
version = "0.7.1",
author = "Robert Anthony Farrell",
author_email = "robertanthonyfarrell@gmail.com",
description = "Fancy Interface to the Python Interpreter",
license = "MIT/X",
url = "http://www.noiseforfree.com/bpython/",
long_description = """bpython is a fancy interface to the Python
interpreter for Unix-like operating systems.""",
install_requires = [
'pygments',
'pyparsing',
],
packages = ["bpython"],
data_files = [
(os.path.join(man_dir, 'man1'), ['doc/bpython.1']),
(os.path.join(man_dir, 'man5'), ['doc/bpythonrc.5']),
('share/applications', ['data/bpython.desktop'])
],
entry_points = {
'console_scripts': [
'bpython = bpython.cli:main',
],
}
)
# vim: encoding=utf-8 sw=4 ts=4 sts=4 ai et sta
|
ndawe/rootpy
|
refs/heads/master
|
examples/plotting/plot_hist.py
|
7
|
#!/usr/bin/env python
"""
============================
Working with ROOT histograms
============================
This example demonstrates how to create and work with ROOT histogram in rootpy.
"""
print(__doc__)
from rootpy.extern.six.moves import range
from rootpy.plotting import Hist, Hist2D, Hist3D, HistStack, Legend, Canvas
from rootpy.interactive import wait
import random
# create a simple 1D histogram with 10 constant-width bins between 0 and 1
h_simple = Hist(10, 0, 1)
print(h_simple.name)
# If the name is not specified, a UUID is used so that ROOT never complains
# about two histograms having the same name.
# Alternatively you can specify the name (and the title or any other style
# attributes) in the constructor:
h_simple = Hist(10, -4, 12, name='my hist', title='Some Data',
drawstyle='hist',
legendstyle='F',
fillstyle='/')
# fill the histogram
for i in range(1000):
# all ROOT CamelCase methods are aliased by equivalent snake_case methods
# so you can call fill() instead of Fill()
h_simple.Fill(random.gauss(4, 3))
# easily set visual attributes
h_simple.linecolor = 'blue'
h_simple.fillcolor = 'green'
h_simple.fillstyle = '/'
# attributes may be accessed in the same way
print(h_simple.name)
print(h_simple.title)
print(h_simple.markersize)
# plot
canvas = Canvas(width=700, height=500)
canvas.SetLeftMargin(0.15)
canvas.SetBottomMargin(0.15)
canvas.SetTopMargin(0.10)
canvas.SetRightMargin(0.05)
h_simple.Draw()
# create the legend
legend = Legend([h_simple], pad=canvas,
header='Header',
leftmargin=0.05,
rightmargin=0.5)
legend.Draw()
# 2D and 3D histograms are handled in the same way
# the constructor arguments are repetitions of #bins, left bound, right bound.
h2d = Hist2D(10, 0, 1, 50, -40, 10, name='2d hist')
h3d = Hist3D(3, -1, 4, 10, -1000, -200, 2, 0, 1, name='3d hist')
# variable-width bins may be created by passing the bin edges directly:
h1d_variable = Hist([1, 4, 10, 100])
h2d_variable = Hist2D([2, 4, 7, 100, 200], [-100, -50, 0, 10, 20])
h3d_variable = Hist3D([1, 3, 10], [20, 50, 100], [-10, -5, 10, 20])
# variable-width and constant-width bins can be mixed:
h2d_mixed = Hist2D([2, 10, 30], 10, 1, 5)
# wait for you to close all open canvases before exiting
# wait() will have no effect if ROOT is in batch mode:
# ROOT.gROOT.SetBatch(True)
wait()
|
mick-d/nipype
|
refs/heads/master
|
nipype/interfaces/tests/test_auto_CommandLine.py
|
1
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..base import CommandLine
def test_CommandLine_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = CommandLine.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
|
ArthurGarnier/SickRage
|
refs/heads/master
|
lib/hachoir_parser/program/nds.py
|
58
|
"""
Nintendo DS .nds game file parser
File format references:
- http://www.bottledlight.com/ds/index.php/FileFormats/NDSFormat
- http://imrannazar.com/The-Smallest-NDS-File
- http://darkfader.net/ds/files/ndstool.cpp
- http://crackerscrap.com/docs/dsromstructure.html
- http://nocash.emubase.de/gbatek.htm
"""
from hachoir_parser import Parser
from hachoir_core.field import (ParserError,
UInt8, UInt16, UInt32, UInt64, String, RawBytes, SubFile, FieldSet, NullBits, Bit, Bits, Bytes,
SeekableFieldSet, RootSeekableFieldSet)
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
"""
CRC16 Calculation
Modified from:
http://www.mail-archive.com/python-list@python.org/msg47844.html
Original License:
crc16.py by Bryan G. Olson, 2005
This module is free software and may be used and
distributed under the same terms as Python itself.
"""
class CRC16:
_table = None
def _initTable (self):
from array import array
# CRC-16 poly: p(x) = x**16 + x**15 + x**2 + 1
# top bit implicit, reflected
poly = 0xa001
CRC16._table = array('H')
for byte in range(256):
crc = 0
for bit in range(8):
if (byte ^ crc) & 1:
crc = (crc >> 1) ^ poly
else:
crc >>= 1
byte >>= 1
CRC16._table.append(crc)
def checksum (self, string, value):
if CRC16._table is None:
self._initTable()
for ch in string:
value = self._table[ord(ch) ^ (value & 0xff)] ^ (value >> 8)
return value
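# Illustrative use of the helper above (the input bytes are an example, not
# from the original source): run the checksum over a byte string with the
# customary 0xffff seed, exactly as the Crc16 field class below does.
#
#     crc = CRC16().checksum("\x01\x02\x03\x04", 0xffff)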
class Crc16(UInt16):
"16 bit field for calculating and comparing CRC-16 of specified string"
def __init__(self, parent, name, targetBytes):
UInt16.__init__(self, parent, name)
self.targetBytes = targetBytes
def createDescription(self):
crc = CRC16().checksum(self.targetBytes, 0xffff)
if crc == self.value:
return "matches CRC of %d bytes" % len(self.targetBytes)
else:
return "mismatch (calculated CRC %d for %d bytes)" % (crc, len(self.targetBytes))
class FileNameDirTable(FieldSet):
static_size = (4+2+2)*8
def createFields(self):
yield UInt32(self, "entry_start")
yield UInt16(self, "entry_file_id")
yield UInt16(self, "parent_id")
def createDescription(self):
return "first file id: %d; parent directory id: %d (%d)" % (self["entry_file_id"].value, self["parent_id"].value, self["parent_id"].value & 0xFFF)
class FileNameEntry(FieldSet):
def createFields(self):
yield Bits(self, "name_len", 7)
yield Bit(self, "is_directory")
yield String(self, "name", self["name_len"].value)
if self["is_directory"].value:
yield UInt16(self, "dir_id")
def createDescription(self):
s = ""
if self["is_directory"].value:
s = "[D] "
return s + self["name"].value
class Directory(FieldSet):
def createFields(self):
while True:
fne = FileNameEntry(self, "entry[]")
if fne["name_len"].value == 0:
yield UInt8(self, "end_marker")
break
yield fne
class FileNameTable(SeekableFieldSet):
def createFields(self):
self.startOffset = self.absolute_address / 8
# parent_id of first FileNameDirTable contains number of directories:
dt = FileNameDirTable(self, "dir_table[]")
numDirs = dt["parent_id"].value
yield dt
for i in range(1, numDirs):
yield FileNameDirTable(self, "dir_table[]")
for i in range(0, numDirs):
dt = self["dir_table[%d]" % i]
offset = self.startOffset + dt["entry_start"].value
self.seekByte(offset, relative=False)
yield Directory(self, "directory[]")
class FATFileEntry(FieldSet):
static_size = 2*4*8
def createFields(self):
yield UInt32(self, "start")
yield UInt32(self, "end")
def createDescription(self):
return "start: %d; size: %d" % (self["start"].value, self["end"].value - self["start"].value)
class FATContent(FieldSet):
def createFields(self):
num_entries = self.parent["header"]["fat_size"].value / 8
for i in range(0, num_entries):
yield FATFileEntry(self, "entry[]")
class BannerTile(FieldSet):
static_size = 32*8
def createFields(self):
for y in range(8):
for x in range(8):
yield Bits(self, "pixel[%d,%d]" % (x,y), 4)
class BannerIcon(FieldSet):
static_size = 16*32*8
def createFields(self):
for y in range(4):
for x in range(4):
yield BannerTile(self, "tile[%d,%d]" % (x,y))
class NdsColor(FieldSet):
static_size = 16
def createFields(self):
yield Bits(self, "red", 5)
yield Bits(self, "green", 5)
yield Bits(self, "blue", 5)
yield NullBits(self, "pad", 1)
def createDescription(self):
return "#%02x%02x%02x" % (self["red"].value << 3, self["green"].value << 3, self["blue"].value << 3)
class Banner(FieldSet):
static_size = 2112*8
def createFields(self):
yield UInt16(self, "version")
# CRC of this structure, excluding first 32 bytes:
yield Crc16(self, "crc", self.stream.readBytes(self.absolute_address+(32*8), (2112-32)))
yield RawBytes(self, "reserved", 28)
yield BannerIcon(self, "icon_data")
for i in range(0, 16):
yield NdsColor(self, "palette_color[]")
yield String(self, "title_jp", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_en", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_fr", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_de", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_it", 256, charset="UTF-16-LE", truncate="\0")
yield String(self, "title_es", 256, charset="UTF-16-LE", truncate="\0")
class Overlay(FieldSet):
static_size = 8*4*8
def createFields(self):
yield UInt32(self, "id")
yield textHandler(UInt32(self, "ram_address"), hexadecimal)
yield UInt32(self, "ram_size")
yield UInt32(self, "bss_size")
yield textHandler(UInt32(self, "init_start_address"), hexadecimal)
yield textHandler(UInt32(self, "init_end_address"), hexadecimal)
yield UInt32(self, "file_id")
yield RawBytes(self, "reserved[]", 4)
def createDescription(self):
return "file #%d, %d (+%d) bytes to 0x%08x" % (
self["file_id"].value, self["ram_size"].value, self["bss_size"].value, self["ram_address"].value)
class SecureArea(FieldSet):
static_size=2048*8
def createFields(self):
yield textHandler(UInt64(self, "id"), hexadecimal)
if self["id"].value == 0xe7ffdeffe7ffdeff: # indicates that secure area is decrypted
yield Bytes(self, "fixed[]", 6) # always \xff\xde\xff\xe7\xff\xde
yield Crc16(self, "header_crc16", self.stream.readBytes(self.absolute_address+(16*8), 2048-16))
yield RawBytes(self, "unknown[]", 2048-16-2)
yield Bytes(self, "fixed[]", 2) # always \0\0
else:
yield RawBytes(self, "encrypted[]", 2048-8)
class DeviceSize(UInt8):
def createDescription(self):
return "%d Mbit" % ((2**(20+self.value)) / (1024*1024))
class Header(FieldSet):
def createFields(self):
yield String(self, "game_title", 12, truncate="\0")
yield String(self, "game_code", 4)
yield String(self, "maker_code", 2)
yield UInt8(self, "unit_code")
yield UInt8(self, "device_code")
yield DeviceSize(self, "card_size")
yield String(self, "card_info", 9)
yield UInt8(self, "rom_version")
yield Bits(self, "unknown_flags[]", 2)
yield Bit(self, "autostart_flag")
yield Bits(self, "unknown_flags[]", 5)
yield UInt32(self, "arm9_source", "ARM9 ROM offset")
yield textHandler(UInt32(self, "arm9_execute_addr", "ARM9 entry address"), hexadecimal)
yield textHandler(UInt32(self, "arm9_copy_to_addr", "ARM9 RAM address"), hexadecimal)
yield UInt32(self, "arm9_bin_size", "ARM9 code size")
yield UInt32(self, "arm7_source", "ARM7 ROM offset")
yield textHandler(UInt32(self, "arm7_execute_addr", "ARM7 entry address"), hexadecimal)
yield textHandler(UInt32(self, "arm7_copy_to_addr", "ARM7 RAM address"), hexadecimal)
yield UInt32(self, "arm7_bin_size", "ARM7 code size")
yield UInt32(self, "filename_table_offset")
yield UInt32(self, "filename_table_size")
yield UInt32(self, "fat_offset")
yield UInt32(self, "fat_size")
yield UInt32(self, "arm9_overlay_src")
yield UInt32(self, "arm9_overlay_size")
yield UInt32(self, "arm7_overlay_src")
yield UInt32(self, "arm7_overlay_size")
yield textHandler(UInt32(self, "ctl_read_flags"), hexadecimal)
yield textHandler(UInt32(self, "ctl_init_flags"), hexadecimal)
yield UInt32(self, "banner_offset")
yield Crc16(self, "secure_crc16", self.stream.readBytes(0x4000*8, 0x4000))
yield UInt16(self, "rom_timeout")
yield UInt32(self, "arm9_unk_addr")
yield UInt32(self, "arm7_unk_addr")
yield UInt64(self, "unenc_mode_magic")
yield UInt32(self, "rom_size")
yield UInt32(self, "header_size")
yield RawBytes(self, "unknown[]", 36)
yield String(self, "passme_autoboot_detect", 4)
yield RawBytes(self, "unknown[]", 16)
yield RawBytes(self, "gba_logo", 156)
yield Crc16(self, "logo_crc16", self.stream.readBytes(0xc0*8, 156))
yield Crc16(self, "header_crc16", self.stream.readBytes(0, 350))
yield UInt32(self, "debug_rom_offset")
yield UInt32(self, "debug_size")
yield textHandler(UInt32(self, "debug_ram_address"), hexadecimal)
class NdsFile(Parser, RootSeekableFieldSet):
PARSER_TAGS = {
"id": "nds_file",
"category": "program",
"file_ext": ("nds",),
"mime": (u"application/octet-stream",),
"min_size": 352 * 8, # just a minimal header
"description": "Nintendo DS game file",
}
endian = LITTLE_ENDIAN
def validate(self):
try:
header = self["header"]
except Exception, e:
return False
return (self.stream.readBytes(0, 1) != "\0"
and (header["device_code"].value & 7) == 0
and header["header_size"].value >= 352
and header["card_size"].value < 15 # arbitrary limit at 32Gbit
and header["arm9_bin_size"].value > 0 and header["arm9_bin_size"].value <= 0x3bfe00
and header["arm7_bin_size"].value > 0 and header["arm7_bin_size"].value <= 0x3bfe00
and header["arm9_source"].value + header["arm9_bin_size"].value < self._size
and header["arm7_source"].value + header["arm7_bin_size"].value < self._size
and header["arm9_execute_addr"].value >= 0x02000000 and header["arm9_execute_addr"].value <= 0x023bfe00
and header["arm9_copy_to_addr"].value >= 0x02000000 and header["arm9_copy_to_addr"].value <= 0x023bfe00
and header["arm7_execute_addr"].value >= 0x02000000 and header["arm7_execute_addr"].value <= 0x03807e00
and header["arm7_copy_to_addr"].value >= 0x02000000 and header["arm7_copy_to_addr"].value <= 0x03807e00
)
def createFields(self):
# Header
yield Header(self, "header")
# Secure Area
if self["header"]["arm9_source"].value >= 0x4000 and self["header"]["arm9_source"].value < 0x8000:
secStart = self["header"]["arm9_source"].value & 0xfffff000
self.seekByte(secStart, relative=False)
yield SecureArea(self, "secure_area", size=0x8000-secStart)
# ARM9 binary
self.seekByte(self["header"]["arm9_source"].value, relative=False)
yield RawBytes(self, "arm9_bin", self["header"]["arm9_bin_size"].value)
# ARM7 binary
self.seekByte(self["header"]["arm7_source"].value, relative=False)
yield RawBytes(self, "arm7_bin", self["header"]["arm7_bin_size"].value)
# File Name Table
if self["header"]["filename_table_size"].value > 0:
self.seekByte(self["header"]["filename_table_offset"].value, relative=False)
yield FileNameTable(self, "filename_table", size=self["header"]["filename_table_size"].value*8)
# FAT
if self["header"]["fat_size"].value > 0:
self.seekByte(self["header"]["fat_offset"].value, relative=False)
yield FATContent(self, "fat_content", size=self["header"]["fat_size"].value*8)
# banner
if self["header"]["banner_offset"].value > 0:
self.seekByte(self["header"]["banner_offset"].value, relative=False)
yield Banner(self, "banner")
# ARM9 overlays
if self["header"]["arm9_overlay_src"].value > 0:
self.seekByte(self["header"]["arm9_overlay_src"].value, relative=False)
numOvls = self["header"]["arm9_overlay_size"].value / (8*4)
for i in range(numOvls):
yield Overlay(self, "arm9_overlay[]")
# files
if self["header"]["fat_size"].value > 0:
for field in self["fat_content"]:
if field["end"].value > field["start"].value:
self.seekByte(field["start"].value, relative=False)
yield SubFile(self, "file[]", field["end"].value - field["start"].value)
|
chouseknecht/ansible
|
refs/heads/devel
|
lib/ansible/plugins/inventory/kubevirt.py
|
37
|
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
name: kubevirt
plugin_type: inventory
author:
- KubeVirt Team (@kubevirt)
version_added: "2.8"
short_description: KubeVirt inventory source
extends_documentation_fragment:
- inventory_cache
- constructed
description:
- Fetch running VirtualMachines for one or more namespaces.
- Groups by namespace, namespace_vms and labels.
- Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
options:
plugin:
description: token that ensures this is a source file for the 'kubevirt' plugin.
required: True
choices: ['kubevirt']
type: str
host_format:
description:
- Specify the format of the host in the inventory group.
default: "{namespace}-{name}-{uid}"
connections:
type: list
description:
- Optional list of cluster connection settings. If no connections are provided, the default
I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
the active user is authorized to access.
suboptions:
name:
description:
- Optional name to assign to the cluster. If not provided, a name is constructed from the server
and port.
type: str
kubeconfig:
description:
- Path to an existing Kubernetes config file. If not provided, and no other connection
options are provided, the OpenShift client will attempt to load the default
configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG
environment variable.
type: str
context:
description:
- The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
variable.
type: str
host:
description:
- Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
type: str
api_key:
description:
- Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
variable.
type: str
username:
description:
- Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
environment variable.
type: str
password:
description:
- Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
environment variable.
type: str
cert_file:
description:
- Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
environment variable.
type: str
key_file:
description:
- Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
environment variable.
type: str
ssl_ca_cert:
description:
- Path to a CA certificate used to authenticate with the API. Can also be specified via
K8S_AUTH_SSL_CA_CERT environment variable.
type: str
verify_ssl:
description:
- "Whether or not to verify the API server's SSL certificates. Can also be specified via
K8S_AUTH_VERIFY_SSL environment variable."
type: bool
namespaces:
description:
- List of namespaces. If not specified, will fetch all virtual machines from all namespaces the user is authorized
to access.
type: list
network_name:
description:
- In case multiple networks are attached to the virtual machine, define which interface should be returned as the primary IP
address.
type: str
api_version:
description:
- "Specify the KubeVirt API version."
type: str
annotation_variable:
description:
- "Specify the name of the annotation which provides data, which should be used as inventory host variables."
- "Note, that the value in ansible annotations should be json."
type: str
default: 'ansible'
requirements:
- "openshift >= 0.6"
- "PyYAML >= 3.11"
'''
EXAMPLES = '''
# File must be named kubevirt.yaml or kubevirt.yml
# Authenticate with token, and return all virtual machines for all namespaces
plugin: kubevirt
connections:
- host: https://kubevirt.io
api_key: xxxxxxxxxxxxxxxx
verify_ssl: false
# Use default config (~/.kube/config) file and active context, and return vms with interfaces
# connected to network myovsnetwork and from namespace vms
plugin: kubevirt
connections:
- namespaces:
- vms
network_name: myovsnetwork
'''
import json
from ansible.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc
try:
from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
pass
API_VERSION = 'kubevirt.io/v1alpha3'
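# Thin wrapper around the generic k8s inventory plugin: it reuses its
# connection handling and only overrides object fetching so that
# VirtualMachineInstance resources become inventory hosts, grouped by
# namespace, namespace_vms and labels.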
class InventoryModule(K8sInventoryModule):
NAME = 'kubevirt'
def setup(self, config_data, cache, cache_key):
self.config_data = config_data
super(InventoryModule, self).setup(config_data, cache, cache_key)
def fetch_objects(self, connections):
client = self.get_api_client()
vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
if connections:
for connection in connections:
client = self.get_api_client(**connection)
name = connection.get('name', self.get_default_host_name(client.configuration.host))
if connection.get('namespaces'):
namespaces = connection['namespaces']
else:
namespaces = self.get_available_namespaces(client)
interface_name = connection.get('network_name')
api_version = connection.get('api_version', API_VERSION)
annotation_variable = connection.get('annotation_variable', 'ansible')
for namespace in namespaces:
self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
else:
name = self.get_default_host_name(client.configuration.host)
namespaces = self.get_available_namespaces(client)
interface_name = self.config_data.get('network_name')
api_version = self.config_data.get('api_version', API_VERSION)
annotation_variable = self.config_data.get('annotation_variable', 'ansible')
for namespace in namespaces:
self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
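# Turn every running VirtualMachineInstance of the namespace that reports an
# IP address into an inventory host, grouped under the connection name,
# namespace_<ns>, namespace_<ns>_vms and one label_<key>_<value> group per label.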
def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
try:
obj = v1_vm.get(namespace=namespace)
except DynamicApiError as exc:
self.display.debug(exc)
raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))
namespace_group = 'namespace_{0}'.format(namespace)
namespace_vms_group = '{0}_vms'.format(namespace_group)
name = self._sanitize_group_name(name)
namespace_group = self._sanitize_group_name(namespace_group)
namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
self.inventory.add_group(name)
self.inventory.add_group(namespace_group)
self.inventory.add_child(name, namespace_group)
self.inventory.add_group(namespace_vms_group)
self.inventory.add_child(namespace_group, namespace_vms_group)
for vm in obj.items:
if not (vm.status and vm.status.interfaces):
continue
# Find interface by its name:
if interface_name is None:
interface = vm.status.interfaces[0]
else:
interface = next(
(i for i in vm.status.interfaces if i.name == interface_name),
None
)
# If interface is not found or IP address is not reported skip this VM:
if interface is None or interface.ipAddress is None:
continue
vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
vm_ip = interface.ipAddress
vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)
self.inventory.add_host(vm_name)
if vm.metadata.labels:
# create a group for each label_value
for key, value in vm.metadata.labels:
group_name = 'label_{0}_{1}'.format(key, value)
group_name = self._sanitize_group_name(group_name)
self.inventory.add_group(group_name)
self.inventory.add_child(group_name, vm_name)
vm_labels = dict(vm.metadata.labels)
else:
vm_labels = {}
self.inventory.add_child(namespace_vms_group, vm_name)
# add hostvars
self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
self.inventory.set_variable(vm_name, 'labels', vm_labels)
self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
self.inventory.set_variable(vm_name, 'object_type', 'vm')
self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)
# Add all variables which are listed in 'ansible' annotation:
annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
for k, v in annotations_data.items():
self.inventory.set_variable(vm_name, k, v)
def verify_file(self, path):
if super(InventoryModule, self).verify_file(path):
if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
return True
return False
|
SINGROUP/pycp2k
|
refs/heads/master
|
pycp2k/classes/_mo_magnitude2.py
|
1
|
from pycp2k.inputsection import InputSection
from ._each172 import _each172
class _mo_magnitude2(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Section_parameters = None
self.Add_last = None
self.Common_iteration_levels = None
self.Filename = None
self.Log_print_key = None
self.EACH = _each172()
self._name = "MO_MAGNITUDE"
self._keywords = {'Log_print_key': 'LOG_PRINT_KEY', 'Filename': 'FILENAME', 'Add_last': 'ADD_LAST', 'Common_iteration_levels': 'COMMON_ITERATION_LEVELS'}
self._subsections = {'EACH': 'EACH'}
self._attributes = ['Section_parameters']
|
perlygatekeeper/glowing-robot
|
refs/heads/master
|
Little_Alchemy_2/Scraper_python/env/lib/python3.7/site-packages/pip/_vendor/certifi/__init__.py
|
13
|
from .core import where
__version__ = "2019.06.16"
|
rpmcpp/Audacity
|
refs/heads/master
|
lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/compiler_fc.py
|
287
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,imp,types
from waflib import Utils,Configure,Options,Logs,Errors
from waflib.Tools import fc
fc_compiler={'win32':['gfortran','ifort'],'darwin':['gfortran','g95','ifort'],'linux':['gfortran','g95','ifort'],'java':['gfortran','g95','ifort'],'default':['gfortran'],'aix':['gfortran']}
def __list_possible_compiler(platform):
try:
return fc_compiler[platform]
except KeyError:
return fc_compiler["default"]
def configure(conf):
try:test_for_compiler=conf.options.check_fc
except AttributeError:conf.fatal("Add options(opt): opt.load('compiler_fc')")
for compiler in test_for_compiler.split():
conf.env.stash()
conf.start_msg('Checking for %r (fortran compiler)'%compiler)
try:
conf.load(compiler)
except conf.errors.ConfigurationError ,e:
conf.env.revert()
conf.end_msg(False)
Logs.debug('compiler_fortran: %r'%e)
else:
if conf.env['FC']:
conf.end_msg(conf.env.get_flat('FC'))
conf.env.COMPILER_FORTRAN=compiler
break
conf.end_msg(False)
else:
conf.fatal('could not configure a fortran compiler!')
def options(opt):
opt.load_special_tools('fc_*.py')
build_platform=Utils.unversioned_sys_platform()
detected_platform=Options.platform
possible_compiler_list=__list_possible_compiler(detected_platform)
test_for_compiler=' '.join(possible_compiler_list)
fortran_compiler_opts=opt.add_option_group("Fortran Compiler Options")
fortran_compiler_opts.add_option('--check-fortran-compiler',default="%s"%test_for_compiler,help='On this platform (%s) the following Fortran Compiler will be checked by default: "%s"'%(detected_platform,test_for_compiler),dest="check_fc")
for compiler in test_for_compiler.split():
opt.load('%s'%compiler)
|
esatterly/splunk-cassandra
|
refs/heads/master
|
bin/dbinsert-command.py
|
1
|
#!/usr/bin/env python
#
# Copyright 2011 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from extern import extern
extern("dbinsert.py")
|
JoeriHermans/Intelligent-Automation-System
|
refs/heads/master
|
scripts/python/light.py
|
1
|
#
# IAS Basic device framework.
#
# Author: Joeri Hermans
#
import sys
import socket
import struct
import os
# Global members, which are required for the communication
# with the remote IAS controller.
gDeviceIdentifier = sys.argv[1]
gControllerAddress = sys.argv[2]
gControllerPort = int(sys.argv[3])
gSocket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
gSocket.connect((gControllerAddress,gControllerPort))
gRunning = True
# Light state members.
gState = False
gCommandOn = "echo '255' > /sys/class/leds/tpacpi\:\:thinklight/brightness"
gCommandOff = "echo '0' > /sys/class/leds/tpacpi\:\:thinklight/brightness"
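# Wire protocol used below: every message starts with a one-byte type
# (0x00 = authenticate, 0x01 = state update / feature command), followed
# by one-byte length fields and the corresponding ASCII payloads.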
def updateState( stateIdentifier , newValue ):
global gSocket
stateIdentifierLength = len(stateIdentifier)
newValueLength = len(newValue)
data = struct.pack("!BBB",0x01,stateIdentifierLength,newValueLength);
data += str.encode(stateIdentifier)
data += str.encode(newValue)
gSocket.sendall(data)
def authenticate():
global gDeviceIdentifier
global gSocket
identifierLength = len(gDeviceIdentifier)
message = struct.pack("!BB",0x00,identifierLength) + bytes(gDeviceIdentifier.encode("ascii"))
gSocket.sendall(message)
def toggle():
global gState
if( gState == True ):
off()
else:
on()
def on():
global gCommandOn
global gState
gState = True
os.system(gCommandOn)
updateState("state","1")
def off():
global gCommandOff
global gState
gState = False
os.system(gCommandOff)
updateState("state","0")
def processFeature(featureIdentifier,parameter):
if( featureIdentifier == "toggle" ):
toggle()
elif( featureIdentifier == "on" ):
on()
elif( featureIdentifier == "off" ):
off()
def processCommand():
global gSocket
global gRunning
data = gSocket.recv(3)
data = struct.unpack("!BBB",data)
if( data[0] != 0x01 ):
gRunning = False
return
featureIdentifierLength = data[1]
parameterLength = data[2]
featureIdentifier = gSocket.recv(featureIdentifierLength)
featureIdentifier = featureIdentifier.decode("ascii")
if( parameterLength > 0 ):
parameter = gSocket.recv(parameterLength)
parameter = parameter.decode("ascii")
else:
parameter = ""
processFeature(featureIdentifier,parameter)
def processCommands():
global gRunning
while( gRunning ):
try:
processCommand()
except Exception as e:
print(e)
gRunning = False
def main():
authenticate()
processCommands()
if( __name__ == "__main__" ):
main()
|
CiscoUcs/Ironic
|
refs/heads/master
|
build/lib/ironic/openstack/common/eventlet_backdoor.py
|
5
|
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import copy
import errno
import gc
import logging
import os
import pprint
import socket
import sys
import traceback
import eventlet.backdoor
import greenlet
from oslo_config import cfg
from ironic.openstack.common._i18n import _LI
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo-config-generator.
"""
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range, ex,
help_for_backdoor_port)
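# Bind to the first free port in [start_port, end_port]; any error other
# than EADDRINUSE (or exhausting the range) is re-raised.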
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
|
Passtechsoft/TPEAlpGen
|
refs/heads/master
|
blender/release/scripts/addons/io_import_gimp_image_to_scene.py
|
3
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Import GIMP Image to Scene (.xcf/.xjt)",
"author": "Daniel Salazar (ZanQdo)",
"version": (2, 0, 1),
"blender": (2, 73, 0),
"location": "File > Import > GIMP Image to Scene(.xcf/.xjt)",
"description": "Imports GIMP multilayer image files as a series of multiple planes",
"warning": "XCF import requires xcftools installed",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/GIMPImageToScene",
"category": "Import-Export",
}
"""
This script imports GIMP layered image files into 3D Scenes (.xcf, .xjt)
"""
def main(report, File, Path, LayerViewers, MixerViewers, LayerOffset,
LayerScale, OpacityMode, AlphaMode, ShadelessMats,
SetCamera, SetupCompo, GroupUntagged, Ext):
#-------------------------------------------------
#Folder = '['+File.rstrip(Ext)+']'+'_images/'
Folder = 'images_'+'['+File.rstrip(Ext)+']/'
if not bpy.data.is_saved:
PathSaveRaw = Path+Folder
PathSave = PathSaveRaw.replace(' ', '\ ')
try: os.mkdir(PathSaveRaw)
except: pass
else:
PathSave = bpy.data.filepath
RSlash = PathSave.rfind('/')
PathSaveRaw = PathSave[:RSlash+1]+Folder
PathSave = PathSaveRaw.replace(' ', '\ ')
try: os.mkdir(PathSaveRaw)
except: pass
PathSaveRaw = bpy.path.relpath(PathSaveRaw)+'/'
PathRaw = Path
Path = Path.replace(' ', '\ ')
if Ext == '.xjt':
ExtSave = '.jpg'
#-------------------------------------------------
# EXTRACT XJT
import tarfile
IMG = tarfile.open ('%s%s' % (PathRaw, File))
PRP = IMG.extractfile('PRP')
Members = IMG.getmembers()
for Member in Members:
Name = Member.name
if Name.startswith('l') and Name.endswith('.jpg'):
IMG.extract(Name, path=PathSaveRaw)
#-------------------------------------------------
# INFO XJT
IMGs = []
for Line in PRP.readlines():
Line = str(Line)
if Line.startswith("b'GIMP_XJ_IMAGE"):
for Segment in Line.split():
if Segment.startswith('w/h:'):
ResX, ResY = map (int, Segment[4:].split(','))
if Line.startswith(("b'L", "b'l")):
"""The "nice" method to check if layer has alpha channel
sadly GIMP sometimes decides not to export an alpha channel
if it's pure white so we are not completly sure here yet"""
if Line.startswith("b'L"): HasAlpha = True
else: HasAlpha = False
md = None
op = 1
ox, oy = 0,0
for Segment in Line.split():
if Segment.startswith("b'"):
imageFile = 'l' + Segment[3:] + '.jpg'
imageFileAlpha ='la'+Segment[3:]+'.jpg'
"""Phisically double checking if alpha image exists
now we can be sure! (damn GIMP)"""
if HasAlpha:
if not os.path.isfile(PathSaveRaw+imageFileAlpha): HasAlpha = False
# Get Width and Height from the image by scanning its JPEG SOF markers
data = open(PathSaveRaw+imageFile, "rb").read()
hexList = []
for ch in data:
byt = "%02X" % ch
hexList.append(byt)
for k in range(len(hexList)-1):
if hexList[k] == 'FF' and (hexList[k+1] == 'C0' or hexList[k+1] == 'C2'):
ow = int(hexList[k+7],16)*256 + int(hexList[k+8],16)
oh = int(hexList[k+5],16)*256 + int(hexList[k+6],16)
elif Segment.startswith('md:'): # mode
md = Segment[3:]
elif Segment.startswith('op:'): # opacity
op = float(Segment[3:])*.01
elif Segment.startswith('o:'): # origin
ox, oy = map(int, Segment[2:].split(','))
elif Segment.startswith('n:'): # name
n = Segment[3:-4]
OpenBracket = n.find ('[')
CloseBracket = n.find (']')
if OpenBracket != -1 and CloseBracket != -1:
RenderLayer = n[OpenBracket+1:CloseBracket]
NameShort = n[:OpenBracket]
else:
RenderLayer = n
NameShort = n
os.rename(PathSaveRaw+imageFile, PathSaveRaw+NameShort+'.jpg')
if HasAlpha: os.rename(PathSaveRaw+imageFileAlpha, PathSaveRaw+NameShort+'_A'+'.jpg')
IMGs.append({'LayerMode':md, 'LayerOpacity':op,
'LayerName':n, 'LayerNameShort':NameShort,
'RenderLayer':RenderLayer, 'LayerCoords':[ow, oh, ox, oy], 'HasAlpha':HasAlpha})
else: # Ext == '.xcf':
ExtSave = '.png'
#-------------------------------------------------
# CONFIG
XCFInfo = 'xcfinfo'
XCF2PNG = 'xcf2png'
#-------------------------------------------------
# INFO XCF
try:
Info = subprocess.check_output((XCFInfo, Path+File))
except FileNotFoundError as e:
if XCFInfo in str(e):
report({'ERROR'}, "Please install xcftools, xcfinfo seems to be missing (%s)" % str(e))
return False
else:
raise e
Info = Info.decode()
IMGs = []
for Line in Info.split('\n'):
if Line.startswith ('+'):
Line = Line.split(' ', 4)
RenderLayer = Line[4]
OpenBracket = RenderLayer.find ('[')
CloseBracket = RenderLayer.find (']')
if OpenBracket != -1 and CloseBracket != -1:
RenderLayer = RenderLayer[OpenBracket+1:CloseBracket]
NameShort = Line[4][:OpenBracket]
else:
NameShort = Line[4].rstrip()
if GroupUntagged:
RenderLayer = '__Undefined__'
else:
RenderLayer = NameShort
LineThree = Line[3]
Slash = LineThree.find('/')
if Slash == -1:
Mode = LineThree
Opacity = 1
else:
Mode = LineThree[:Slash]
Opacity = float(LineThree[Slash+1:LineThree.find('%')])*.01
IMGs.append ({
'LayerMode': Mode,
'LayerOpacity': Opacity,
'LayerName': Line[4].rstrip(),
'LayerNameShort': NameShort,
'LayerCoords': list(map(int, Line[1].replace('x', ' ').replace('+', ' +').replace('-', ' -').split())),
'RenderLayer': RenderLayer,
'HasAlpha': True,
})
elif Line.startswith('Version'):
ResX, ResY = map (int, Line.split()[2].split('x'))
#-------------------------------------------------
# EXTRACT XCF
if OpacityMode == 'BAKE':
Opacity = ()
else:
Opacity = ("--percent", "100")
xcf_path = Path + File
for Layer in IMGs:
png_path = "%s%s.png" % (PathSave, Layer['LayerName'].replace(' ', '_'))
subprocess.call((XCF2PNG, "-C", xcf_path, "-o", png_path, Layer['LayerName']) + Opacity)
#-------------------------------------------------
Scene = bpy.context.scene
#-------------------------------------------------
# CAMERA
if SetCamera:
bpy.ops.object.camera_add(location=(0, 0, 10))
Camera = bpy.context.active_object.data
Camera.type = 'ORTHO'
Camera.ortho_scale = ResX * .01
#-------------------------------------------------
# RENDER SETTINGS
Render = Scene.render
if SetCamera:
Render.resolution_x = ResX
Render.resolution_y = ResY
Render.resolution_percentage = 100
Render.alpha_mode = 'TRANSPARENT'
#-------------------------------------------------
# 3D VIEW SETTINGS
Scene.game_settings.material_mode = 'GLSL'
Areas = bpy.context.screen.areas
for Area in Areas:
if Area.type == 'VIEW_3D':
Area.spaces.active.viewport_shade = 'TEXTURED'
Area.spaces.active.show_textured_solid = True
Area.spaces.active.show_floor = False
#-------------------------------------------------
# 3D LAYERS
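# Creates one textured plane per GIMP layer: the plane is sized and
# positioned from the layer's pixel coordinates, gets a material built from
# the extracted image (plus a separate alpha image for .xjt) and, when
# compositing is enabled, its own RenderLayer on a dedicated scene layer.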
def Make3DLayer (Name, NameShort, Z, Coords, RenderLayer, LayerMode, LayerOpacity, HasAlpha):
# RenderLayer
if SetupCompo:
if not bpy.context.scene.render.layers.get(RenderLayer):
bpy.ops.scene.render_layer_add()
LayerActive = bpy.context.scene.render.layers.active
LayerActive.name = RenderLayer
LayerActive.use_pass_vector = True
LayerActive.use_sky = False
LayerActive.use_edge_enhance = False
LayerActive.use_strand = False
LayerActive.use_halo = False
global LayerNum
for i in range (0,20):
if not i == LayerNum:
LayerActive.layers[i] = False
bpy.context.scene.layers[LayerNum] = True
LayerFlags[RenderLayer] = bpy.context.scene.render.layers.active.layers
LayerList.append([RenderLayer, LayerMode, LayerOpacity])
LayerNum += 1
# Object
bpy.ops.mesh.primitive_plane_add(view_align=False,
enter_editmode=False,
rotation=(0, 0, 0))
bpy.ops.object.transform_apply(location=False, rotation=True, scale=False)
Active = bpy.context.active_object
if SetupCompo:
Active.layers = LayerFlags[RenderLayer]
Active.location = (
(float(Coords[2])-(ResX*0.5))*LayerScale,
(-float(Coords[3])+(ResY*0.5))*LayerScale, Z)
for Vert in Active.data.vertices:
Vert.co[0] += 1
Vert.co[1] += -1
Active.dimensions = float(Coords[0])*LayerScale, float(Coords[1])*LayerScale, 0
bpy.ops.object.transform_apply(location=False, rotation=False, scale=True)
bpy.ops.object.origin_set(type='ORIGIN_GEOMETRY', center='MEDIAN')
Active.show_wire = True
Active.name = NameShort
bpy.ops.mesh.uv_texture_add()
# Material
'''if bpy.data.materials.get(NameShort):
Mat = bpy.data.materials[NameShort]
if not Active.material_slots:
bpy.ops.object.material_slot_add()
Active.material_slots[0].material = Mat
else:'''
Mat = bpy.data.materials.new(NameShort)
Mat.diffuse_color = (1,1,1)
Mat.use_raytrace = False
Mat.use_shadows = False
Mat.use_cast_buffer_shadows = False
Mat.use_cast_approximate = False
if HasAlpha:
Mat.use_transparency = True
if OpacityMode == 'MAT': Mat.alpha = LayerOpacity
else: Mat.alpha = 0
if ShadelessMats: Mat.use_shadeless = True
if Ext == '.xcf':
# Color & Alpha PNG
Tex = bpy.data.textures.new(NameShort, 'IMAGE')
Tex.extension = 'CLIP'
Tex.use_preview_alpha = True
Img = bpy.data.images.new(NameShort, 128, 128)
Img.source = 'FILE'
Img.alpha_mode = AlphaMode
Img.filepath = '%s%s%s' % (PathSaveRaw, Name, ExtSave)
UVFace = Active.data.uv_textures[0].data[0]
UVFace.image = Img
Tex.image = Img
Mat.texture_slots.add()
TexSlot = Mat.texture_slots[0]
TexSlot.texture = Tex
TexSlot.use_map_alpha = True
TexSlot.texture_coords = 'UV'
if OpacityMode == 'TEX': TexSlot.alpha_factor = LayerOpacity
elif OpacityMode == 'MAT': TexSlot.blend_type = 'MULTIPLY'
else: # Ext == '.xjt'
# Color JPG
Tex = bpy.data.textures.new(NameShort, 'IMAGE')
Tex.extension = 'CLIP'
Img = bpy.data.images.new(NameShort, 128, 128)
Img.source = 'FILE'
Img.filepath = '%s%s%s' % (PathSaveRaw, Name, ExtSave)
UVFace = Active.data.uv_textures[0].data[0]
UVFace.image = Img
Tex.image = Img
Mat.texture_slots.add()
TexSlot = Mat.texture_slots[0]
TexSlot.texture = Tex
TexSlot.texture_coords = 'UV'
if HasAlpha:
# Alpha JPG
Tex = bpy.data.textures.new(NameShort+'_A', 'IMAGE')
Tex.extension = 'CLIP'
Tex.use_preview_alpha = True
Img = bpy.data.images.new(NameShort+'_A', 128, 128)
Img.source = 'FILE'
Img.alpha_mode = AlphaMode
Img.filepath = '%s%s_A%s' % (PathSaveRaw, Name, ExtSave)
Img.use_alpha = False
Tex.image = Img
Mat.texture_slots.add()
TexSlot = Mat.texture_slots[1]
TexSlot.texture = Tex
TexSlot.use_map_alpha = True
TexSlot.use_map_color_diffuse = False
TexSlot.texture_coords = 'UV'
if OpacityMode == 'TEX': TexSlot.alpha_factor = LayerOpacity
elif OpacityMode == 'MAT': TexSlot.blend_type = 'MULTIPLY'
if not Active.material_slots:
bpy.ops.object.material_slot_add()
Active.material_slots[0].material = Mat
Z = 0
global LayerNum
LayerNum = 0
LayerFlags = {}
LayerList = []
for Layer in IMGs:
Make3DLayer(Layer['LayerName'].replace(' ', '_'),
Layer['LayerNameShort'].replace(' ', '_'),
Z,
Layer['LayerCoords'],
Layer['RenderLayer'],
Layer['LayerMode'],
Layer['LayerOpacity'],
Layer['HasAlpha'],
)
Z -= LayerOffset
if SetupCompo:
#-------------------------------------------------
# COMPO NODES
Scene.use_nodes = True
Tree = Scene.node_tree
for i in Tree.nodes:
Tree.nodes.remove(i)
LayerList.reverse()
Offset = 0
LayerLen = len(LayerList)
for Layer in LayerList:
Offset += 1
X_Offset = (500*Offset)
Y_Offset = (-300*Offset)
Node = Tree.nodes.new('CompositorNodeRLayers')
Node.location = (-500+X_Offset, 300+Y_Offset)
Node.name = 'R_'+ str(Offset)
Node.scene = Scene
Node.layer = Layer[0]
if LayerViewers:
Node_V = Tree.nodes.new('CompositorNodeViewer')
Node_V.name = Layer[0]
Node_V.location = (-200+X_Offset, 200+Y_Offset)
Tree.links.new(Node.outputs[0], Node_V.inputs[0])
if LayerLen > Offset:
Mode = LayerList[Offset][1] # has to go one step further
LayerOpacity = LayerList[Offset][2]
if not Mode in {'Normal', '-1'}:
Node = Tree.nodes.new('CompositorNodeMixRGB')
if OpacityMode == 'COMPO': Node.inputs['Fac'].default_value = LayerOpacity
else: Node.inputs['Fac'].default_value = 1
Node.use_alpha = True
if Mode in {'Addition', '7'}: Node.blend_type = 'ADD'
elif Mode in {'Subtract', '8'}: Node.blend_type = 'SUBTRACT'
elif Mode in {'Multiply', '3'}: Node.blend_type = 'MULTIPLY'
elif Mode in {'DarkenOnly', '9'}: Node.blend_type = 'DARKEN'
elif Mode in {'Dodge', '16'}: Node.blend_type = 'DODGE'
elif Mode in {'LightenOnly', '10'}: Node.blend_type = 'LIGHTEN'
elif Mode in {'Difference', '6'}: Node.blend_type = 'DIFFERENCE'
elif Mode in {'Divide', '15'}: Node.blend_type = 'DIVIDE'
elif Mode in {'Overlay', '5'}: Node.blend_type = 'OVERLAY'
elif Mode in {'Screen', '4'}: Node.blend_type = 'SCREEN'
elif Mode in {'Burn', '17'}: Node.blend_type = 'BURN'
elif Mode in {'Color', '13'}: Node.blend_type = 'COLOR'
elif Mode in {'Value', '14'}: Node.blend_type = 'VALUE'
elif Mode in {'Saturation', '12'}: Node.blend_type = 'SATURATION'
elif Mode in {'Hue', '11'}: Node.blend_type = 'HUE'
elif Mode in {'Softlight', '19'}: Node.blend_type = 'SOFT_LIGHT'
else: pass
else:
Node = Tree.nodes.new('CompositorNodeAlphaOver')
if OpacityMode == 'COMPO': Node.inputs['Fac'].default_value = LayerOpacity
Node.name = 'M_' + str(Offset)
Node.location = (300+X_Offset, 250+Y_Offset)
if MixerViewers:
Node_V = Tree.nodes.new('CompositorNodeViewer')
Node_V.name = Layer[0]
Node_V.location = (500+X_Offset, 350+Y_Offset)
Tree.links.new(Node.outputs[0], Node_V.inputs[0])
else:
Node = Tree.nodes.new('CompositorNodeComposite')
Node.name = 'Composite'
Node.location = (400+X_Offset, 350+Y_Offset)
Nodes = bpy.context.scene.node_tree.nodes
if LayerLen > 1:
for i in range (1, LayerLen + 1):
if i == 1:
Tree.links.new(Nodes['R_'+str(i)].outputs[0], Nodes['M_'+str(i)].inputs[1])
if 1 < i < LayerLen:
Tree.links.new(Nodes['M_'+str(i-1)].outputs[0], Nodes['M_'+str(i)].inputs[1])
if 1 < i < LayerLen+1:
Tree.links.new(Nodes['R_'+str(i)].outputs[0], Nodes['M_'+str(i-1)].inputs[2])
if i == LayerLen:
Tree.links.new(Nodes['M_'+str(i-1)].outputs[0], Nodes['Composite'].inputs[0])
else:
Tree.links.new(Nodes['R_1'].outputs[0], Nodes['Composite'].inputs[0])
for i in Tree.nodes:
i.location[0] += -250*Offset
i.location[1] += 150*Offset
return True
#------------------------------------------------------------------------
import os, subprocess
import bpy
from bpy.props import *
from math import pi
# Operator
class GIMPImageToScene(bpy.types.Operator):
""""""
bl_idname = "import.gimp_image_to_scene"
bl_label = "GIMP Image to Scene"
bl_description = "Imports GIMP multilayer image files into 3D Scenes"
bl_options = {'REGISTER', 'UNDO'}
filename = StringProperty(name="File Name",
description="Name of the file")
directory = StringProperty(name="Directory",
description="Directory of the file")
LayerViewers = BoolProperty(name="Layer Viewers",
description="Add Viewer nodes to each Render Layer node",
default=True)
MixerViewers = BoolProperty(name="Mixer Viewers",
description="Add Viewer nodes to each Mix node",
default=True)
AlphaMode = EnumProperty(name="Alpha Mode",
description="Representation of alpha information in the RGBA pixels",
items=(
('STRAIGHT', 'Straight', 'Transparent RGB and alpha pixels are unmodified'),
('PREMUL', 'Premultiplied', 'Transparent RGB pixels are multiplied by the alpha channel')),
default='STRAIGHT')
ShadelessMats = BoolProperty(name="Shadeless Material",
description="Set Materials as Shadeless",
default=True)
OpacityMode = EnumProperty(name="Opacity Mode",
description="Layer Opacity management",
items=(
('TEX', 'Texture Alpha Factor', ''),
('MAT', 'Material Alpha Value', ''),
('COMPO', 'Mixer Node Factor', ''),
('BAKE', 'Baked in Image Alpha', '')),
default='TEX')
SetCamera = BoolProperty(name="Set Camera",
description="Create an Ortho Camera matching image resolution",
default=True)
SetupCompo = BoolProperty(name="Setup Node Compositing",
description="Create a compositing node setup (will delete existing nodes)",
default=False)
GroupUntagged = BoolProperty(name="Group Untagged",
description="Layers with no tag go to a single Render Layer",
default=False)
LayerOffset = FloatProperty(name="Layer Separation",
description="Distance between each 3D Layer in the Z axis",
min=0,
default=0.50)
LayerScale = FloatProperty(name="Layer Scale",
description="Scale pixel resolution by Blender units",
min=0,
default=0.01)
def draw(self, context):
layout = self.layout
box = layout.box()
box.label('3D Layers:', icon='SORTSIZE')
box.prop(self, 'SetCamera', icon='OUTLINER_DATA_CAMERA')
box.prop(self, 'OpacityMode', icon='GHOST')
if self.OpacityMode == 'COMPO' and self.SetupCompo == False:
box.label('Tip: Enable Node Compositing', icon='INFO')
box.prop(self, 'AlphaMode', icon='IMAGE_RGB_ALPHA')
box.prop(self, 'ShadelessMats', icon='SOLID')
box.prop(self, 'LayerOffset')
box.prop(self, 'LayerScale')
box = layout.box()
box.label('Compositing:', icon='RENDERLAYERS')
box.prop(self, 'SetupCompo', icon='NODETREE')
if self.SetupCompo:
box.prop(self, 'GroupUntagged', icon='IMAGE_ZDEPTH')
box.prop(self, 'LayerViewers', icon='NODE')
box.prop(self, 'MixerViewers', icon='NODE')
def execute(self, context):
# File Path
filename = self.filename
directory = self.directory
# Settings
LayerViewers = self.LayerViewers
MixerViewers = self.MixerViewers
OpacityMode = self.OpacityMode
AlphaMode = self.AlphaMode
ShadelessMats = self.ShadelessMats
SetCamera = self.SetCamera
SetupCompo = self.SetupCompo
GroupUntagged = self.GroupUntagged
LayerOffset = self.LayerOffset
LayerScale = self.LayerScale
Ext = None
if filename.endswith('.xcf'): Ext = '.xcf'
elif filename.endswith('.xjt'): Ext = '.xjt'
# Call Main Function
if Ext:
ret = main(self.report, filename, directory, LayerViewers, MixerViewers, LayerOffset,
LayerScale, OpacityMode, AlphaMode, ShadelessMats,
SetCamera, SetupCompo, GroupUntagged, Ext)
if not ret:
return {'CANCELLED'}
else:
self.report({'ERROR'},"Selected file wasn't valid, try .xcf or .xjt")
return {'CANCELLED'}
return {'FINISHED'}
def invoke(self, context, event):
wm = bpy.context.window_manager
wm.fileselect_add(self)
return {'RUNNING_MODAL'}
# Registering / Unregister
def menu_func(self, context):
self.layout.operator(GIMPImageToScene.bl_idname, text="GIMP Image to Scene (.xcf, .xjt)", icon='PLUGIN')
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func)
if __name__ == "__main__":
register()
|
Dziolas/inspire-next
|
refs/heads/master
|
inspire/base/recordext/functions/get_files_from_bibdoc.py
|
1
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
def get_files_from_bibdoc(recid):
"""
Retrieves using BibDoc all the files related with a given record
@param recid
@return List of dictionaries containing all the information stored
inside BibDoc if the current record has files attached, the
empty list otherwise
"""
if not recid or recid < 0:
return []
from invenio.legacy.bibdocfile.api import BibRecDocs, InvenioBibDocFileError
files = []
try:
bibrecdocs = BibRecDocs(int(recid))
except InvenioBibDocFileError:
return []
latest_files = bibrecdocs.list_latest_files()
for afile in latest_files:
file_dict = {}
file_dict['comment'] = afile.get_comment()
file_dict['description'] = afile.get_description()
file_dict['eformat'] = afile.get_format()
file_dict['full_name'] = afile.get_full_name()
file_dict['full_path'] = afile.get_full_path()
file_dict['magic'] = afile.get_magic()
file_dict['name'] = afile.get_name()
file_dict['path'] = afile.get_path()
file_dict['size'] = afile.get_size()
file_dict['status'] = afile.get_status()
file_dict['subformat'] = afile.get_subformat()
file_dict['superformat'] = afile.get_superformat()
file_dict['type'] = afile.get_type()
file_dict['url'] = afile.get_url()
file_dict['version'] = afile.get_version()
files.append(file_dict)
return files
|
zozo123/buildbot
|
refs/heads/master
|
master/buildbot/changes/base.py
|
1
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.internet import defer
from twisted.python import log
from zope.interface import implements
from buildbot.interfaces import IChangeSource
from buildbot.util import service
from buildbot.util.poll import method as poll_method
class ChangeSource(service.ClusteredService):
implements(IChangeSource)
def describe(self):
pass
# activity handling
def activate(self):
return defer.succeed(None)
def deactivate(self):
return defer.succeed(None)
# service handling
def _getServiceId(self):
return self.master.data.updates.findChangeSourceId(self.name)
def _claimService(self):
return self.master.data.updates.trySetChangeSourceMaster(self.serviceid,
self.master.masterid)
def _unclaimService(self):
return self.master.data.updates.trySetChangeSourceMaster(self.serviceid,
None)
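# Base class for change sources that periodically call poll(); polling
# starts when the service is activated, runs every pollInterval seconds,
# and can optionally fire immediately at launch (pollAtLaunch).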
class PollingChangeSource(ChangeSource):
def __init__(self, name=None, pollInterval=60 * 10, pollAtLaunch=False):
ChangeSource.__init__(self, name)
self.pollInterval = pollInterval
self.pollAtLaunch = pollAtLaunch
def poll(self):
pass
@poll_method
def doPoll(self):
d = defer.maybeDeferred(self.poll)
d.addErrback(log.err, 'while polling for changes')
return d
def force(self):
self.doPoll()
def activate(self):
self.doPoll.start(interval=self.pollInterval, now=self.pollAtLaunch)
def deactivate(self):
return self.doPoll.stop()
|
krother/maze_run
|
refs/heads/master
|
05_print/draw_maze.py
|
3
|
# Code for chapter 05 - Debugging with print
from pygame import image, Surface
from load_tiles import load_tiles, get_tile_rect, SIZE
from generate_maze import create_maze
from util import debug_print
def parse_grid(data):
"""Parses the string representation into a nested list"""
return [list(row) for row in data.strip().split("\n")]
def draw_grid(data, tile_img, tiles):
"""Returns an image of a tile-based grid"""
debug_print("drawing level", data)
xsize = len(data[0]) * SIZE
ysize = len(data) * SIZE
img = Surface((xsize, ysize))
for y, row in enumerate(data):
for x, char in enumerate(row):
rect = get_tile_rect(x, y)
img.blit(tile_img, rect, tiles[char])
return img
if __name__ == '__main__':
tile_img, tiles = load_tiles()
level = create_maze(12, 7)
level = parse_grid(level)
maze = draw_grid(level, tile_img, tiles)
image.save(maze, 'maze.png')
|
sestrella/ansible
|
refs/heads/devel
|
lib/ansible/modules/packaging/os/pkgin.py
|
21
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Shaun Zinck <shaun.zinck at gmail.com>
# Copyright (c) 2015 Lawrence Leonard Gilbert <larry@L2G.to>
# Copyright (c) 2016 Jasper Lievisse Adriaanse <j at jasper.la>
#
# Written by Shaun Zinck
# Based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: pkgin
short_description: Package manager for SmartOS, NetBSD, et al.
description:
- "The standard package manager for SmartOS, but also usable on NetBSD
or any OS that uses C(pkgsrc). (Home: U(http://pkgin.net/))"
version_added: "1.0"
author:
- "Larry Gilbert (@L2G)"
- "Shaun Zinck (@szinck)"
- "Jasper Lievisse Adriaanse (@jasperla)"
notes:
- "Known bug with pkgin < 0.8.0: if a package is removed and another
package depends on it, the other package will be silently removed as
well. New to Ansible 1.9: check-mode support."
options:
name:
description:
- Name of package to install/remove;
- multiple names may be given, separated by commas
state:
description:
- Intended state of the package
choices: [ 'present', 'absent' ]
default: present
update_cache:
description:
- Update repository database. Can be run with other steps or on its own.
type: bool
default: 'no'
version_added: "2.1"
upgrade:
description:
- Upgrade main packages to their newer versions
type: bool
default: 'no'
version_added: "2.1"
full_upgrade:
description:
- Upgrade all packages to their newer versions
type: bool
default: 'no'
version_added: "2.1"
clean:
description:
- Clean packages cache
type: bool
default: 'no'
version_added: "2.1"
force:
description:
- Force package reinstall
type: bool
default: 'no'
version_added: "2.1"
'''
EXAMPLES = '''
# install package foo
- pkgin:
name: foo
state: present
# Update database and install "foo" package
- pkgin:
name: foo
update_cache: yes
# remove package foo
- pkgin:
name: foo
state: absent
# remove packages foo and bar
- pkgin:
name: foo,bar
state: absent
# Update repositories as a separate step
- pkgin:
update_cache: yes
# Upgrade main packages (equivalent to C(pkgin upgrade))
- pkgin:
upgrade: yes
# Upgrade all packages (equivalent to C(pkgin full-upgrade))
- pkgin:
full_upgrade: yes
# Force-upgrade all packages (equivalent to C(pkgin -F full-upgrade))
- pkgin:
full_upgrade: yes
force: yes
# clean packages cache (equivalent to C(pkgin clean))
- pkgin:
clean: yes
'''
import re
from ansible.module_utils.basic import AnsibleModule
def query_package(module, name):
"""Search for the package by name.
Possible return values:
* "present" - installed, no upgrade needed
* "outdated" - installed, but can be upgraded
* False - not installed or not found
"""
# test whether '-p' (parsable) flag is supported.
rc, out, err = module.run_command("%s -p -v" % PKGIN_PATH)
if rc == 0:
pflag = '-p'
splitchar = ';'
else:
pflag = ''
splitchar = ' '
# Use "pkgin search" to find the package. The regular expression will
# only match on the complete name.
rc, out, err = module.run_command("%s %s search \"^%s$\"" % (PKGIN_PATH, pflag, name))
# rc will not be 0 unless the search was a success
if rc == 0:
# Search results may contain more than one line (e.g., 'emacs'), so iterate
# through each line to see if we have a match.
packages = out.split('\n')
for package in packages:
# Break up line at spaces. The first part will be the package with its
# version (e.g. 'gcc47-libs-4.7.2nb4'), and the second will be the state
# of the package:
# '' - not installed
# '<' - installed but out of date
# '=' - installed and up to date
# '>' - installed but newer than the repository version
pkgname_with_version, raw_state = package.split(splitchar)[0:2]
# Search for package, stripping version
# (results in something like 'gcc47-libs' or 'emacs24-nox11')
pkg_search_obj = re.search(r'^(.*?)\-[0-9][0-9.]*(nb[0-9]+)*', pkgname_with_version, re.M)
# Do not proceed unless we have a match
if not pkg_search_obj:
continue
# Grab matched string
pkgname_without_version = pkg_search_obj.group(1)
if name != pkgname_without_version:
continue
# The package was found; now return its state
if raw_state == '<':
return 'outdated'
elif raw_state == '=' or raw_state == '>':
return 'present'
else:
return False
# no fall-through
# No packages were matched, so return False
return False
def format_action_message(module, action, count):
vars = {"actioned": action,
"count": count}
if module.check_mode:
message = "would have %(actioned)s %(count)d package" % vars
else:
message = "%(actioned)s %(count)d package" % vars
if count == 1:
return message
else:
return message + "s"
def format_pkgin_command(module, command, package=None):
# Not all commands take a package argument, so cover this up by passing
# an empty string. Some commands (e.g. 'update') will ignore extra
# arguments, however this behaviour cannot be relied on for others.
if package is None:
package = ""
if module.params["force"]:
force = "-F"
else:
force = ""
vars = {"pkgin": PKGIN_PATH,
"command": command,
"package": package,
"force": force}
if module.check_mode:
return "%(pkgin)s -n %(command)s %(package)s" % vars
else:
return "%(pkgin)s -y %(force)s %(command)s %(package)s" % vars
def remove_packages(module, packages):
remove_c = 0
# Using a for loop in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, package):
continue
rc, out, err = module.run_command(
format_pkgin_command(module, "remove", package))
if not module.check_mode and query_package(module, package):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
module.exit_json(changed=True, msg=format_action_message(module, "removed", remove_c))
module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, packages):
install_c = 0
for package in packages:
if query_package(module, package):
continue
rc, out, err = module.run_command(
format_pkgin_command(module, "install", package))
if not module.check_mode and not query_package(module, package):
module.fail_json(msg="failed to install %s: %s" % (package, out))
install_c += 1
if install_c > 0:
module.exit_json(changed=True, msg=format_action_message(module, "installed", install_c))
module.exit_json(changed=False, msg="package(s) already present")
def update_package_db(module):
rc, out, err = module.run_command(
format_pkgin_command(module, "update"))
if rc == 0:
if re.search('database for.*is up-to-date\n$', out):
return False, "database is up-to-date"
else:
return True, "updated repository database"
else:
module.fail_json(msg="could not update package db")
def do_upgrade_packages(module, full=False):
if full:
cmd = "full-upgrade"
else:
cmd = "upgrade"
rc, out, err = module.run_command(
format_pkgin_command(module, cmd))
if rc == 0:
if re.search('^nothing to do.\n$', out):
module.exit_json(changed=False, msg="nothing left to upgrade")
else:
module.fail_json(msg="could not %s packages" % cmd)
def upgrade_packages(module):
do_upgrade_packages(module)
def full_upgrade_packages(module):
do_upgrade_packages(module, True)
def clean_cache(module):
rc, out, err = module.run_command(
format_pkgin_command(module, "clean"))
if rc == 0:
# There's no indication if 'clean' actually removed anything,
# so assume it did.
module.exit_json(changed=True, msg="cleaned caches")
else:
module.fail_json(msg="could not clean package cache")
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default="present", choices=["present", "absent"]),
name=dict(aliases=["pkg"], type='list'),
update_cache=dict(default='no', type='bool'),
upgrade=dict(default='no', type='bool'),
full_upgrade=dict(default='no', type='bool'),
clean=dict(default='no', type='bool'),
force=dict(default='no', type='bool')),
required_one_of=[['name', 'update_cache', 'upgrade', 'full_upgrade', 'clean']],
supports_check_mode=True)
global PKGIN_PATH
PKGIN_PATH = module.get_bin_path('pkgin', True, ['/opt/local/bin'])
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
p = module.params
if p["update_cache"]:
c, msg = update_package_db(module)
if not (p['name'] or p["upgrade"] or p["full_upgrade"]):
module.exit_json(changed=c, msg=msg)
if p["upgrade"]:
upgrade_packages(module)
if not p['name']:
module.exit_json(changed=True, msg='upgraded packages')
if p["full_upgrade"]:
full_upgrade_packages(module)
if not p['name']:
module.exit_json(changed=True, msg='upgraded all packages')
if p["clean"]:
clean_cache(module)
if not p['name']:
module.exit_json(changed=True, msg='cleaned caches')
pkgs = p["name"]
if p["state"] == "present":
install_packages(module, pkgs)
elif p["state"] == "absent":
remove_packages(module, pkgs)
if __name__ == '__main__':
main()
|
michaeljohn32/odoomrp-wip
|
refs/heads/8.0
|
purchase_packaging_info/__init__.py
|
379
|
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from . import models
|
jonparrott/google-cloud-python
|
refs/heads/master
|
bigtable/google/cloud/bigtable_v2/proto/data_pb2_grpc.py
|
591
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
|
aabbox/kbengine
|
refs/heads/master
|
kbe/src/lib/python/Lib/test/test_asyncio/test_tasks.py
|
60
|
"""Tests for tasks.py."""
import re
import sys
import types
import unittest
import weakref
from test import support
from test.script_helper import assert_python_ok
from unittest import mock
import asyncio
from asyncio import coroutines
from asyncio import test_utils
PY34 = (sys.version_info >= (3, 4))
PY35 = (sys.version_info >= (3, 5))
@asyncio.coroutine
def coroutine_function():
pass
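# Helper building the "coro=<...>" fragment expected inside Task reprs,
# covering running coroutines and pending/defined ones, with or without a
# source traceback appended as "created at file:line".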
def format_coroutine(qualname, state, src, source_traceback, generator=False):
if generator:
state = '%s' % state
else:
state = '%s, defined' % state
if source_traceback is not None:
frame = source_traceback[-1]
return ('coro=<%s() %s at %s> created at %s:%s'
% (qualname, state, src, frame[0], frame[1]))
else:
return 'coro=<%s() %s at %s>' % (qualname, state, src)
class Dummy:
def __repr__(self):
return '<Dummy>'
def __call__(self, *args):
pass
class TaskTests(test_utils.TestCase):
def setUp(self):
self.loop = self.new_test_loop()
def test_task_class(self):
@asyncio.coroutine
def notmuch():
return 'ok'
t = asyncio.Task(notmuch(), loop=self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = asyncio.Task(notmuch(), loop=loop)
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_async_coroutine(self):
@asyncio.coroutine
def notmuch():
return 'ok'
t = asyncio.async(notmuch(), loop=self.loop)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t._loop, self.loop)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
t = asyncio.async(notmuch(), loop=loop)
self.assertIs(t._loop, loop)
loop.run_until_complete(t)
loop.close()
def test_async_future(self):
f_orig = asyncio.Future(loop=self.loop)
f_orig.set_result('ko')
f = asyncio.async(f_orig)
self.loop.run_until_complete(f)
self.assertTrue(f.done())
self.assertEqual(f.result(), 'ko')
self.assertIs(f, f_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
f = asyncio.async(f_orig, loop=loop)
loop.close()
f = asyncio.async(f_orig, loop=self.loop)
self.assertIs(f, f_orig)
def test_async_task(self):
@asyncio.coroutine
def notmuch():
return 'ok'
t_orig = asyncio.Task(notmuch(), loop=self.loop)
t = asyncio.async(t_orig)
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'ok')
self.assertIs(t, t_orig)
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
with self.assertRaises(ValueError):
t = asyncio.async(t_orig, loop=loop)
loop.close()
t = asyncio.async(t_orig, loop=self.loop)
self.assertIs(t, t_orig)
def test_async_neither(self):
with self.assertRaises(TypeError):
asyncio.async('ok')
def test_task_repr(self):
self.loop.set_debug(False)
@asyncio.coroutine
def notmuch():
yield from []
return 'abc'
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
if PY35:
self.assertEqual(notmuch.__qualname__,
'TaskTests.test_task_repr.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__)
filename, lineno = test_utils.get_function_source(notmuch)
src = "%s:%s" % (filename, lineno)
# test coroutine object
gen = notmuch()
if coroutines._DEBUG or PY35:
coro_qualname = 'TaskTests.test_task_repr.<locals>.notmuch'
else:
coro_qualname = 'notmuch'
self.assertEqual(gen.__name__, 'notmuch')
if PY35:
self.assertEqual(gen.__qualname__,
coro_qualname)
# test pending Task
t = asyncio.Task(gen, loop=self.loop)
t.add_done_callback(Dummy())
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback, generator=True)
self.assertEqual(repr(t),
'<Task pending %s cb=[<Dummy>()]>' % coro)
# test cancelling Task
t.cancel() # Does not take immediate effect!
self.assertEqual(repr(t),
'<Task cancelling %s cb=[<Dummy>()]>' % coro)
# test cancelled Task
self.assertRaises(asyncio.CancelledError,
self.loop.run_until_complete, t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
'<Task cancelled %s>' % coro)
# test finished Task
t = asyncio.Task(notmuch(), loop=self.loop)
self.loop.run_until_complete(t)
coro = format_coroutine(coro_qualname, 'done', src,
t._source_traceback)
self.assertEqual(repr(t),
"<Task finished %s result='abc'>" % coro)
def test_task_repr_coro_decorator(self):
self.loop.set_debug(False)
@asyncio.coroutine
def notmuch():
# notmuch() function doesn't use yield from: it will be wrapped by
# @coroutine decorator
return 123
# test coroutine function
self.assertEqual(notmuch.__name__, 'notmuch')
if PY35:
self.assertEqual(notmuch.__qualname__,
'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch')
self.assertEqual(notmuch.__module__, __name__)
# test coroutine object
gen = notmuch()
if coroutines._DEBUG or PY35:
# On Python >= 3.5, generators now inherit the name of the
# function, as expected, and have a qualified name (__qualname__
# attribute).
coro_name = 'notmuch'
coro_qualname = 'TaskTests.test_task_repr_coro_decorator.<locals>.notmuch'
else:
# On Python < 3.5, generators inherit the name of the code, not of
# the function. See: http://bugs.python.org/issue21205
coro_name = coro_qualname = 'coro'
self.assertEqual(gen.__name__, coro_name)
if PY35:
self.assertEqual(gen.__qualname__, coro_qualname)
# test repr(CoroWrapper)
if coroutines._DEBUG:
# format the coroutine object
if coroutines._DEBUG:
filename, lineno = test_utils.get_function_source(notmuch)
frame = gen._source_traceback[-1]
coro = ('%s() running, defined at %s:%s, created at %s:%s'
% (coro_qualname, filename, lineno,
frame[0], frame[1]))
else:
code = gen.gi_code
coro = ('%s() running at %s:%s'
% (coro_qualname, code.co_filename, code.co_firstlineno))
self.assertEqual(repr(gen), '<CoroWrapper %s>' % coro)
# test pending Task
t = asyncio.Task(gen, loop=self.loop)
t.add_done_callback(Dummy())
# format the coroutine object
if coroutines._DEBUG:
src = '%s:%s' % test_utils.get_function_source(notmuch)
else:
code = gen.gi_code
src = '%s:%s' % (code.co_filename, code.co_firstlineno)
coro = format_coroutine(coro_qualname, 'running', src,
t._source_traceback,
generator=not coroutines._DEBUG)
self.assertEqual(repr(t),
'<Task pending %s cb=[<Dummy>()]>' % coro)
self.loop.run_until_complete(t)
def test_task_repr_wait_for(self):
self.loop.set_debug(False)
@asyncio.coroutine
def wait_for(fut):
return (yield from fut)
fut = asyncio.Future(loop=self.loop)
task = asyncio.Task(wait_for(fut), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertRegex(repr(task),
'<Task .* wait_for=%s>' % re.escape(repr(fut)))
fut.set_result(None)
self.loop.run_until_complete(task)
def test_task_basics(self):
@asyncio.coroutine
def outer():
a = yield from inner1()
b = yield from inner2()
return a+b
@asyncio.coroutine
def inner1():
return 42
@asyncio.coroutine
def inner2():
return 1000
t = outer()
self.assertEqual(self.loop.run_until_complete(t), 1042)
def test_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
@asyncio.coroutine
def task():
yield from asyncio.sleep(10.0, loop=loop)
return 12
t = asyncio.Task(task(), loop=loop)
loop.call_soon(t.cancel)
with self.assertRaises(asyncio.CancelledError):
loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t.cancel())
def test_cancel_yield(self):
@asyncio.coroutine
def task():
yield
yield
return 12
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start coro
t.cancel()
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, t)
self.assertTrue(t.done())
self.assertTrue(t.cancelled())
self.assertFalse(t.cancel())
def test_cancel_inner_future(self):
f = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from f
return 12
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop) # start task
f.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
self.assertTrue(f.cancelled())
self.assertTrue(t.cancelled())
def test_cancel_both_task_and_inner_future(self):
f = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from f
return 12
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
t.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertTrue(f.cancelled())
self.assertTrue(t.cancelled())
def test_cancel_task_catching(self):
fut1 = asyncio.Future(loop=self.loop)
fut2 = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from fut1
try:
yield from fut2
except asyncio.CancelledError:
return 42
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut1) # White-box test.
fut1.set_result(None)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut2) # White-box test.
t.cancel()
self.assertTrue(fut2.cancelled())
res = self.loop.run_until_complete(t)
self.assertEqual(res, 42)
self.assertFalse(t.cancelled())
def test_cancel_task_ignoring(self):
fut1 = asyncio.Future(loop=self.loop)
fut2 = asyncio.Future(loop=self.loop)
fut3 = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def task():
yield from fut1
try:
yield from fut2
except asyncio.CancelledError:
pass
res = yield from fut3
return res
t = asyncio.Task(task(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut1) # White-box test.
fut1.set_result(None)
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut2) # White-box test.
t.cancel()
self.assertTrue(fut2.cancelled())
test_utils.run_briefly(self.loop)
self.assertIs(t._fut_waiter, fut3) # White-box test.
fut3.set_result(42)
res = self.loop.run_until_complete(t)
self.assertEqual(res, 42)
self.assertFalse(fut3.cancelled())
self.assertFalse(t.cancelled())
def test_cancel_current_task(self):
loop = asyncio.new_event_loop()
self.set_event_loop(loop)
@asyncio.coroutine
def task():
t.cancel()
self.assertTrue(t._must_cancel) # White-box test.
# The sleep should be cancelled immediately.
yield from asyncio.sleep(100, loop=loop)
return 12
t = asyncio.Task(task(), loop=loop)
self.assertRaises(
asyncio.CancelledError, loop.run_until_complete, t)
self.assertTrue(t.done())
self.assertFalse(t._must_cancel) # White-box test.
self.assertFalse(t.cancel())
def test_stop_while_run_in_complete(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0.1
self.assertAlmostEqual(0.2, when)
when = yield 0.1
self.assertAlmostEqual(0.3, when)
yield 0.1
loop = self.new_test_loop(gen)
x = 0
waiters = []
@asyncio.coroutine
def task():
nonlocal x
while x < 10:
waiters.append(asyncio.sleep(0.1, loop=loop))
yield from waiters[-1]
x += 1
if x == 2:
loop.stop()
t = asyncio.Task(task(), loop=loop)
with self.assertRaises(RuntimeError) as cm:
loop.run_until_complete(t)
self.assertEqual(str(cm.exception),
'Event loop stopped before Future completed.')
self.assertFalse(t.done())
self.assertEqual(x, 2)
self.assertAlmostEqual(0.3, loop.time())
# close generators
for w in waiters:
w.close()
t.cancel()
self.assertRaises(asyncio.CancelledError, loop.run_until_complete, t)
def test_wait_for(self):
def gen():
when = yield
self.assertAlmostEqual(0.2, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
when = yield 0.1
loop = self.new_test_loop(gen)
foo_running = None
@asyncio.coroutine
def foo():
nonlocal foo_running
foo_running = True
try:
yield from asyncio.sleep(0.2, loop=loop)
finally:
foo_running = False
return 'done'
fut = asyncio.Task(foo(), loop=loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.1, loop=loop))
self.assertTrue(fut.done())
# it should have been cancelled due to the timeout
self.assertTrue(fut.cancelled())
self.assertAlmostEqual(0.1, loop.time())
self.assertEqual(foo_running, False)
def test_wait_for_blocking(self):
loop = self.new_test_loop()
@asyncio.coroutine
def coro():
return 'done'
res = loop.run_until_complete(asyncio.wait_for(coro(),
timeout=None,
loop=loop))
self.assertEqual(res, 'done')
def test_wait_for_with_global_loop(self):
def gen():
when = yield
self.assertAlmostEqual(0.2, when)
when = yield 0
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
@asyncio.coroutine
def foo():
yield from asyncio.sleep(0.2, loop=loop)
return 'done'
asyncio.set_event_loop(loop)
try:
fut = asyncio.Task(foo(), loop=loop)
with self.assertRaises(asyncio.TimeoutError):
loop.run_until_complete(asyncio.wait_for(fut, 0.01))
finally:
asyncio.set_event_loop(None)
self.assertAlmostEqual(0.01, loop.time())
self.assertTrue(fut.done())
self.assertTrue(fut.cancelled())
def test_wait_for_race_condition(self):
def gen():
yield 0.1
yield 0.1
yield 0.1
loop = self.new_test_loop(gen)
fut = asyncio.Future(loop=loop)
task = asyncio.wait_for(fut, timeout=0.2, loop=loop)
loop.call_later(0.1, fut.set_result, "ok")
res = loop.run_until_complete(task)
self.assertEqual(res, "ok")
def test_wait(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
yield 0.15
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a], loop=loop)
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
return 42
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(res, 42)
self.assertAlmostEqual(0.15, loop.time())
# Doing it again should take no time and exercise a different path.
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
self.assertEqual(res, 42)
def test_wait_with_global_loop(self):
def gen():
when = yield
self.assertAlmostEqual(0.01, when)
when = yield 0
self.assertAlmostEqual(0.015, when)
yield 0.015
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.01, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.015, loop=loop), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a])
self.assertEqual(done, set([a, b]))
self.assertEqual(pending, set())
return 42
asyncio.set_event_loop(loop)
res = loop.run_until_complete(
asyncio.Task(foo(), loop=loop))
self.assertEqual(res, 42)
def test_wait_duplicate_coroutines(self):
@asyncio.coroutine
def coro(s):
return s
c = coro('test')
task = asyncio.Task(
asyncio.wait([c, c, coro('spam')], loop=self.loop),
loop=self.loop)
done, pending = self.loop.run_until_complete(task)
self.assertFalse(pending)
self.assertEqual(set(f.result() for f in done), {'test', 'spam'})
def test_wait_errors(self):
self.assertRaises(
ValueError, self.loop.run_until_complete,
asyncio.wait(set(), loop=self.loop))
# -1 is an invalid return_when value
sleep_coro = asyncio.sleep(10.0, loop=self.loop)
wait_coro = asyncio.wait([sleep_coro], return_when=-1, loop=self.loop)
self.assertRaises(ValueError,
self.loop.run_until_complete, wait_coro)
sleep_coro.close()
def test_wait_first_completed(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
yield 0.1
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
task = asyncio.Task(
asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
loop=loop),
loop=loop)
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertFalse(a.done())
self.assertTrue(b.done())
self.assertIsNone(b.result())
self.assertAlmostEqual(0.1, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_really_done(self):
# there is a possibility that some tasks in the pending list
# became done but their callbacks haven't all been called yet
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
yield
yield
a = asyncio.Task(coro1(), loop=self.loop)
b = asyncio.Task(coro2(), loop=self.loop)
task = asyncio.Task(
asyncio.wait([b, a], return_when=asyncio.FIRST_COMPLETED,
loop=self.loop),
loop=self.loop)
done, pending = self.loop.run_until_complete(task)
self.assertEqual({a, b}, done)
self.assertTrue(a.done())
self.assertIsNone(a.result())
self.assertTrue(b.done())
self.assertIsNone(b.result())
def test_wait_first_exception(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
# first_exception, task already has exception
a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
@asyncio.coroutine
def exc():
raise ZeroDivisionError('err')
b = asyncio.Task(exc(), loop=loop)
task = asyncio.Task(
asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION,
loop=loop),
loop=loop)
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertAlmostEqual(0, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_first_exception_in_wait(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
when = yield 0
self.assertAlmostEqual(0.01, when)
yield 0.01
loop = self.new_test_loop(gen)
# first_exception, exception during waiting
a = asyncio.Task(asyncio.sleep(10.0, loop=loop), loop=loop)
@asyncio.coroutine
def exc():
yield from asyncio.sleep(0.01, loop=loop)
raise ZeroDivisionError('err')
b = asyncio.Task(exc(), loop=loop)
task = asyncio.wait([b, a], return_when=asyncio.FIRST_EXCEPTION,
loop=loop)
done, pending = loop.run_until_complete(task)
self.assertEqual({b}, done)
self.assertEqual({a}, pending)
self.assertAlmostEqual(0.01, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_with_exception(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
yield 0.15
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
@asyncio.coroutine
def sleeper():
yield from asyncio.sleep(0.15, loop=loop)
raise ZeroDivisionError('really')
b = asyncio.Task(sleeper(), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a], loop=loop)
self.assertEqual(len(done), 2)
self.assertEqual(pending, set())
errors = set(f for f in done if f.exception() is not None)
self.assertEqual(len(errors), 1)
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
def test_wait_with_timeout(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
when = yield 0
self.assertAlmostEqual(0.11, when)
yield 0.11
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
@asyncio.coroutine
def foo():
done, pending = yield from asyncio.wait([b, a], timeout=0.11,
loop=loop)
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.11, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_wait_concurrent_complete(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(0.15, when)
when = yield 0
self.assertAlmostEqual(0.1, when)
yield 0.1
loop = self.new_test_loop(gen)
a = asyncio.Task(asyncio.sleep(0.1, loop=loop), loop=loop)
b = asyncio.Task(asyncio.sleep(0.15, loop=loop), loop=loop)
done, pending = loop.run_until_complete(
asyncio.wait([b, a], timeout=0.1, loop=loop))
self.assertEqual(done, set([a]))
self.assertEqual(pending, set([b]))
self.assertAlmostEqual(0.1, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_as_completed(self):
def gen():
yield 0
yield 0
yield 0.01
yield 0
loop = self.new_test_loop(gen)
# disable "slow callback" warning
loop.slow_callback_duration = 1.0
completed = set()
time_shifted = False
@asyncio.coroutine
def sleeper(dt, x):
nonlocal time_shifted
yield from asyncio.sleep(dt, loop=loop)
completed.add(x)
if not time_shifted and 'a' in completed and 'b' in completed:
time_shifted = True
loop.advance_time(0.14)
return x
a = sleeper(0.01, 'a')
b = sleeper(0.01, 'b')
c = sleeper(0.15, 'c')
@asyncio.coroutine
def foo():
values = []
for f in asyncio.as_completed([b, c, a], loop=loop):
values.append((yield from f))
return values
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
self.assertTrue('a' in res[:2])
self.assertTrue('b' in res[:2])
self.assertEqual(res[2], 'c')
# Doing it again should take no time and exercise a different path.
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertAlmostEqual(0.15, loop.time())
def test_as_completed_with_timeout(self):
def gen():
yield
yield 0
yield 0
yield 0.1
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.1, 'a', loop=loop)
b = asyncio.sleep(0.15, 'b', loop=loop)
@asyncio.coroutine
def foo():
values = []
for f in asyncio.as_completed([a, b], timeout=0.12, loop=loop):
if values:
loop.advance_time(0.02)
try:
v = yield from f
values.append((1, v))
except asyncio.TimeoutError as exc:
values.append((2, exc))
return values
res = loop.run_until_complete(asyncio.Task(foo(), loop=loop))
self.assertEqual(len(res), 2, res)
self.assertEqual(res[0], (1, 'a'))
self.assertEqual(res[1][0], 2)
self.assertIsInstance(res[1][1], asyncio.TimeoutError)
self.assertAlmostEqual(0.12, loop.time())
# move forward to close generator
loop.advance_time(10)
loop.run_until_complete(asyncio.wait([a, b], loop=loop))
def test_as_completed_with_unused_timeout(self):
def gen():
yield
yield 0
yield 0.01
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.01, 'a', loop=loop)
@asyncio.coroutine
def foo():
for f in asyncio.as_completed([a], timeout=1, loop=loop):
v = yield from f
self.assertEqual(v, 'a')
loop.run_until_complete(asyncio.Task(foo(), loop=loop))
def test_as_completed_reverse_wait(self):
def gen():
yield 0
yield 0.05
yield 0
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.10, 'b', loop=loop)
fs = {a, b}
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
x = loop.run_until_complete(futs[1])
self.assertEqual(x, 'a')
self.assertAlmostEqual(0.05, loop.time())
loop.advance_time(0.05)
y = loop.run_until_complete(futs[0])
self.assertEqual(y, 'b')
self.assertAlmostEqual(0.10, loop.time())
def test_as_completed_concurrent(self):
def gen():
when = yield
self.assertAlmostEqual(0.05, when)
when = yield 0
self.assertAlmostEqual(0.05, when)
yield 0.05
loop = self.new_test_loop(gen)
a = asyncio.sleep(0.05, 'a', loop=loop)
b = asyncio.sleep(0.05, 'b', loop=loop)
fs = {a, b}
futs = list(asyncio.as_completed(fs, loop=loop))
self.assertEqual(len(futs), 2)
waiter = asyncio.wait(futs, loop=loop)
done, pending = loop.run_until_complete(waiter)
self.assertEqual(set(f.result() for f in done), {'a', 'b'})
def test_as_completed_duplicate_coroutines(self):
@asyncio.coroutine
def coro(s):
return s
@asyncio.coroutine
def runner():
result = []
c = coro('ham')
for f in asyncio.as_completed([c, c, coro('spam')],
loop=self.loop):
result.append((yield from f))
return result
fut = asyncio.Task(runner(), loop=self.loop)
self.loop.run_until_complete(fut)
result = fut.result()
self.assertEqual(set(result), {'ham', 'spam'})
self.assertEqual(len(result), 2)
def test_sleep(self):
def gen():
when = yield
self.assertAlmostEqual(0.05, when)
when = yield 0.05
self.assertAlmostEqual(0.1, when)
yield 0.05
loop = self.new_test_loop(gen)
@asyncio.coroutine
def sleeper(dt, arg):
yield from asyncio.sleep(dt/2, loop=loop)
res = yield from asyncio.sleep(dt/2, arg, loop=loop)
return res
t = asyncio.Task(sleeper(0.1, 'yeah'), loop=loop)
loop.run_until_complete(t)
self.assertTrue(t.done())
self.assertEqual(t.result(), 'yeah')
self.assertAlmostEqual(0.1, loop.time())
def test_sleep_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
t = asyncio.Task(asyncio.sleep(10.0, 'yeah', loop=loop),
loop=loop)
handle = None
orig_call_later = loop.call_later
def call_later(delay, callback, *args):
nonlocal handle
handle = orig_call_later(delay, callback, *args)
return handle
loop.call_later = call_later
test_utils.run_briefly(loop)
self.assertFalse(handle._cancelled)
t.cancel()
test_utils.run_briefly(loop)
self.assertTrue(handle._cancelled)
def test_task_cancel_sleeping_task(self):
def gen():
when = yield
self.assertAlmostEqual(0.1, when)
when = yield 0
self.assertAlmostEqual(5000, when)
yield 0.1
loop = self.new_test_loop(gen)
@asyncio.coroutine
def sleep(dt):
yield from asyncio.sleep(dt, loop=loop)
@asyncio.coroutine
def doit():
sleeper = asyncio.Task(sleep(5000), loop=loop)
loop.call_later(0.1, sleeper.cancel)
try:
yield from sleeper
except asyncio.CancelledError:
return 'cancelled'
else:
return 'slept in'
doer = doit()
self.assertEqual(loop.run_until_complete(doer), 'cancelled')
self.assertAlmostEqual(0.1, loop.time())
def test_task_cancel_waiter_future(self):
fut = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def coro():
yield from fut
task = asyncio.Task(coro(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertIs(task._fut_waiter, fut)
task.cancel()
test_utils.run_briefly(self.loop)
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, task)
self.assertIsNone(task._fut_waiter)
self.assertTrue(fut.cancelled())
def test_step_in_completed_task(self):
@asyncio.coroutine
def notmuch():
return 'ko'
gen = notmuch()
task = asyncio.Task(gen, loop=self.loop)
task.set_result('ok')
self.assertRaises(AssertionError, task._step)
gen.close()
def test_step_result(self):
@asyncio.coroutine
def notmuch():
yield None
yield 1
return 'ko'
self.assertRaises(
RuntimeError, self.loop.run_until_complete, notmuch())
def test_step_result_future(self):
# If coroutine returns future, task waits on this future.
class Fut(asyncio.Future):
def __init__(self, *args, **kwds):
self.cb_added = False
super().__init__(*args, **kwds)
def add_done_callback(self, fn):
self.cb_added = True
super().add_done_callback(fn)
fut = Fut(loop=self.loop)
result = None
@asyncio.coroutine
def wait_for_future():
nonlocal result
result = yield from fut
t = asyncio.Task(wait_for_future(), loop=self.loop)
test_utils.run_briefly(self.loop)
self.assertTrue(fut.cb_added)
res = object()
fut.set_result(res)
test_utils.run_briefly(self.loop)
self.assertIs(res, result)
self.assertTrue(t.done())
self.assertIsNone(t.result())
def test_step_with_baseexception(self):
@asyncio.coroutine
def notmutch():
raise BaseException()
task = asyncio.Task(notmutch(), loop=self.loop)
self.assertRaises(BaseException, task._step)
self.assertTrue(task.done())
self.assertIsInstance(task.exception(), BaseException)
def test_baseexception_during_cancel(self):
def gen():
when = yield
self.assertAlmostEqual(10.0, when)
yield 0
loop = self.new_test_loop(gen)
@asyncio.coroutine
def sleeper():
yield from asyncio.sleep(10, loop=loop)
base_exc = BaseException()
@asyncio.coroutine
def notmutch():
try:
yield from sleeper()
except asyncio.CancelledError:
raise base_exc
task = asyncio.Task(notmutch(), loop=loop)
test_utils.run_briefly(loop)
task.cancel()
self.assertFalse(task.done())
self.assertRaises(BaseException, test_utils.run_briefly, loop)
self.assertTrue(task.done())
self.assertFalse(task.cancelled())
self.assertIs(task.exception(), base_exc)
def test_iscoroutinefunction(self):
def fn():
pass
self.assertFalse(asyncio.iscoroutinefunction(fn))
def fn1():
yield
self.assertFalse(asyncio.iscoroutinefunction(fn1))
@asyncio.coroutine
def fn2():
yield
self.assertTrue(asyncio.iscoroutinefunction(fn2))
def test_yield_vs_yield_from(self):
fut = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def wait_for_future():
yield fut
task = wait_for_future()
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(task)
self.assertFalse(fut.done())
def test_yield_vs_yield_from_generator(self):
@asyncio.coroutine
def coro():
yield
@asyncio.coroutine
def wait_for_future():
gen = coro()
try:
yield gen
finally:
gen.close()
task = wait_for_future()
self.assertRaises(
RuntimeError,
self.loop.run_until_complete, task)
def test_coroutine_non_gen_function(self):
@asyncio.coroutine
def func():
return 'test'
self.assertTrue(asyncio.iscoroutinefunction(func))
coro = func()
self.assertTrue(asyncio.iscoroutine(coro))
res = self.loop.run_until_complete(coro)
self.assertEqual(res, 'test')
def test_coroutine_non_gen_function_return_future(self):
fut = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def func():
return fut
@asyncio.coroutine
def coro():
fut.set_result('test')
t1 = asyncio.Task(func(), loop=self.loop)
t2 = asyncio.Task(coro(), loop=self.loop)
res = self.loop.run_until_complete(t1)
self.assertEqual(res, 'test')
self.assertIsNone(t2.result())
def test_current_task(self):
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
@asyncio.coroutine
def coro(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task)
task = asyncio.Task(coro(self.loop), loop=self.loop)
self.loop.run_until_complete(task)
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
def test_current_task_with_interleaving_tasks(self):
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
fut1 = asyncio.Future(loop=self.loop)
fut2 = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def coro1(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
yield from fut1
self.assertTrue(asyncio.Task.current_task(loop=loop) is task1)
fut2.set_result(True)
@asyncio.coroutine
def coro2(loop):
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
fut1.set_result(True)
yield from fut2
self.assertTrue(asyncio.Task.current_task(loop=loop) is task2)
task1 = asyncio.Task(coro1(self.loop), loop=self.loop)
task2 = asyncio.Task(coro2(self.loop), loop=self.loop)
self.loop.run_until_complete(asyncio.wait((task1, task2),
loop=self.loop))
self.assertIsNone(asyncio.Task.current_task(loop=self.loop))
# Some thorough tests for cancellation propagation through
# coroutines, tasks and wait().
def test_yield_future_passes_cancel(self):
# Cancelling outer() cancels inner() cancels waiter.
proof = 0
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
nonlocal proof
try:
yield from waiter
except asyncio.CancelledError:
proof += 1
raise
else:
self.fail('got past sleep() in inner()')
@asyncio.coroutine
def outer():
nonlocal proof
try:
yield from inner()
except asyncio.CancelledError:
proof += 100 # Expect this path.
else:
proof += 10
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.loop.run_until_complete(f)
self.assertEqual(proof, 101)
self.assertTrue(waiter.cancelled())
def test_yield_wait_does_not_shield_cancel(self):
# Cancelling outer() makes wait() return early, leaves inner()
# running.
proof = 0
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
nonlocal proof
yield from waiter
proof += 1
@asyncio.coroutine
def outer():
nonlocal proof
d, p = yield from asyncio.wait([inner()], loop=self.loop)
proof += 100
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
self.assertRaises(
asyncio.CancelledError, self.loop.run_until_complete, f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
self.assertEqual(proof, 1)
def test_shield_result(self):
inner = asyncio.Future(loop=self.loop)
outer = asyncio.shield(inner)
inner.set_result(42)
res = self.loop.run_until_complete(outer)
self.assertEqual(res, 42)
def test_shield_exception(self):
inner = asyncio.Future(loop=self.loop)
outer = asyncio.shield(inner)
test_utils.run_briefly(self.loop)
exc = RuntimeError('expected')
inner.set_exception(exc)
test_utils.run_briefly(self.loop)
self.assertIs(outer.exception(), exc)
def test_shield_cancel(self):
inner = asyncio.Future(loop=self.loop)
outer = asyncio.shield(inner)
test_utils.run_briefly(self.loop)
inner.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(outer.cancelled())
def test_shield_shortcut(self):
fut = asyncio.Future(loop=self.loop)
fut.set_result(42)
res = self.loop.run_until_complete(asyncio.shield(fut))
self.assertEqual(res, 42)
def test_shield_effect(self):
# Cancelling outer() does not affect inner().
proof = 0
waiter = asyncio.Future(loop=self.loop)
@asyncio.coroutine
def inner():
nonlocal proof
yield from waiter
proof += 1
@asyncio.coroutine
def outer():
nonlocal proof
yield from asyncio.shield(inner(), loop=self.loop)
proof += 100
f = asyncio.async(outer(), loop=self.loop)
test_utils.run_briefly(self.loop)
f.cancel()
with self.assertRaises(asyncio.CancelledError):
self.loop.run_until_complete(f)
waiter.set_result(None)
test_utils.run_briefly(self.loop)
self.assertEqual(proof, 1)
def test_shield_gather(self):
child1 = asyncio.Future(loop=self.loop)
child2 = asyncio.Future(loop=self.loop)
parent = asyncio.gather(child1, child2, loop=self.loop)
outer = asyncio.shield(parent, loop=self.loop)
test_utils.run_briefly(self.loop)
outer.cancel()
test_utils.run_briefly(self.loop)
self.assertTrue(outer.cancelled())
child1.set_result(1)
child2.set_result(2)
test_utils.run_briefly(self.loop)
self.assertEqual(parent.result(), [1, 2])
def test_gather_shield(self):
child1 = asyncio.Future(loop=self.loop)
child2 = asyncio.Future(loop=self.loop)
inner1 = asyncio.shield(child1, loop=self.loop)
inner2 = asyncio.shield(child2, loop=self.loop)
parent = asyncio.gather(inner1, inner2, loop=self.loop)
test_utils.run_briefly(self.loop)
parent.cancel()
# This should cancel inner1 and inner2 but not child1 and child2.
test_utils.run_briefly(self.loop)
self.assertIsInstance(parent.exception(), asyncio.CancelledError)
self.assertTrue(inner1.cancelled())
self.assertTrue(inner2.cancelled())
child1.set_result(1)
child2.set_result(2)
test_utils.run_briefly(self.loop)
def test_as_completed_invalid_args(self):
fut = asyncio.Future(loop=self.loop)
# as_completed() expects a list of futures, not a future instance
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.as_completed(fut, loop=self.loop))
coro = coroutine_function()
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.as_completed(coro, loop=self.loop))
coro.close()
def test_wait_invalid_args(self):
fut = asyncio.Future(loop=self.loop)
# wait() expects a list of futures, not a future instance
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.wait(fut, loop=self.loop))
coro = coroutine_function()
self.assertRaises(TypeError, self.loop.run_until_complete,
asyncio.wait(coro, loop=self.loop))
coro.close()
# wait() expects at least a future
self.assertRaises(ValueError, self.loop.run_until_complete,
asyncio.wait([], loop=self.loop))
def test_corowrapper_mocks_generator(self):
def check():
# A function that asserts various things.
# Called twice, with different debug flag values.
@asyncio.coroutine
def coro():
# The actual coroutine.
self.assertTrue(gen.gi_running)
yield from fut
# A completed Future used to run the coroutine.
fut = asyncio.Future(loop=self.loop)
fut.set_result(None)
# Call the coroutine.
gen = coro()
# Check some properties.
self.assertTrue(asyncio.iscoroutine(gen))
self.assertIsInstance(gen.gi_frame, types.FrameType)
self.assertFalse(gen.gi_running)
self.assertIsInstance(gen.gi_code, types.CodeType)
# Run it.
self.loop.run_until_complete(gen)
# The frame should have changed.
self.assertIsNone(gen.gi_frame)
# Save debug flag.
old_debug = asyncio.coroutines._DEBUG
try:
# Test with debug flag cleared.
asyncio.coroutines._DEBUG = False
check()
# Test with debug flag set.
asyncio.coroutines._DEBUG = True
check()
finally:
# Restore original debug flag.
asyncio.coroutines._DEBUG = old_debug
def test_yield_from_corowrapper(self):
old_debug = asyncio.coroutines._DEBUG
asyncio.coroutines._DEBUG = True
try:
@asyncio.coroutine
def t1():
return (yield from t2())
@asyncio.coroutine
def t2():
f = asyncio.Future(loop=self.loop)
asyncio.Task(t3(f), loop=self.loop)
return (yield from f)
@asyncio.coroutine
def t3(f):
f.set_result((1, 2, 3))
task = asyncio.Task(t1(), loop=self.loop)
val = self.loop.run_until_complete(task)
self.assertEqual(val, (1, 2, 3))
finally:
asyncio.coroutines._DEBUG = old_debug
def test_yield_from_corowrapper_send(self):
def foo():
a = yield
return a
def call(arg):
cw = asyncio.coroutines.CoroWrapper(foo(), foo)
cw.send(None)
try:
cw.send(arg)
except StopIteration as ex:
return ex.args[0]
else:
raise AssertionError('StopIteration was expected')
self.assertEqual(call((1, 2)), (1, 2))
self.assertEqual(call('spam'), 'spam')
def test_corowrapper_weakref(self):
wd = weakref.WeakValueDictionary()
def foo(): yield from []
cw = asyncio.coroutines.CoroWrapper(foo(), foo)
wd['cw'] = cw # Would fail without __weakref__ slot.
cw.gen = None # Suppress warning from __del__.
@unittest.skipUnless(PY34,
'need python 3.4 or later')
def test_log_destroyed_pending_task(self):
@asyncio.coroutine
def kill_me(loop):
future = asyncio.Future(loop=loop)
yield from future
# at this point, the only reference to kill_me() task is
# the Task._wakeup() method in future._callbacks
raise Exception("code never reached")
mock_handler = mock.Mock()
self.loop.set_debug(True)
self.loop.set_exception_handler(mock_handler)
# schedule the task
coro = kill_me(self.loop)
task = asyncio.async(coro, loop=self.loop)
self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), {task})
# execute the task so it waits for future
self.loop._run_once()
self.assertEqual(len(self.loop._ready), 0)
# remove the future used in kill_me(), and references to the task
del coro.gi_frame.f_locals['future']
coro = None
source_traceback = task._source_traceback
task = None
# no more reference to kill_me() task: the task is destroyed by the GC
support.gc_collect()
self.assertEqual(asyncio.Task.all_tasks(loop=self.loop), set())
mock_handler.assert_called_with(self.loop, {
'message': 'Task was destroyed but it is pending!',
'task': mock.ANY,
'source_traceback': source_traceback,
})
mock_handler.reset_mock()
@mock.patch('asyncio.coroutines.logger')
def test_coroutine_never_yielded(self, m_log):
debug = asyncio.coroutines._DEBUG
try:
asyncio.coroutines._DEBUG = True
@asyncio.coroutine
def coro_noop():
pass
finally:
asyncio.coroutines._DEBUG = debug
tb_filename = __file__
tb_lineno = sys._getframe().f_lineno + 2
# create a coroutine object but don't use it
coro_noop()
support.gc_collect()
self.assertTrue(m_log.error.called)
message = m_log.error.call_args[0][0]
func_filename, func_lineno = test_utils.get_function_source(coro_noop)
regex = (r'^<CoroWrapper %s\(\) .* at %s:%s, .*> was never yielded from\n'
r'Coroutine object created at \(most recent call last\):\n'
r'.*\n'
r' File "%s", line %s, in test_coroutine_never_yielded\n'
r' coro_noop\(\)$'
% (re.escape(coro_noop.__qualname__),
re.escape(func_filename), func_lineno,
re.escape(tb_filename), tb_lineno))
self.assertRegex(message, re.compile(regex, re.DOTALL))
def test_task_source_traceback(self):
self.loop.set_debug(True)
task = asyncio.Task(coroutine_function(), loop=self.loop)
lineno = sys._getframe().f_lineno - 1
self.assertIsInstance(task._source_traceback, list)
self.assertEqual(task._source_traceback[-1][:3],
(__file__,
lineno,
'test_task_source_traceback'))
self.loop.run_until_complete(task)
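# GatherTestsBase holds the gather() checks shared by the two concrete cases
# below: FutureGatherTests feeds plain Futures to gather(), while
# CoroutineGatherTests wraps each Future in a coroutine via wrap_futures().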
class GatherTestsBase:
def setUp(self):
self.one_loop = self.new_test_loop()
self.other_loop = self.new_test_loop()
self.set_event_loop(self.one_loop, cleanup=False)
def _run_loop(self, loop):
while loop._ready:
test_utils.run_briefly(loop)
def _check_success(self, **kwargs):
a, b, c = [asyncio.Future(loop=self.one_loop) for i in range(3)]
fut = asyncio.gather(*self.wrap_futures(a, b, c), **kwargs)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
b.set_result(1)
a.set_result(2)
self._run_loop(self.one_loop)
self.assertEqual(cb.called, False)
self.assertFalse(fut.done())
c.set_result(3)
self._run_loop(self.one_loop)
cb.assert_called_once_with(fut)
self.assertEqual(fut.result(), [2, 1, 3])
def test_success(self):
self._check_success()
self._check_success(return_exceptions=False)
def test_result_exception_success(self):
self._check_success(return_exceptions=True)
def test_one_exception(self):
a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)]
fut = asyncio.gather(*self.wrap_futures(a, b, c, d, e))
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
exc = ZeroDivisionError()
a.set_result(1)
b.set_exception(exc)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertIs(fut.exception(), exc)
# Does nothing
c.set_result(3)
d.cancel()
e.set_exception(RuntimeError())
e.exception()
def test_return_exceptions(self):
a, b, c, d = [asyncio.Future(loop=self.one_loop) for i in range(4)]
fut = asyncio.gather(*self.wrap_futures(a, b, c, d),
return_exceptions=True)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
exc = ZeroDivisionError()
exc2 = RuntimeError()
b.set_result(1)
c.set_exception(exc)
a.set_result(3)
self._run_loop(self.one_loop)
self.assertFalse(fut.done())
d.set_exception(exc2)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertEqual(fut.result(), [3, 1, exc, exc2])
def test_env_var_debug(self):
code = '\n'.join((
'import asyncio.coroutines',
'print(asyncio.coroutines._DEBUG)'))
# Test with -E to not fail if the unit test was run with
# PYTHONASYNCIODEBUG set to a non-empty string
sts, stdout, stderr = assert_python_ok('-E', '-c', code)
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='')
self.assertEqual(stdout.rstrip(), b'False')
sts, stdout, stderr = assert_python_ok('-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'True')
sts, stdout, stderr = assert_python_ok('-E', '-c', code,
PYTHONASYNCIODEBUG='1')
self.assertEqual(stdout.rstrip(), b'False')
class FutureGatherTests(GatherTestsBase, test_utils.TestCase):
def wrap_futures(self, *futures):
return futures
def _check_empty_sequence(self, seq_or_iter):
asyncio.set_event_loop(self.one_loop)
self.addCleanup(asyncio.set_event_loop, None)
fut = asyncio.gather(*seq_or_iter)
self.assertIsInstance(fut, asyncio.Future)
self.assertIs(fut._loop, self.one_loop)
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
self.assertEqual(fut.result(), [])
fut = asyncio.gather(*seq_or_iter, loop=self.other_loop)
self.assertIs(fut._loop, self.other_loop)
def test_constructor_empty_sequence(self):
self._check_empty_sequence([])
self._check_empty_sequence(())
self._check_empty_sequence(set())
self._check_empty_sequence(iter(""))
def test_constructor_heterogenous_futures(self):
fut1 = asyncio.Future(loop=self.one_loop)
fut2 = asyncio.Future(loop=self.other_loop)
with self.assertRaises(ValueError):
asyncio.gather(fut1, fut2)
with self.assertRaises(ValueError):
asyncio.gather(fut1, loop=self.other_loop)
def test_constructor_homogenous_futures(self):
children = [asyncio.Future(loop=self.other_loop) for i in range(3)]
fut = asyncio.gather(*children)
self.assertIs(fut._loop, self.other_loop)
self._run_loop(self.other_loop)
self.assertFalse(fut.done())
fut = asyncio.gather(*children, loop=self.other_loop)
self.assertIs(fut._loop, self.other_loop)
self._run_loop(self.other_loop)
self.assertFalse(fut.done())
def test_one_cancellation(self):
a, b, c, d, e = [asyncio.Future(loop=self.one_loop) for i in range(5)]
fut = asyncio.gather(a, b, c, d, e)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
a.set_result(1)
b.cancel()
self._run_loop(self.one_loop)
self.assertTrue(fut.done())
cb.assert_called_once_with(fut)
self.assertFalse(fut.cancelled())
self.assertIsInstance(fut.exception(), asyncio.CancelledError)
# Does nothing
c.set_result(3)
d.cancel()
e.set_exception(RuntimeError())
e.exception()
def test_result_exception_one_cancellation(self):
a, b, c, d, e, f = [asyncio.Future(loop=self.one_loop)
for i in range(6)]
fut = asyncio.gather(a, b, c, d, e, f, return_exceptions=True)
cb = test_utils.MockCallback()
fut.add_done_callback(cb)
a.set_result(1)
zde = ZeroDivisionError()
b.set_exception(zde)
c.cancel()
self._run_loop(self.one_loop)
self.assertFalse(fut.done())
d.set_result(3)
e.cancel()
rte = RuntimeError()
f.set_exception(rte)
res = self.one_loop.run_until_complete(fut)
self.assertIsInstance(res[2], asyncio.CancelledError)
self.assertIsInstance(res[4], asyncio.CancelledError)
res[2] = res[4] = None
self.assertEqual(res, [1, zde, None, 3, None, rte])
cb.assert_called_once_with(fut)
class CoroutineGatherTests(GatherTestsBase, test_utils.TestCase):
def setUp(self):
super().setUp()
asyncio.set_event_loop(self.one_loop)
def wrap_futures(self, *futures):
coros = []
for fut in futures:
@asyncio.coroutine
def coro(fut=fut):
return (yield from fut)
coros.append(coro())
return coros
def test_constructor_loop_selection(self):
@asyncio.coroutine
def coro():
return 'abc'
gen1 = coro()
gen2 = coro()
fut = asyncio.gather(gen1, gen2)
self.assertIs(fut._loop, self.one_loop)
self.one_loop.run_until_complete(fut)
self.set_event_loop(self.other_loop, cleanup=False)
gen3 = coro()
gen4 = coro()
fut2 = asyncio.gather(gen3, gen4, loop=self.other_loop)
self.assertIs(fut2._loop, self.other_loop)
self.other_loop.run_until_complete(fut2)
def test_duplicate_coroutines(self):
@asyncio.coroutine
def coro(s):
return s
c = coro('abc')
fut = asyncio.gather(c, c, coro('def'), c, loop=self.one_loop)
self._run_loop(self.one_loop)
self.assertEqual(fut.result(), ['abc', 'abc', 'def', 'abc'])
def test_cancellation_broadcast(self):
# Cancelling outer() cancels all children.
proof = 0
waiter = asyncio.Future(loop=self.one_loop)
@asyncio.coroutine
def inner():
nonlocal proof
yield from waiter
proof += 1
child1 = asyncio.async(inner(), loop=self.one_loop)
child2 = asyncio.async(inner(), loop=self.one_loop)
gatherer = None
@asyncio.coroutine
def outer():
nonlocal proof, gatherer
gatherer = asyncio.gather(child1, child2, loop=self.one_loop)
yield from gatherer
proof += 100
f = asyncio.async(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
self.assertTrue(f.cancel())
with self.assertRaises(asyncio.CancelledError):
self.one_loop.run_until_complete(f)
self.assertFalse(gatherer.cancel())
self.assertTrue(waiter.cancelled())
self.assertTrue(child1.cancelled())
self.assertTrue(child2.cancelled())
test_utils.run_briefly(self.one_loop)
self.assertEqual(proof, 0)
def test_exception_marking(self):
# Test for the first line marked "Mark exception retrieved."
@asyncio.coroutine
def inner(f):
yield from f
raise RuntimeError('should not be ignored')
a = asyncio.Future(loop=self.one_loop)
b = asyncio.Future(loop=self.one_loop)
@asyncio.coroutine
def outer():
yield from asyncio.gather(inner(a), inner(b), loop=self.one_loop)
f = asyncio.async(outer(), loop=self.one_loop)
test_utils.run_briefly(self.one_loop)
a.set_result(None)
test_utils.run_briefly(self.one_loop)
b.set_result(None)
test_utils.run_briefly(self.one_loop)
self.assertIsInstance(f.exception(), RuntimeError)
if __name__ == '__main__':
unittest.main()
|
eespaillat/RoommateManagerApp
|
refs/heads/master
|
roommatemanager/api/test/api/test_api_accept_decline_invitations.py
|
1
|
from django.test import TestCase, RequestFactory
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, AnonymousUser
from roommatemanager.api.models import Group, GroupProfile
from roommatemanager.api import api
import json
class DemoteAdminTestCase(TestCase):
def setUp(self):
self.factory = RequestFactory()
self.user = User.objects.create_user(
username='test', email='test@test.com', password='testing123')
self.group = Group.objects.create(group_name="test_group")
self.group_profile = GroupProfile.objects.create(user=self.user, group=self.group, accepted=True, admin=True)
self.user2 = User.objects.create_user(
username='test2', email='test2@test.com', password='testing123')
self.group_profile_2 = GroupProfile.objects.create(user=self.user2, group=self.group)
def test_accept_invitation_unauthenticated_user_fails(self):
request = self.factory.post(reverse("accept_invitation", kwargs={"group_id": self.group.pk}))
request.user = AnonymousUser()
response = api.accept_invitation(request, group_id=self.group.pk)
self.assertEqual(response.status_code, 403)
def test_decline_invitation_unauthenticated_user_fails(self):
request = self.factory.post(reverse("decline_invitation", kwargs={"group_id": self.group.pk}))
request.user = AnonymousUser()
response = api.decline_invitation(request, group_id=self.group.pk)
self.assertEqual(response.status_code, 403)
def test_accept_invitation(self):
request = self.factory.post(reverse("accept_invitation", kwargs={"group_id": self.group.pk}))
request.user = self.user2
response = api.accept_invitation(request, group_id=self.group.pk)
self.assertEqual(response.status_code, 200)
gp = GroupProfile.objects.get(group=self.group, user=self.user2)
self.assertTrue(gp.accepted)
def test_decline_invitation(self):
request = self.factory.post(reverse("accept_invitation", kwargs={"group_id": self.group.pk}))
request.user = self.user2
response = api.decline_invitation(request, group_id=self.group.pk)
self.assertEqual(response.status_code, 200)
try:
gp = GroupProfile.objects.get(group=self.group, user=self.user2)
except GroupProfile.DoesNotExist:
gp = None
self.assertTrue(gp is None)
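# These tests call the api views directly with RequestFactory requests and set
# request.user by hand, so no middleware or URL routing is exercised; only the
# view logic and the resulting GroupProfile changes are asserted.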
|
sombree/android_external_skia
|
refs/heads/cm-12.1
|
tools/compare_codereview.py
|
163
|
#!/usr/bin/python2
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Skia's Chromium Codereview Comparison Script.
This script takes two Codereview URLs, looks at the trybot results for
the two codereviews and compares the results.
Usage:
compare_codereview.py CONTROL_URL ROLL_URL
"""
import collections
import os
import re
import sys
import urllib2
import HTMLParser
class CodeReviewHTMLParser(HTMLParser.HTMLParser):
"""Parses CodeReview web page.
Use the CodeReviewHTMLParser.parse static function to make use of
this class.
This uses the HTMLParser class because it's the best thing in
Python's standard library. We need a little more power than a
regex. [Search for "You can't parse [X]HTML with regex." for more
information.]
"""
# pylint: disable=I0011,R0904
@staticmethod
def parse(url):
"""Parses a CodeReview web pages.
Args:
url (string), a codereview URL like this:
'https://codereview.chromium.org/?????????'.
Returns:
A dictionary; the keys are bot_name strings, the values
are CodeReviewHTMLParser.Status objects
"""
parser = CodeReviewHTMLParser()
try:
parser.feed(urllib2.urlopen(url).read())
except (urllib2.URLError,):
print >> sys.stderr, 'Error getting', url
return None
parser.close()
return parser.statuses
# namedtuples are like lightweight structs in Python. The low
# overhead of a tuple, but the ease of use of an object.
Status = collections.namedtuple('Status', ['status', 'url'])
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self._id = None
self._status = None
self._href = None
self._anchor_data = ''
self._currently_parsing_trybotdiv = False
# statuses is a dictionary of CodeReviewHTMLParser.Status
self.statuses = {}
def handle_starttag(self, tag, attrs):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the start of a tag
(e.g. <div id="main">).
The tag argument is the name of the tag converted to lower
case. The attrs argument is a list of (name, value) pairs
containing the attributes found inside the tag's <>
brackets. The name will be translated to lower case, and
quotes in the value have been removed, and character and
entity references have been replaced.
For instance, for the tag <A HREF="http://www.cwi.nl/">, this
method would be called as handle_starttag('a', [('href',
'http://www.cwi.nl/')]).
[[end standard library documentation]]
"""
attrs = dict(attrs)
if tag == 'div':
# We are looking for <div id="tryjobdiv*">.
id_attr = attrs.get('id','')
if id_attr.startswith('tryjobdiv'):
self._id = id_attr
if (self._id and tag == 'a'
and 'build-result' in attrs.get('class', '').split()):
# If we are already inside a <div id="tryjobdiv*">, we
# look for a link of the form
# <a class="build-result" href="*">. Then we save the
# (non-standard) status attribute and the URL.
self._status = attrs.get('status')
self._href = attrs.get('href')
self._currently_parsing_trybotdiv = True
# Start saving anchor data.
def handle_data(self, data):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to process arbitrary data (e.g. text
nodes and the content of <script>...</script> and
<style>...</style>).
[[end standard library documentation]]
"""
# Save the text inside the <a></a> tags. Assume <a> tags
# aren't nested.
if self._currently_parsing_trybotdiv:
self._anchor_data += data
def handle_endtag(self, tag):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the end tag of an element
(e.g. </div>). The tag argument is the name of the tag
converted to lower case.
[[end standard library documentation]]
"""
if tag == 'a' and self._status:
# We take the accumulated self._anchor_data and save it as
# the bot name.
bot = self._anchor_data.strip()
stat = CodeReviewHTMLParser.Status(status=self._status,
url=self._href)
if bot:
# Add to accumulating dictionary.
self.statuses[bot] = stat
# Reset state to search for the next bot.
self._currently_parsing_trybotdiv = False
self._anchor_data = ''
self._status = None
self._href = None
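# Shape of the mapping CodeReviewHTMLParser.parse() builds (the bot name,
# status and URL below are invented for illustration):
#   {'linux_trybot': Status(status='failure', url='http://build.example/123')}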
class BuilderHTMLParser(HTMLParser.HTMLParser):
"""parses Trybot web pages.
Use the BuilderHTMLParser.parse static function to make use of
this class.
This uses the HTMLParser class because it's the best thing in
Python's standard library. We need a little more power than a
regex. [Search for "You can't parse [X]HTML with regex." for more
information.]
"""
# pylint: disable=I0011,R0904
@staticmethod
def parse(url):
"""Parses a Trybot web page.
Args:
url (string), a trybot result URL.
Returns:
An array of BuilderHTMLParser.Result namedtuples, each a
description of a failure result, along with an optional URL.
"""
parser = BuilderHTMLParser()
try:
parser.feed(urllib2.urlopen(url).read())
except (urllib2.URLError,):
print >> sys.stderr, 'Error getting', url
return []
parser.close()
return parser.failure_results
Result = collections.namedtuple('Result', ['text', 'url'])
def __init__(self):
HTMLParser.HTMLParser.__init__(self)
self.failure_results = []
self._current_failure_result = None
self._divlevel = None
self._li_level = 0
self._li_data = ''
self._current_failure = False
self._failure_results_url = ''
def handle_starttag(self, tag, attrs):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the start of a tag
(e.g. <div id="main">).
The tag argument is the name of the tag converted to lower
case. The attrs argument is a list of (name, value) pairs
containing the attributes found inside the tag's <>
brackets. The name will be translated to lower case, and
quotes in the value have been removed, and character and
entity references have been replaced.
For instance, for the tag <A HREF="http://www.cwi.nl/">, this
method would be called as handle_starttag('a', [('href',
'http://www.cwi.nl/')]).
[[end standard library documentation]]
"""
attrs = dict(attrs)
if tag == 'li':
# <li> tags can be nested. So we have to count the
# nest-level for backing out.
self._li_level += 1
return
if tag == 'div' and attrs.get('class') == 'failure result':
# We care about this sort of thing:
# <li>
# <li>
# <li>
# <div class="failure result">...</div>
# </li>
# </li>
# We want this text here.
# </li>
if self._li_level > 0:
self._current_failure = True # Tells us to keep text.
return
if tag == 'a' and self._current_failure:
href = attrs.get('href')
# Sometimes we want to keep the stdio url. We always
# return it, just in case.
if href.endswith('/logs/stdio'):
self._failure_results_url = href
def handle_data(self, data):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to process arbitrary data (e.g. text
nodes and the content of <script>...</script> and
<style>...</style>).
[[end standard library documentation]]
"""
if self._current_failure:
self._li_data += data
def handle_endtag(self, tag):
"""Overrides the HTMLParser method to implement functionality.
[[begin standard library documentation]]
This method is called to handle the end tag of an element
(e.g. </div>). The tag argument is the name of the tag
converted to lower case.
[[end standard library documentation]]
"""
if tag == 'li':
self._li_level -= 1
if 0 == self._li_level:
if self._current_failure:
result = self._li_data.strip()
first = result.split()[0]
if first:
result = re.sub(
r'^%s(\s+%s)+' % (first, first), first, result)
# Sometimes, it repeats the same thing
# multiple times.
result = re.sub(r'unexpected flaky.*', '', result)
# Remove some extra unnecessary text.
result = re.sub(r'\bpreamble\b', '', result)
result = re.sub(r'\bstdio\b', '', result)
url = self._failure_results_url
self.failure_results.append(
BuilderHTMLParser.Result(result, url))
self._current_failure_result = None
# Reset the state.
self._current_failure = False
self._li_data = ''
self._failure_results_url = ''
def printer(indent, string):
"""Print indented, wrapped text.
"""
def wrap_to(line, columns):
"""Wrap a line to the given number of columns, return a list
of strings.
"""
ret = []
nextline = ''
for word in line.split():
if nextline:
if len(nextline) + 1 + len(word) > columns:
ret.append(nextline)
nextline = word
else:
nextline += (' ' + word)
else:
nextline = word
if nextline:
ret.append(nextline)
return ret
out = sys.stdout
spacer = ' '
for line in string.split('\n'):
for i, wrapped_line in enumerate(wrap_to(line, 68 - (2 * indent))):
out.write(spacer * indent)
if i > 0:
out.write(spacer)
out.write(wrapped_line)
out.write('\n')
out.flush()
def main(control_url, roll_url, verbosity=1):
"""Compare two Codereview URLs
Args:
control_url, roll_url: (strings) URL of the format
https://codereview.chromium.org/?????????
verbosity: (int) verbose level. 0, 1, or 2.
"""
# pylint: disable=I0011,R0914,R0912
control = CodeReviewHTMLParser.parse(control_url)
roll = CodeReviewHTMLParser.parse(roll_url)
all_bots = set(control) & set(roll) # Set intersection.
if not all_bots:
print >> sys.stderr, (
'Error: control %s and roll %s have no common trybots.'
% (list(control), list(roll)))
return
control_name = '[control %s]' % control_url.split('/')[-1]
roll_name = '[roll %s]' % roll_url.split('/')[-1]
out = sys.stdout
for bot in sorted(all_bots):
if (roll[bot].status == 'success'):
if verbosity > 1:
printer(0, '==%s==' % bot)
printer(1, 'OK')
continue
if control[bot].status != 'failure' and roll[bot].status != 'failure':
continue
printer(0, '==%s==' % bot)
formatted_results = []
for (status, name, url) in [
(control[bot].status, control_name, control[bot].url),
( roll[bot].status, roll_name, roll[bot].url)]:
lines = []
if status == 'failure':
results = BuilderHTMLParser.parse(url)
for result in results:
formatted_result = re.sub(r'(\S*\.html) ', '\n__\g<1>\n', result.text)
# Strip runtimes.
formatted_result = re.sub(r'\(.*\)', '', formatted_result)
lines.append((2, formatted_result))
if ('compile' in result.text or '...and more' in result.text):
lines.append((3, re.sub('/[^/]*$', '/', url) + result.url))
formatted_results.append(lines)
identical = formatted_results[0] == formatted_results[1]
for (formatted_result, (status, name, url)) in zip(
formatted_results,
[(control[bot].status, control_name, control[bot].url),
(roll[bot].status, roll_name, roll[bot].url)]):
if status != 'failure' and not identical:
printer(1, name)
printer(2, status)
elif status == 'failure':
if identical:
printer(1, control_name + ' and ' + roll_name + ' failed identically')
else:
printer(1, name)
for (indent, line) in formatted_result:
printer(indent, line)
if identical:
break
out.write('\n')
if verbosity > 0:
# Print out summary of all of the bots.
out.write('%11s %11s %4s %s\n\n' %
('CONTROL', 'ROLL', 'DIFF', 'BOT'))
for bot in sorted(all_bots):
if roll[bot].status == 'success':
diff = ''
elif (control[bot].status == 'success' and
roll[bot].status == 'failure'):
diff = '!!!!'
elif ('pending' in control[bot].status or
'pending' in roll[bot].status):
diff = '....'
else:
diff = '****'
out.write('%11s %11s %4s %s\n' % (
control[bot].status, roll[bot].status, diff, bot))
out.write('\n')
out.flush()
if __name__ == '__main__':
if len(sys.argv) < 3:
print >> sys.stderr, __doc__
exit(1)
main(sys.argv[1], sys.argv[2],
int(os.environ.get('COMPARE_CODEREVIEW_VERBOSITY', 1)))
|
syci/website-addons
|
refs/heads/8.0
|
website_sale_search_custom/__openerp__.py
|
3
|
{
'name': "Custom website search",
'summary': 'Check WARNING before installing!',
'version': '1.0.0',
'author': 'Ivan Yelizariev',
'category': 'Custom',
'website': 'https://yelizariev.github.io',
'depends': ['website_sale', 'product_tags'],
'data': [
'views.xml',
],
'installable': True
}
|
cctaylor/googleads-python-lib
|
refs/heads/master
|
examples/adwords/v201506/reporting/download_criteria_report_as_string_with_awql.py
|
3
|
#!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example downloads a criteria performance report as a string with AWQL.
To get report fields, run get_report_fields.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: ReportDefinitionService.mutate
Api: AdWordsOnly
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)',
'Joseph DiLallo')
from googleads import adwords
# Specify where to download the file here.
PATH = '/tmp/report_download.csv'
def main(client):
# Initialize appropriate service.
report_downloader = client.GetReportDownloader(version='v201506')
# Create report query.
report_query = ('SELECT CampaignId, AdGroupId, Id, Criteria, CriteriaType, '
'FinalUrls, Impressions, Clicks, Cost '
'FROM CRITERIA_PERFORMANCE_REPORT '
'WHERE Status IN [ENABLED, PAUSED] '
'DURING LAST_7_DAYS')
print report_downloader.DownloadReportAsStringWithAwql(
report_query, 'CSV', skip_report_header=False, skip_column_header=False,
skip_report_summary=False, include_zero_impressions=False)
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
|
chrisnorman7/MyGui
|
refs/heads/master
|
MyGui/menu.py
|
1
|
import wx
class Menu(wx.Menu):
"""Generic menu, makes adding stuff a bit easier."""
def __init__(self, frame, name):
"""Creates the menu and sets the frame it'll be bound too as well as it's name."""
self.Frame = frame
self.Name = name
def EasyAppend(self, function, text, help, id = wx.NewId()):
self.frame.Bind(wx.EVT_MENU, function, self.Append(id, text, help))
return id
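# A minimal usage sketch (hypothetical frame and handler, not part of this module):
#
#   menu = Menu(frame, 'File')
#   menu.EasyAppend(frame.OnOpen, 'Open...', 'Open a file')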
|
damonkohler/sl4a
|
refs/heads/master
|
python/src/Demo/scripts/pi.py
|
32
|
#! /usr/bin/env python
# Print digits of pi forever.
#
# The algorithm, using Python's 'long' integers ("bignums"), works
# with continued fractions, and was conceived by Lambert Meertens.
#
# See also the ABC Programmer's Handbook, by Geurts, Meertens & Pemberton,
# published by Prentice-Hall (UK) Ltd., 1990.
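#
# Roughly: a/b and a1/b1 are two successive rational approximations of pi
# taken from the continued fraction; whenever their integer parts agree,
# that digit is certain and is printed (a summary of the loop below).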
import sys
def main():
k, a, b, a1, b1 = 2L, 4L, 1L, 12L, 4L
while 1:
# Next approximation
p, q, k = k*k, 2L*k+1L, k+1L
a, b, a1, b1 = a1, b1, p*a+q*a1, p*b+q*b1
# Print common digits
d, d1 = a//b, a1//b1
while d == d1:
output(d)
a, a1 = 10L*(a%b), 10L*(a1%b1)
d, d1 = a//b, a1//b1
def output(d):
# Use write() to avoid spaces between the digits
# Use str() to avoid the 'L'
sys.stdout.write(str(d))
# Flush so the output is seen immediately
sys.stdout.flush()
if __name__ == "__main__":
main()
|
hazrpg/calibre
|
refs/heads/master
|
src/calibre/ebooks/lit/lzx.py
|
24
|
'''
LZX compression/decompression wrapper.
'''
from __future__ import with_statement
__license__ = 'GPL v3'
__copyright__ = '2008, Marshall T. Vandegrift <llasram@gmail.com>'
from calibre import plugins
_lzx, _error = plugins['lzx']
if _lzx is None:
raise RuntimeError('Failed to load the lzx plugin: %s' % _error)
__all__ = ['Compressor', 'Decompressor', 'LZXError']
LZXError = _lzx.LZXError
Compressor = _lzx.Compressor
class Decompressor(object):
def __init__(self, wbits):
self.wbits = wbits
self.blocksize = 1 << wbits
_lzx.init(wbits)
def decompress(self, data, outlen):
return _lzx.decompress(data, outlen)
def reset(self):
return _lzx.reset()
|
aparo/django-nonrel
|
refs/heads/master
|
django/core/serializers/__init__.py
|
36
|
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_query_set)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv" : "path.to.csv.serializer",
"txt" : "path.to.txt.serializer",
}
"""
from django.conf import settings
from django.utils import importlib
# Built-in serializers
BUILTIN_SERIALIZERS = {
"xml" : "django.core.serializers.xml_serializer",
"python" : "django.core.serializers.python",
"json" : "django.core.serializers.json",
}
# Check for PyYaml and register the serializer if it's available.
try:
import yaml
BUILTIN_SERIALIZERS["yaml"] = "django.core.serializers.pyyaml"
except ImportError:
pass
_serializers = {}
def register_serializer(format, serializer_module, serializers=None):
""""Register a new serializer.
``serializer_module`` should be the fully qualified module name
for the serializer.
If ``serializers`` is provided, the registration will be added
to the provided dictionary.
If ``serializers`` is not provided, the registration will be made
directly into the global register of serializers. Adding serializers
directly is not a thread-safe operation.
"""
module = importlib.import_module(serializer_module)
if serializers is None:
_serializers[format] = module
else:
serializers[format] = module
def unregister_serializer(format):
"Unregister a given serializer. This is not a thread-safe operation."
del _serializers[format]
def get_serializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Serializer
def get_serializer_formats():
if not _serializers:
_load_serializers()
return _serializers.keys()
def get_public_serializer_formats():
if not _serializers:
_load_serializers()
return [k for k, v in _serializers.iteritems() if not v.Serializer.internal_use_only]
def get_deserializer(format):
if not _serializers:
_load_serializers()
return _serializers[format].Deserializer
def serialize(format, queryset, **options):
"""
Serialize a queryset (or any iterator that returns database objects) using
a certain serializer.
"""
s = get_serializer(format)()
s.serialize(queryset, **options)
return s.getvalue()
def deserialize(format, stream_or_string, **options):
"""
Deserialize a stream or a string. Returns an iterator that yields ``(obj,
m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
list_of_related_objects}``.
"""
d = get_deserializer(format)
return d(stream_or_string, **options)
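# A minimal usage sketch (assuming ``data`` holds output from serialize("json", ...)):
#
#   for deserialized in deserialize("json", data):
#       deserialized.save()  # saves the wrapped, previously unsaved object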
def _load_serializers():
"""
Register built-in and settings-defined serializers. This is done lazily so
that user code has a chance to (e.g.) set up custom settings without
needing to be careful of import order.
"""
global _serializers
serializers = {}
for format in BUILTIN_SERIALIZERS:
register_serializer(format, BUILTIN_SERIALIZERS[format], serializers)
if hasattr(settings, "SERIALIZATION_MODULES"):
for format in settings.SERIALIZATION_MODULES:
register_serializer(format, settings.SERIALIZATION_MODULES[format], serializers)
_serializers = serializers
|
drglove/SickRage
|
refs/heads/master
|
lib/adba/aniDBmaper.py
|
70
|
#!/usr/bin/env python
#
# This file is part of aDBa.
#
# aDBa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# aDBa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with aDBa. If not, see <http://www.gnu.org/licenses/>.
from random import shuffle
class AniDBMaper:
blacklist = ('unused','retired','reserved')
def getAnimeBitsA(self,amask):
map = self.getAnimeMapA()
return self._getBitChain(map,amask)
def getAnimeCodesA(self,aBitChain):
amap = self.getAnimeMapA()
return self._getCodes(amap,aBitChain)
def getFileBitsF(self,fmask):
fmap = self.getFileMapF()
return self._getBitChain(fmap,fmask)
def getFileCodesF(self,bitChainF):
fmap = self.getFileMapF()
return self._getCodes(fmap,bitChainF)
def getFileBitsA(self,amask):
amap = self.getFileMapA()
return self._getBitChain(amap,amask)
def getFileCodesA(self,bitChainA):
amap = self.getFileMapA()
return self._getCodes(amap,bitChainA)
def _getBitChain(self,map,wanted):
"""Return an hex string with the correct bit set corresponding to the wanted fields in the map
"""
bit = 0
for index,field in enumerate(map):
if field in wanted and not field in self.blacklist:
bit = bit ^ (1<<len(map)-index-1)
bit = str(hex(bit)).lstrip("0x").rstrip("L")
bit = ''.join(["0" for unused in xrange(len(map)/4 - len(bit))])+bit
return bit
def _getCodes(self,map,bitChain):
"""Returns a list with the corresponding fields as set in the bitChain (hex string)
"""
codeList=[]
bitChain = int(bitChain,16)
mapLength = len(map)
for i in reversed(range(mapLength)):
if bitChain&(2**i):
codeList.append(map[mapLength-i-1])
return codeList
def getAnimeMapA(self):
# each line is one byte
# only change this if the API changes
map = ['aid','unused','year','type','related_aid_list','related_aid_type','category_list','category_weight_list',
'romaji_name','kanji_name','english_name','other_name','short_name_list','synonym_list','retired','retired',
'episodes','highest_episode_number','special_ep_count','air_date','end_date','url','picname','category_id_list',
'rating','vote_count','temp_rating','temp_vote_count','average_review_rating','review_count','award_list','is_18_restricted',
'anime_planet_id','ANN_id','allcinema_id','AnimeNfo_id','unused','unused','unused','date_record_updated',
'character_id_list','creator_id_list','main_creator_id_list','main_creator_name_list','unused','unused','unused','unused',
'specials_count','credits_count','other_count','trailer_count','parody_count','unused','unused','unused']
return map
def getFileMapF(self):
# each line is one byte
# only change this if the API changes
map = ['unused','aid','eid','gid','mylist_id','list_other_episodes','IsDeprecated','state',
'size','ed2k','md5','sha1','crc32','unused','unused','reserved',
'quality','source','audio_codec_list','audio_bitrate_list','video_codec','video_bitrate','video_resolution','file_type_extension',
'dub_language','sub_language','length_in_seconds','description','aired_date','unused','unused','anidb_file_name',
'mylist_state','mylist_filestate','mylist_viewed','mylist_viewdate','mylist_storage','mylist_source','mylist_other','unused']
return map
def getFileMapA(self):
# each line is one byte
# only change this if the API changes
map = ['anime_total_episodes','highest_episode_number','year','type','related_aid_list','related_aid_type','category_list','reserved',
'romaji_name','kanji_name','english_name','other_name','short_name_list','synonym_list','retired','retired',
'epno','ep_name','ep_romaji_name','ep_kanji_name','episode_rating','episode_vote_count','unused','unused',
'group_name','group_short_name','unused','unused','unused','unused','unused','date_aid_record_updated']
return map
def checkMapping(self,verbos=False):
print "------"
print "File F: "+ str(self.checkMapFileF(verbos))
print "------"
print "File A: "+ str(self.checkMapFileA(verbos))
def checkMapFileF(self,verbos=False):
getGeneralMap = lambda: self.getFileMapF()
getBits = lambda x: self.getFileBitsF(x)
getCodes = lambda x: self.getFileCodesF(x)
return self._checkMapGeneral(getGeneralMap,getBits,getCodes,verbos=verbos)
def checkMapFileA(self,verbos=False):
getGeneralMap = lambda: self.getFileMapA()
getBits = lambda x: self.getFileBitsA(x)
getCodes = lambda x: self.getFileCodesA(x)
return self._checkMapGeneral(getGeneralMap,getBits,getCodes,verbos=verbos)
def _checkMapGeneral(self,getGeneralMap,getBits,getCodes,verbos=False):
map = getGeneralMap()
shuffle(map)
mask = [elem for elem in map if elem not in self.blacklist][:5]
bits = getBits(mask)
mask_re = getCodes(bits)
bits_re = getBits(mask_re)
if verbos:
print mask
print mask_re
print bits
print bits_re
print "bits are:"+ str((bits_re == bits))
print "map is :"+ str((sorted(mask_re) == sorted(mask)))
return (bits_re == bits) and sorted(mask_re) == sorted(mask)
|
scality/cinder
|
refs/heads/master
|
cinder/tests/unit/test_hitachi_hbsd_snm2_iscsi.py
|
20
|
# Copyright (C) 2014, Hitachi, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
Self test for Hitachi Block Storage Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.hitachi import hbsd_basiclib
from cinder.volume.drivers.hitachi import hbsd_common
from cinder.volume.drivers.hitachi import hbsd_iscsi
from cinder.volume.drivers.hitachi import hbsd_snm2
def _exec_hsnm(*args, **kargs):
return HBSDSNM2ISCSIDriverTest.hsnm_vals.get(args)
def _exec_hsnm_init(*args, **kargs):
return HBSDSNM2ISCSIDriverTest.hsnm_vals_init.get(args)
class HBSDSNM2ISCSIDriverTest(test.TestCase):
"""Test HBSDSNM2ISCSIDriver."""
audppool_result = " DP RAID \
Current Utilization Current Over Replication\
Available Current Replication Rotational \
\
Stripe \
Needing Preparation\n\
Pool Tier Mode Level Total Capacity Consumed Capacity \
Percent Provisioning Percent Capacity \
Utilization Percent Type Speed Encryption Status \
\
Reconstruction Progress Size Capacity\n\
30 Disable 1( 1D+1D) 532.0 GB 2.0 GB \
1% 24835% 532.0 GB \
1% SAS 10000rpm N/A Normal \
N/A \
256KB 0.0 GB"
aureplicationlocal_result = "Pair Name LUN Pair \
LUN Status Copy Type Group \
Point-in-Time MU Number\n\
0 10 0 Split( 99%) \
ShadowImage ---:Ungrouped N/A\
"
auluref_result = " Stripe RAID DP Tier \
RAID Rotational Number\n\
LU Capacity Size Group Pool Mode Level Type\
Speed of Paths Status\n\
0 2097152 blocks 256KB 0 0 Enable 0 Normal"
auhgwwn_result = "Port 00 Host Group Security ON\n Detected WWN\n \
Name Port Name Host Group\n\
HBSD-00 10000000C97BCE7A 001:HBSD-01\n\
Assigned WWN\n Name Port Name \
Host Group\n abcdefg 10000000C97BCE7A \
001:HBSD-01"
autargetini_result = "Port 00 Target Security ON\n\
Target Name \
iSCSI Name\n\
001:HBSD-01 \
iqn"
autargetini_result2 = "Port 00 Target Security ON\n\
Target Name \
iSCSI Name"
autargetmap_result = "Mapping Mode = ON\n\
Port Target H-LUN LUN\n\
00 001:HBSD-01 0 1000"
auiscsi_result = "Port 00\n\
Port Number : 3260\n\
Keep Alive Timer[sec.] : 60\n\
MTU : 1500\n\
Transfer Rate : 1Gbps\n\
Link Status : Link Up\n\
Ether Address : 00:00:87:33:D1:3E\n\
IPv4\n\
IPv4 Address : 192.168.0.1\n\
IPv4 Subnet Mask : 255.255.252.0\n\
IPv4 Default Gateway : 0.0.0.0\n\
IPv6 Status : Disable\n\
Connecting Hosts : 0\n\
Result : Normal\n\
VLAN Status : Disable\n\
VLAN ID : N/A\n\
Header Digest : Enable\n\
Data Digest : Enable\n\
Window Scale : Disable"
autargetdef_result = "Port 00\n\
Authentication Mutual\n\
Target Method CHAP Algorithm \
Authentication\n\
001:T000 None --- ---\n\
User Name : ---\n\
iSCSI Name : iqn-target"
hsnm_vals = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, "", ""],
('aureplicationlocal',
'-unit None -create -si -pvol 3 -svol 1 -compsplit -pace normal'):
[1, "", ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -refer -pvol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -refer -svol 3'):
[1, "", "DMEC002015"],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, "", ""],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 1'):
[1, "", ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, "", ""],
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
('auludel', '-unit None -lu 3 -f'): [1, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 256g'): [1, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('auluref', '-unit None -lu 0'): [0, "%s" % auluref_result, ""],
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
('autargetini', '-unit None -refer'):
[0, "%s" % autargetini_result, ""],
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
[0, "", ""],
('autargetmap', '-unit None -refer'):
[0, "%s" % autargetmap_result, ""],
('autargetdef',
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
-authmethod None'):
[0, "", ""],
('autargetdef', '-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 \
-iname iqnX.target -authmethod None'):
[1, "", ""],
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
-ReportFullPortalList enable'):
[0, "", ""],
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
('autargetdef', '-unit None -refer'):
[0, "%s" % autargetdef_result, ""]}
hsnm_vals_init = {
('audppool', '-unit None -refer -g'): [0, "%s" % audppool_result, ""],
('aureplicationlocal',
'-unit None -create -si -pvol 1 -svol 1 -compsplit -pace normal'):
[0, 0, ""],
('aureplicationlocal', '-unit None -refer -pvol 1'):
[0, "%s" % aureplicationlocal_result, ""],
('aureplicationlocal', '-unit None -simplex -si -pvol 1 -svol 0'):
[0, 0, ""],
('auluchgsize', '-unit None -lu 1 -size 256g'):
[0, 0, ""],
('auludel', '-unit None -lu 1 -f'): [0, "", ""],
('auluadd', '-unit None -lu 1 -dppoolno 30 -size 128g'): [0, "", ""],
('auluref', '-unit None'): [0, "%s" % auluref_result, ""],
('autargetmap', '-unit None -add 0 0 1 1 1'): [0, "", ""],
('autargetmap', '-unit None -add 0 0 0 0 1'): [0, "", ""],
('autargetini', '-unit None -refer'):
[0, "%s" % autargetini_result2, ""],
('autargetini', '-unit None -add 0 0 -tno 0 -iname iqn'):
[0, "", ""],
('autargetmap', '-unit None -refer'):
[0, "%s" % autargetmap_result, ""],
('autargetdef',
'-unit None -add 0 0 -tno 0 -talias HBSD-0.0.0.0 -iname iqn.target \
-authmethod None'):
[0, "", ""],
('autargetopt', '-unit None -set 0 0 -talias HBSD-0.0.0.0 \
-ReportFullPortalList enable'):
[0, "", ""],
('auiscsi', '-unit None -refer'): [0, "%s" % auiscsi_result, ""],
('autargetdef', '-unit None -refer'):
[0, "%s" % autargetdef_result, ""],
('auman', '-help'):
[0, "Version 27.50", ""]}
# The following information is passed on to tests, when creating a volume
_VOLUME = {'size': 128, 'volume_type': None, 'source_volid': '0',
'provider_location': '1', 'name': 'test',
'id': 'abcdefg', 'snapshot_id': '0', 'status': 'available'}
test_volume = {'name': 'test_volume', 'size': 128,
'id': 'test-volume-0',
'provider_location': '1', 'status': 'available'}
test_volume_error = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '3', 'status': 'available'}
test_volume_error1 = {'name': 'test_volume_error', 'size': 128,
'id': 'test-volume-error',
'provider_location': None, 'status': 'available'}
test_volume_error2 = {'name': 'test_volume_error', 'size': 256,
'id': 'test-volume-error',
'provider_location': '1', 'status': 'available'}
test_volume_error3 = {'name': 'test_volume3', 'size': 128,
'id': 'test-volume3',
'volume_metadata': [{'key': 'type',
'value': 'V-VOL'}],
'provider_location': '1', 'status': 'available'}
test_volume_error4 = {'name': 'test_volume4', 'size': 128,
'id': 'test-volume2',
'provider_location': '3', 'status': 'available'}
test_snapshot = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0', 'volume': _VOLUME,
'provider_location': '1', 'status': 'available'}
test_snapshot_error2 = {'volume_name': 'test', 'size': 128,
'volume_size': 128, 'name': 'test-snap',
'volume_id': 0, 'id': 'test-snap-0',
'volume': test_volume_error,
'provider_location': None, 'status': 'available'}
UNIT_NAME = 'HUS110_91122819'
test_existing_ref = {'ldev': '0', 'unit_name': UNIT_NAME}
test_existing_none_ldev_ref = {'ldev': None, 'unit_name': UNIT_NAME}
test_existing_invalid_ldev_ref = {'ldev': 'AAA', 'unit_name': UNIT_NAME}
test_existing_no_ldev_ref = {'unit_name': UNIT_NAME}
test_existing_none_unit_ref = {'ldev': '0', 'unit_name': None}
test_existing_invalid_unit_ref = {'ldev': '0', 'unit_name': 'Dummy'}
test_existing_no_unit_ref = {'ldev': '0'}
def __init__(self, *args, **kwargs):
super(HBSDSNM2ISCSIDriverTest, self).__init__(*args, **kwargs)
@mock.patch.object(utils, 'brick_get_connector_properties',
return_value={'ip': '0.0.0.0',
'initiator': 'iqn'})
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm',
side_effect=_exec_hsnm_init)
@mock.patch.object(utils, 'execute',
return_value=['', ''])
def setUp(self, args1, arg2, arg3, arg4):
super(HBSDSNM2ISCSIDriverTest, self).setUp()
self._setup_config()
self._setup_driver()
self.driver.check_param()
self.driver.common.create_lock_file()
self.driver.common.command.connect_storage()
self.driver.max_hostgroups = \
self.driver.common.command.get_max_hostgroups()
self.driver.add_hostgroup()
self.driver.output_param_to_log()
self.driver.do_setup_status.set()
def _setup_config(self):
self.configuration = mock.Mock(conf.Configuration)
self.configuration.hitachi_pool_id = 30
self.configuration.hitachi_thin_pool_id = 31
self.configuration.hitachi_target_ports = "00"
self.configuration.hitachi_debug_level = 0
self.configuration.hitachi_serial_number = None
self.configuration.hitachi_unit_name = "None"
self.configuration.hitachi_group_request = True
self.configuration.hitachi_group_range = "0-1"
self.configuration.config_group = "None"
self.configuration.hitachi_ldev_range = "0-100"
self.configuration.hitachi_default_copy_method = 'FULL'
self.configuration.hitachi_copy_check_interval = 1
self.configuration.hitachi_async_copy_check_interval = 1
self.configuration.hitachi_copy_speed = 3
self.configuration.hitachi_auth_method = None
self.configuration.hitachi_auth_user = "HBSD-CHAP-user"
self.configuration.hitachi_auth_password = "HBSD-CHAP-password"
self.configuration.hitachi_add_chap_user = "False"
def _setup_driver(self):
self.driver = hbsd_iscsi.HBSDISCSIDriver(
configuration=self.configuration)
context = None
db = None
self.driver.common = hbsd_common.HBSDCommon(
self.configuration, self.driver, context, db)
self.driver.common.command = hbsd_snm2.HBSDSNM2(self.configuration)
self.driver.common.horcmgr_flock = \
self.driver.common.command.set_horcmgr_flock()
# API test cases
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume(self, arg1, arg2, arg3):
"""test create_volume."""
ret = self.driver.create_volume(self._VOLUME)
vol = self._VOLUME.copy()
vol['provider_location'] = ret['provider_location']
self.assertEqual('1', vol['provider_location'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_error(self, arg1, arg2, arg3):
"""test create_volume."""
self.assertRaises(exception.HBSDCmdError,
self.driver.create_volume,
self.test_volume_error)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats(self, arg1, arg2):
"""test get_volume_stats."""
stats = self.driver.get_volume_stats(True)
self.assertEqual('Hitachi', stats['vendor_name'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_get_volume_stats_error(self, arg1, arg2):
"""test get_volume_stats."""
self.configuration.hitachi_pool_id = 29
stats = self.driver.get_volume_stats(True)
self.assertEqual({}, stats)
self.configuration.hitachi_pool_id = 30
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume(self, arg1, arg2):
"""test extend_volume."""
self.driver.extend_volume(self._VOLUME, 256)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_extend_volume_error(self, arg1, arg2):
"""test extend_volume."""
self.assertRaises(exception.HBSDError, self.driver.extend_volume,
self.test_volume_error3, 256)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume(self, arg1, arg2):
"""test delete_volume."""
self.driver.delete_volume(self._VOLUME)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_volume_error(self, arg1, arg2):
"""test delete_volume."""
self.assertRaises(exception.HBSDCmdError,
self.driver.delete_volume,
self.test_volume_error4)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot(self, arg1, arg2, arg3, arg4, arg5):
"""test create_snapshot."""
ret = self.driver.create_volume(self._VOLUME)
ret = self.driver.create_snapshot(self.test_snapshot)
self.assertEqual('1', ret['provider_location'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_snapshot_metadata',
return_value={'dummy_snapshot_meta': 'snapshot_meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=test_volume_error)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_snapshot_error(self, arg1, arg2, arg3, arg4, arg5):
"""test create_snapshot."""
self.assertRaises(exception.HBSDCmdError,
self.driver.create_snapshot,
self.test_snapshot_error2)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_delete_snapshot_error(self, arg1, arg2):
"""test delete_snapshot."""
self.driver.delete_snapshot(self.test_snapshot_error2)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot(self, arg1, arg2, arg3):
"""test create_volume_from_snapshot."""
vol = self.driver.create_volume_from_snapshot(self._VOLUME,
self.test_snapshot)
self.assertIsNotNone(vol)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_create_volume_from_snapshot_error(self, arg1, arg2, arg3):
"""test create_volume_from_snapshot."""
self.assertRaises(exception.HBSDError,
self.driver.create_volume_from_snapshot,
self.test_volume_error2, self.test_snapshot)
return
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=_VOLUME)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume(self, arg1, arg2, arg3, arg4):
"""test create_cloned_volume."""
vol = self.driver.create_cloned_volume(self._VOLUME,
self.test_snapshot)
self.assertIsNotNone(vol)
return
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume_metadata',
return_value={'dummy_volume_meta': 'meta'})
@mock.patch.object(hbsd_common.HBSDCommon, 'get_volume',
return_value=test_volume_error1)
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
def test_create_cloned_volume_error(self, arg1, arg2, arg3, arg4):
"""test create_cloned_volume."""
self.assertRaises(exception.HBSDError,
self.driver.create_cloned_volume,
self._VOLUME, self.test_volume_error1)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection(self, arg1, arg2):
"""test initialize connection."""
connector = {
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
'iqn'}
rc = self.driver.initialize_connection(self._VOLUME, connector)
self.assertEqual('iscsi', rc['driver_volume_type'])
self.assertEqual('iqn-target', rc['data']['target_iqn'])
self.assertEqual(1, rc['data']['target_lun'])
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_initialize_connection_error(self, arg1, arg2):
"""test initialize connection."""
connector = {
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
'iqnX'}
self.assertRaises(exception.HBSDError,
self.driver.initialize_connection,
self._VOLUME, connector)
return
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection(self, arg1):
"""test terminate connection."""
connector = {
'wwpns': '0x100000', 'ip': '0.0.0.0', 'initiator':
'iqn'}
self.driver.terminate_connection(self._VOLUME, connector)
return
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_terminate_connection_error(self, arg1):
"""test terminate connection."""
connector = {'ip': '0.0.0.0'}
self.assertRaises(exception.HBSDError,
self.driver.terminate_connection,
self._VOLUME, connector)
return
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_manage_existing(self, arg1, arg2):
rc = self.driver.manage_existing(self._VOLUME, self.test_existing_ref)
self.assertEqual(0, rc['provider_location'])
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
size = self.driver.manage_existing_get_size(self._VOLUME,
self.test_existing_ref)
self.assertEqual(1, size)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_ldev(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_none_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_ldev_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_invalid_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_ldev_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_no_ldev_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_none_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_none_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_invalid_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_invalid_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
@mock.patch.object(hbsd_common.HBSDCommon, '_update_volume_metadata')
def test_manage_existing_get_size_no_unit_ref(self, arg1, arg2, arg3):
self.configuration.hitachi_unit_name = self.UNIT_NAME
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size, self._VOLUME,
self.test_existing_no_unit_ref)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_unmanage(self, arg1, arg2):
self.driver.unmanage(self._VOLUME)
@mock.patch.object(hbsd_basiclib, 'get_process_lock')
@mock.patch.object(hbsd_snm2.HBSDSNM2, 'exec_hsnm', side_effect=_exec_hsnm)
def test_unmanage_busy(self, arg1, arg2):
self.assertRaises(exception.HBSDVolumeIsBusy,
self.driver.unmanage, self.test_volume_error3)
|
rcarrillocruz/ansible
|
refs/heads/devel
|
test/units/modules/network/eos/test_eos_user.py
|
37
|
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.eos import eos_user
from .eos_module import TestEosModule, load_fixture, set_module_args
class TestEosUserModule(TestEosModule):
module = eos_user
def setUp(self):
self.mock_get_config = patch('ansible.modules.network.eos.eos_user.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.eos.eos_user.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, transport='cli'):
self.get_config.return_value = load_fixture('eos_user_config.cfg')
self.load_config.return_value = dict(diff=None, session='session')
def test_eos_user_create(self):
set_module_args(dict(username='test', nopassword=True))
commands = ['username test nopassword']
self.execute_module(changed=True, commands=commands)
def test_eos_user_delete(self):
set_module_args(dict(username='ansible', state='absent'))
commands = ['no username ansible']
self.execute_module(changed=True, commands=commands)
def test_eos_user_password(self):
set_module_args(dict(username='ansible', password='test'))
commands = ['username ansible secret test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_privilege(self):
set_module_args(dict(username='ansible', privilege=15))
commands = ['username ansible privilege 15']
self.execute_module(changed=True, commands=commands)
def test_eos_user_privilege_invalid(self):
set_module_args(dict(username='ansible', privilege=25))
self.execute_module(failed=True)
def test_eos_user_purge(self):
set_module_args(dict(purge=True))
commands = ['no username ansible']
self.execute_module(changed=True, commands=commands)
def test_eos_user_role(self):
set_module_args(dict(username='ansible', role='test'))
commands = ['username ansible role test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_sshkey(self):
set_module_args(dict(username='ansible', sshkey='test'))
commands = ['username ansible sshkey test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_update_password_changed(self):
set_module_args(dict(username='test', password='test', update_password='on_create'))
commands = ['username test secret test']
self.execute_module(changed=True, commands=commands)
def test_eos_user_update_password_on_create_ok(self):
set_module_args(dict(username='ansible', password='test', update_password='on_create'))
self.execute_module()
def test_eos_user_update_password_always(self):
set_module_args(dict(username='ansible', password='test', update_password='always'))
commands = ['username ansible secret test']
self.execute_module(changed=True, commands=commands)
|
enitihas/SAC-Website
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/compat.py
|
2942
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
|
jakeclawson/linux
|
refs/heads/master
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some event type classes, which can
# be used by other scripts to analyze perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a HW-based Intel x86 PEBS event, and users can add more SW/HW
# event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
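# A user-defined subclass might look like this (illustrative sketch only,
# not used anywhere below):
#
#   class IbsEvent(PerfEvent):
#       def __init__(self, name, comm, dso, symbol, raw_buf):
#           PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, EVTYPE_IBS)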
#
# Currently we don't have a good way to tell the event type other than by
# the size of the raw buffer: a raw PEBS event with load latency data is
# 176 bytes, while a pure PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which lies
# in the four 64-bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
|
hynekcer/django
|
refs/heads/master
|
django/contrib/gis/db/models/fields.py
|
310
|
from django.contrib.gis import forms
from django.contrib.gis.db.models.lookups import gis_lookups
from django.contrib.gis.db.models.proxy import SpatialProxy
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.core.exceptions import ImproperlyConfigured
from django.db.models.expressions import Expression
from django.db.models.fields import Field
from django.utils import six
from django.utils.translation import ugettext_lazy as _
# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = {}
def get_srid_info(srid, connection):
"""
Returns the units, unit name, and spheroid WKT associated with the
given SRID from the `spatial_ref_sys` (or equivalent) spatial database
table for the given database connection. These results are cached.
"""
global _srid_cache
try:
# The SpatialRefSys model for the spatial backend.
SpatialRefSys = connection.ops.spatial_ref_sys()
except NotImplementedError:
# No `spatial_ref_sys` table in spatial backend (e.g., MySQL).
return None, None, None
if connection.alias not in _srid_cache:
# Initialize SRID dictionary for database if it doesn't exist.
_srid_cache[connection.alias] = {}
if srid not in _srid_cache[connection.alias]:
# Use `SpatialRefSys` model to query for spatial reference info.
sr = SpatialRefSys.objects.using(connection.alias).get(srid=srid)
units, units_name = sr.units
spheroid = SpatialRefSys.get_spheroid(sr.wkt)
_srid_cache[connection.alias][srid] = (units, units_name, spheroid)
return _srid_cache[connection.alias][srid]
class GeoSelectFormatMixin(object):
def select_format(self, compiler, sql, params):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
connection = compiler.connection
srid = compiler.query.get_context('transformed_srid')
if srid:
sel_fmt = '%s(%%s, %s)' % (connection.ops.transform, srid)
else:
sel_fmt = '%s'
if connection.ops.select:
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = connection.ops.select % sel_fmt
return sel_fmt % sql, params
class BaseSpatialField(Field):
"""
The Base GIS Field.
It's used as a base class for GeometryField and RasterField. Defines
properties that are common to all GIS fields such as the characteristics
of the spatial reference system of the field.
"""
description = _("The base GIS field.")
# Geodetic units.
geodetic_units = ('decimal degree', 'degree')
def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
"""
The initialization function for base spatial fields. Takes the following
as keyword arguments:
srid:
The spatial reference system identifier, an OGC standard.
Defaults to 4326 (WGS84).
spatial_index:
Indicates whether to create a spatial index. Defaults to True.
Set this instead of 'db_index' for geographic fields since index
creation is different for geometry columns.
"""
# Setting the index flag with the value of the `spatial_index` keyword.
self.spatial_index = spatial_index
# Setting the SRID and getting the units. Unit information must be
# easily available in the field instance for distance queries.
self.srid = srid
# Setting the verbose_name keyword argument with the positional
# first parameter, so this works like normal fields.
kwargs['verbose_name'] = verbose_name
super(BaseSpatialField, self).__init__(**kwargs)
def deconstruct(self):
name, path, args, kwargs = super(BaseSpatialField, self).deconstruct()
# Always include SRID for less fragility; include spatial index if it's
# not the default value.
kwargs['srid'] = self.srid
if self.spatial_index is not True:
kwargs['spatial_index'] = self.spatial_index
return name, path, args, kwargs
def db_type(self, connection):
return connection.ops.geo_db_type(self)
# The following functions are used to get the units, their name, and
# the spheroid corresponding to the SRID of the BaseSpatialField.
def _get_srid_info(self, connection):
# Get attributes from `get_srid_info`.
self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)
def spheroid(self, connection):
if not hasattr(self, '_spheroid'):
self._get_srid_info(connection)
return self._spheroid
def units(self, connection):
if not hasattr(self, '_units'):
self._get_srid_info(connection)
return self._units
def units_name(self, connection):
if not hasattr(self, '_units_name'):
self._get_srid_info(connection)
return self._units_name
def geodetic(self, connection):
"""
Returns true if this field's SRID corresponds with a coordinate
system that uses non-projected units (e.g., latitude/longitude).
"""
units_name = self.units_name(connection)
# Some backends like MySQL cannot determine units name. In that case,
# test if srid is 4326 (WGS84), even if this is an over-simplification.
return units_name.lower() in self.geodetic_units if units_name else self.srid == 4326
def get_placeholder(self, value, compiler, connection):
"""
Returns the placeholder for the spatial column for the
given value.
"""
return connection.ops.get_geom_placeholder(self, value, compiler)
class GeometryField(GeoSelectFormatMixin, BaseSpatialField):
"""
The base Geometry field -- maps to the OpenGIS Specification Geometry type.
"""
description = _("The base Geometry field -- maps to the OpenGIS Specification Geometry type.")
form_class = forms.GeometryField
# The OpenGIS Geometry name.
geom_type = 'GEOMETRY'
def __init__(self, verbose_name=None, dim=2, geography=False, **kwargs):
"""
The initialization function for geometry fields. In addition to the
parameters from BaseSpatialField, it takes the following as keyword
arguments:
dim:
The number of dimensions for this geometry. Defaults to 2.
extent:
Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
geometry field entry in the `USER_SDO_GEOM_METADATA` table. Defaults
to (-180.0, -90.0, 180.0, 90.0).
tolerance:
Define the tolerance, in meters, to use for the geometry field
entry in the `USER_SDO_GEOM_METADATA` table. Defaults to 0.05.
"""
# Setting the dimension of the geometry field.
self.dim = dim
# Is this a geography rather than a geometry column?
self.geography = geography
# Oracle-specific private attributes for creating the entry in
# `USER_SDO_GEOM_METADATA`
self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
self._tolerance = kwargs.pop('tolerance', 0.05)
super(GeometryField, self).__init__(verbose_name=verbose_name, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super(GeometryField, self).deconstruct()
# Include kwargs if they're not the default values.
if self.dim != 2:
kwargs['dim'] = self.dim
if self.geography is not False:
kwargs['geography'] = self.geography
return name, path, args, kwargs
# ### Routines specific to GeometryField ###
def get_distance(self, value, lookup_type, connection):
"""
Returns a distance number in units of the field. For example, if
`D(km=1)` was passed in and the units of the field were in meters,
then 1000 would be returned.
"""
return connection.ops.get_distance(self, value, lookup_type)
def get_prep_value(self, value):
"""
Spatial lookup values are either a parameter that is (or may be
converted to) a geometry, or a sequence of lookup values that
begins with a geometry. This routine will setup the geometry
value properly, and preserve any other lookup parameters before
returning to the caller.
"""
value = super(GeometryField, self).get_prep_value(value)
if isinstance(value, Expression):
return value
elif isinstance(value, (tuple, list)):
geom = value[0]
seq_value = True
else:
geom = value
seq_value = False
# When the input is not a GEOS geometry, attempt to construct one
# from the given string input.
if isinstance(geom, Geometry):
pass
elif isinstance(geom, (bytes, six.string_types)) or hasattr(geom, '__geo_interface__'):
try:
geom = Geometry(geom)
except GeometryException:
raise ValueError('Could not create geometry from lookup value.')
else:
raise ValueError('Cannot use object with type %s for a geometry lookup parameter.' % type(geom).__name__)
# Assigning the SRID value.
geom.srid = self.get_srid(geom)
if seq_value:
lookup_val = [geom]
lookup_val.extend(value[1:])
return tuple(lookup_val)
else:
return geom
def from_db_value(self, value, expression, connection, context):
if value and not isinstance(value, Geometry):
value = Geometry(value)
return value
def get_srid(self, geom):
"""
Returns the default SRID for the given geometry, taking into account
the SRID set for the field. For example, if the input geometry
has no SRID, then that of the field will be returned.
"""
gsrid = geom.srid # SRID of given geometry.
if gsrid is None or self.srid == -1 or (gsrid == -1 and self.srid != -1):
return self.srid
else:
return gsrid
# ### Routines overloaded from Field ###
def contribute_to_class(self, cls, name, **kwargs):
super(GeometryField, self).contribute_to_class(cls, name, **kwargs)
# Setup for lazy-instantiated Geometry object.
setattr(cls, self.attname, SpatialProxy(Geometry, self))
def formfield(self, **kwargs):
defaults = {'form_class': self.form_class,
'geom_type': self.geom_type,
'srid': self.srid,
}
defaults.update(kwargs)
if (self.dim > 2 and 'widget' not in kwargs and
not getattr(defaults['form_class'].widget, 'supports_3d', False)):
defaults['widget'] = forms.Textarea
return super(GeometryField, self).formfield(**defaults)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"""
Prepare for the database lookup, and return any spatial parameters
necessary for the query. This includes wrapping any geometry
parameters with a backend-specific adapter and formatting any distance
parameters into the correct units for the coordinate system of the
field.
"""
# special case for isnull lookup
if lookup_type == 'isnull':
return []
elif lookup_type in self.class_lookups:
# Populating the parameters list, and wrapping the Geometry
# with the Adapter of the spatial backend.
if isinstance(value, (tuple, list)):
params = [connection.ops.Adapter(value[0])]
if self.class_lookups[lookup_type].distance:
# Getting the distance parameter in the units of the field.
params += self.get_distance(value[1:], lookup_type, connection)
elif lookup_type in connection.ops.truncate_params:
# Lookup is one where SQL parameters aren't needed from the
# given lookup value.
pass
else:
params += value[1:]
elif isinstance(value, Expression):
params = []
else:
params = [connection.ops.Adapter(value)]
return params
else:
raise ValueError('%s is not a valid spatial lookup for %s.' %
(lookup_type, self.__class__.__name__))
def get_prep_lookup(self, lookup_type, value):
if lookup_type == 'contains':
# 'contains' name might conflict with the "normal" contains lookup,
# for which the value is not prepared, but left as-is.
return self.get_prep_value(value)
return super(GeometryField, self).get_prep_lookup(lookup_type, value)
def get_db_prep_save(self, value, connection):
"Prepares the value for saving in the database."
if not value:
return None
else:
return connection.ops.Adapter(self.get_prep_value(value))
for klass in gis_lookups.values():
GeometryField.register_lookup(klass)
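# Illustrative sketch (not part of Django itself): how the lookup machinery
# above is typically exercised from application code. The ``City`` model and
# the coordinates below are hypothetical examples.
def _example_distance_lookup():
    # Local imports keep this sketch free of import-time side effects.
    from django.contrib.gis.geos import Point
    from django.contrib.gis.measure import D
    # Assuming a model along the lines of
    #     class City(models.Model):
    #         point = models.PointField(srid=4326)
    # a query such as
    #     City.objects.filter(point__distance_lte=(Point(0.0, 51.5, srid=4326), D(km=10)))
    # passes a (geometry, distance) tuple through get_prep_value() and
    # get_db_prep_lookup() above on its way to SQL.
    return Point(0.0, 51.5, srid=4326), D(km=10)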
# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
geom_type = 'POINT'
form_class = forms.PointField
description = _("Point")
class LineStringField(GeometryField):
geom_type = 'LINESTRING'
form_class = forms.LineStringField
description = _("Line string")
class PolygonField(GeometryField):
geom_type = 'POLYGON'
form_class = forms.PolygonField
description = _("Polygon")
class MultiPointField(GeometryField):
geom_type = 'MULTIPOINT'
form_class = forms.MultiPointField
description = _("Multi-point")
class MultiLineStringField(GeometryField):
geom_type = 'MULTILINESTRING'
form_class = forms.MultiLineStringField
description = _("Multi-line string")
class MultiPolygonField(GeometryField):
geom_type = 'MULTIPOLYGON'
form_class = forms.MultiPolygonField
description = _("Multi polygon")
class GeometryCollectionField(GeometryField):
geom_type = 'GEOMETRYCOLLECTION'
form_class = forms.GeometryCollectionField
description = _("Geometry collection")
class ExtentField(GeoSelectFormatMixin, Field):
"Used as a return value from an extent aggregate"
description = _("Extent Aggregate Field")
def get_internal_type(self):
return "ExtentField"
class RasterField(BaseSpatialField):
"""
Raster field for GeoDjango -- evaluates into GDALRaster objects.
"""
description = _("Raster Field")
geom_type = 'RASTER'
def __init__(self, *args, **kwargs):
if not HAS_GDAL:
raise ImproperlyConfigured('RasterField requires GDAL.')
super(RasterField, self).__init__(*args, **kwargs)
def _check_connection(self, connection):
# Make sure raster fields are used only on backends with raster support.
if not connection.features.gis_enabled or not connection.features.supports_raster:
raise ImproperlyConfigured('Raster fields require backends with raster support.')
def db_type(self, connection):
self._check_connection(connection)
return super(RasterField, self).db_type(connection)
def from_db_value(self, value, expression, connection, context):
return connection.ops.parse_raster(value)
def get_db_prep_value(self, value, connection, prepared=False):
self._check_connection(connection)
# Prepare raster for writing to database.
if not prepared:
value = connection.ops.deconstruct_raster(value)
return super(RasterField, self).get_db_prep_value(value, connection, prepared)
def contribute_to_class(self, cls, name, **kwargs):
super(RasterField, self).contribute_to_class(cls, name, **kwargs)
# Importing GDALRaster raises an exception on systems without gdal.
from django.contrib.gis.gdal import GDALRaster
# Setup for lazy-instantiated Raster object. For large querysets, the
# instantiation of all GDALRasters can potentially be expensive. This
# delays the instantiation of the objects to the moment of evaluation
# of the raster attribute.
setattr(cls, self.attname, SpatialProxy(GDALRaster, self))
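# Illustrative sketch (not part of Django itself): the SpatialProxy set up above
# defers GDALRaster construction until the attribute is first read. The model
# instance and the ``rast`` field name below are hypothetical.
def _example_lazy_raster_access(instance):
    # Loading ``instance`` from the database does not parse the raster; the
    # GDALRaster object is only built on this first attribute access.
    rast = instance.rast
    return (rast.width, rast.height) if rast is not None else None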
|
zsiciarz/variablestars.net
|
refs/heads/master
|
observations/tests/test_views.py
|
1
|
from django.contrib import messages
from django.contrib.messages.middleware import MessageMiddleware
from django.contrib.sessions.middleware import SessionMiddleware
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from djet.assertions import (
InstanceAssertionsMixin,
MessagesAssertionsMixin,
StatusCodeAssertionsMixin,
)
from djet.files import create_inmemory_file
from djet.testcases import ViewTestCase
from dj_pagination.middleware import PaginationMiddleware
from ..models import Observation
from .. import views
from variablestars.tests.base import TestDataMixin
class ObservationListViewTestCase(
StatusCodeAssertionsMixin, TestDataMixin, ViewTestCase
):
view_class = views.ObservationListView
middleware_classes = [
PaginationMiddleware,
]
def setUp(self):
super().setUp()
self._create_users()
self._create_stars()
self._create_observations()
def test_list_observations_all_observers(self):
request = self.factory.get()
response = self.view(request)
self.assertContains(response, str(self.observer))
self.assertContains(response, str(self.observer2))
def test_list_observations_single_observer(self):
request = self.factory.get()
response = self.view(request, observer_id=self.observer2.id)
self.assertNotContains(response, str(self.observer))
self.assertContains(response, str(self.observer2))
class AddObservationViewTestCase(
InstanceAssertionsMixin,
MessagesAssertionsMixin,
StatusCodeAssertionsMixin,
TestDataMixin,
ViewTestCase,
):
"""
Tests for ``observations.views.AddObservationView`` class.
"""
view_class = views.AddObservationView
middleware_classes = [
SessionMiddleware,
MessageMiddleware,
]
def setUp(self):
super().setUp()
self._create_users()
def test_response(self):
"""
Check basic properties of the view.
"""
request = self.factory.get(user=self.user)
with self.assertTemplateUsed("observations/add_observation.html"):
response = self.view(request)
self.assertContains(response, _("Add new observation"))
def test_predefined_star(self):
"""
Check that one can add an observation with a predefined choice of star.
"""
self._create_stars()
request = self.factory.get(user=self.user)
response = self.view(request, star_id=self.star.pk)
self.assertContains(response, self.star.name)
def test_form_invalid(self):
"""
Check that invalid observation form displays meaningful errors.
"""
request = self.factory.post(data={}, user=self.user)
response = self.view(request)
self.assertContains(response, _("This field is required."))
def test_form_valid(self):
"""
        A valid form creates a new observation and redirects back to the form.
"""
self._create_stars()
with self.assert_instance_created(
Observation, star=self.star, jd=2456634.1154, magnitude=7.1
):
request = self.factory.post(
data={"star": self.star.id, "jd": "2456634.1154", "magnitude": "7.1",},
user=self.user,
)
request.observer = self.observer
response = self.view(request)
self.assert_redirect(response, reverse("observations:add_observation"))
self.assert_message_exists(
request, messages.SUCCESS, _("Observation added successfully!")
)
class UploadObservationsViewTestCase(
InstanceAssertionsMixin,
MessagesAssertionsMixin,
StatusCodeAssertionsMixin,
TestDataMixin,
ViewTestCase,
):
"""
Tests for ``observations.views.UploadObservationsView`` class.
"""
view_class = views.UploadObservationsView
middleware_classes = [
SessionMiddleware,
MessageMiddleware,
]
def setUp(self):
super().setUp()
self._create_users()
self._create_stars()
self.lines = [
"#TYPE=VISUAL",
"#OBSCODE=%s" % self.observer.aavso_code,
"#SOFTWARE=variablestars.net",
"#DELIM=,",
"#DATE=JD",
"#OBSTYPE=Visual",
"%s,2450702.1234,<11.1,na,110,113,070613,test3" % self.star.name,
]
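        # The trailing line above appears to follow the AAVSO visual-format
        # layout declared by the header lines: star name, JD, magnitude
        # ("<11.1" meaning fainter than 11.1), comment code, two comparison
        # stars, chart id and free-form notes ("test3").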
def test_response(self):
request = self.factory.get(user=self.user)
with self.assertTemplateUsed("observations/upload_observations.html"):
response = self.view(request)
self.assertContains(response, _("Upload observations"))
def test_no_file(self):
"""
If no file is selected, the form displays an error.
"""
request = self.factory.post(data={"aavso_file": "",}, user=self.user)
response = self.view(request)
self.assertContains(response, _("This field is required."))
def test_correct_file(self):
"""
If the file is valid, observations are created.
"""
contents = "\n".join(self.lines)
aavso_file = create_inmemory_file("data.txt", contents.encode("utf-8"))
with self.assert_instance_created(Observation, star=self.star, notes="test3"):
request = self.factory.post(
data={"aavso_file": aavso_file,}, user=self.user
)
request.observer = self.observer
response = self.view(request)
self.assert_redirect(response)
self.assert_message_exists(
request, messages.SUCCESS, _("File uploaded successfully!")
)
def test_malformed_file(self):
"""
Check that a bad magnitude value raises an exception.
"""
observations_count_before = Observation.objects.count()
self.lines[-1] = "%s,2450702.1234,ASDF,na,110,113,070613,test3" % self.star.name
contents = "\n".join(self.lines)
aavso_file = create_inmemory_file("data.txt", contents.encode("utf-8"))
request = self.factory.post(data={"aavso_file": aavso_file,}, user=self.user)
request.observer = self.observer
response = self.view(request)
self.assert_redirect(response)
self.assert_message_exists(
request, messages.SUCCESS, _("File uploaded successfully!")
)
observations_count_after = Observation.objects.count()
self.assertEqual(observations_count_after, observations_count_before)
|
MiltosD/CEF-ELRC
|
refs/heads/master
|
lib/python2.7/site-packages/django_jenkins/tasks/run_pyflakes.py
|
3
|
# -*- coding: utf-8 -*-
import os
import re
import sys
from pyflakes.scripts import pyflakes
from cStringIO import StringIO
from django_jenkins.functions import relpath
from django_jenkins.tasks import BaseTask, get_apps_locations
class Task(BaseTask):
def __init__(self, test_labels, options):
super(Task, self).__init__(test_labels, options)
self.test_all = options['test_all']
if options.get('pyflakes_file_output', True):
output_dir = options['output_dir']
if not os.path.exists(output_dir):
os.makedirs(output_dir)
self.output = open(os.path.join(output_dir, 'pyflakes.report'), 'w')
else:
self.output = sys.stdout
def teardown_test_environment(self, **kwargs):
locations = get_apps_locations(self.test_labels, self.test_all)
# run pyflakes tool with captured output
old_stdout, pyflakes_output = sys.stdout, StringIO()
sys.stdout = pyflakes_output
try:
for location in locations:
if os.path.isdir(location):
for dirpath, dirnames, filenames in os.walk(relpath(location)):
for filename in filenames:
if filename.endswith('.py'):
pyflakes.checkPath(os.path.join(dirpath, filename))
else:
pyflakes.checkPath(relpath(location))
finally:
sys.stdout = old_stdout
# save report
pyflakes_output.reset()
while True:
line = pyflakes_output.readline()
if not line:
break
message = re.sub(r': ', r': [E] PYFLAKES:', line)
self.output.write(message)
self.output.close()
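        # Illustrative note (not from the original task): the substitution above
        # rewrites a raw pyflakes line such as
        #     app/views.py:12: 'os' imported but unused
        # into
        #     app/views.py:12: [E] PYFLAKES:'os' imported but unused
        # presumably so the downstream report parser can classify it as an error.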
|
xzYue/odoo
|
refs/heads/8.0
|
addons/website_mail_group/controllers/main.py
|
306
|
# -*- coding: utf-8 -*-
import datetime
from dateutil import relativedelta
from openerp import tools, SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.website.models.website import slug
from openerp.addons.web.http import request
class MailGroup(http.Controller):
_thread_per_page = 20
_replies_per_page = 10
def _get_archives(self, group_id):
MailMessage = request.registry['mail.message']
groups = MailMessage.read_group(
request.cr, request.uid, [('model', '=', 'mail.group'), ('res_id', '=', group_id)], ['subject', 'date'],
groupby="date", orderby="date desc", context=request.context)
for group in groups:
begin_date = datetime.datetime.strptime(group['__domain'][0][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
end_date = datetime.datetime.strptime(group['__domain'][1][2], tools.DEFAULT_SERVER_DATETIME_FORMAT).date()
group['date_begin'] = '%s' % datetime.date.strftime(begin_date, tools.DEFAULT_SERVER_DATE_FORMAT)
group['date_end'] = '%s' % datetime.date.strftime(end_date, tools.DEFAULT_SERVER_DATE_FORMAT)
return groups
@http.route("/groups", type='http', auth="public", website=True)
def view(self, **post):
cr, uid, context = request.cr, request.uid, request.context
group_obj = request.registry.get('mail.group')
mail_message_obj = request.registry.get('mail.message')
group_ids = group_obj.search(cr, uid, [('alias_id', '!=', False), ('alias_id.alias_name', '!=', False)], context=context)
groups = group_obj.browse(cr, uid, group_ids, context)
# compute statistics
month_date = datetime.datetime.today() - relativedelta.relativedelta(months=1)
group_data = dict()
for group in groups:
group_data[group.id] = {
'monthly_message_nbr': mail_message_obj.search(
cr, SUPERUSER_ID,
[('model', '=', 'mail.group'), ('res_id', '=', group.id), ('date', '>=', month_date.strftime(tools.DEFAULT_SERVER_DATETIME_FORMAT))],
count=True, context=context)}
values = {'groups': groups, 'group_data': group_data}
return request.website.render('website_mail_group.mail_groups', values)
@http.route(["/groups/subscription/"], type='json', auth="user")
def subscription(self, group_id=0, action=False, **post):
""" TDE FIXME: seems dead code """
cr, uid, context = request.cr, request.uid, request.context
group_obj = request.registry.get('mail.group')
if action:
group_obj.message_subscribe_users(cr, uid, [group_id], context=context)
else:
group_obj.message_unsubscribe_users(cr, uid, [group_id], context=context)
return []
@http.route([
"/groups/<model('mail.group'):group>",
"/groups/<model('mail.group'):group>/page/<int:page>"
], type='http', auth="public", website=True)
def thread_headers(self, group, page=1, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
thread_obj = request.registry.get('mail.message')
domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id)]
if mode == 'thread':
domain += [('parent_id', '=', False)]
if date_begin and date_end:
domain += [('date', '>=', date_begin), ('date', '<=', date_end)]
thread_count = thread_obj.search_count(cr, uid, domain, context=context)
pager = request.website.pager(
url='/groups/%s' % slug(group),
total=thread_count,
page=page,
step=self._thread_per_page,
url_args={'mode': mode, 'date_begin': date_begin or '', 'date_end': date_end or ''},
)
thread_ids = thread_obj.search(cr, uid, domain, limit=self._thread_per_page, offset=pager['offset'])
messages = thread_obj.browse(cr, uid, thread_ids, context)
values = {
'messages': messages,
'group': group,
'pager': pager,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
}
return request.website.render('website_mail_group.group_messages', values)
@http.route([
'''/groups/<model('mail.group'):group>/<model('mail.message', "[('model','=','mail.group'), ('res_id','=',group[0])]"):message>''',
], type='http', auth="public", website=True)
def thread_discussion(self, group, message, mode='thread', date_begin=None, date_end=None, **post):
cr, uid, context = request.cr, request.uid, request.context
Message = request.registry['mail.message']
if mode == 'thread':
base_domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id), ('parent_id', '=', message.parent_id and message.parent_id.id or False)]
else:
base_domain = [('model', '=', 'mail.group'), ('res_id', '=', group.id)]
next_message = None
next_message_ids = Message.search(cr, uid, base_domain + [('date', '<', message.date)], order="date DESC", limit=1, context=context)
if next_message_ids:
next_message = Message.browse(cr, uid, next_message_ids[0], context=context)
prev_message = None
prev_message_ids = Message.search(cr, uid, base_domain + [('date', '>', message.date)], order="date ASC", limit=1, context=context)
if prev_message_ids:
prev_message = Message.browse(cr, uid, prev_message_ids[0], context=context)
values = {
'message': message,
'group': group,
'mode': mode,
'archives': self._get_archives(group.id),
'date_begin': date_begin,
'date_end': date_end,
'replies_per_page': self._replies_per_page,
'next_message': next_message,
'prev_message': prev_message,
}
return request.website.render('website_mail_group.group_message', values)
@http.route(
'''/groups/<model('mail.group'):group>/<model('mail.message', "[('model','=','mail.group'), ('res_id','=',group[0])]"):message>/get_replies''',
type='json', auth="public", methods=['POST'], website=True)
def render_messages(self, group, message, **post):
last_displayed_id = post.get('last_displayed_id')
if not last_displayed_id:
return False
Message = request.registry['mail.message']
replies_domain = [('id', '<', int(last_displayed_id)), ('parent_id', '=', message.id)]
msg_ids = Message.search(request.cr, request.uid, replies_domain, limit=self._replies_per_page, context=request.context)
msg_count = Message.search(request.cr, request.uid, replies_domain, count=True, context=request.context)
messages = Message.browse(request.cr, request.uid, msg_ids, context=request.context)
values = {
'group': group,
'thread_header': message,
'messages': messages,
'msg_more_count': msg_count - self._replies_per_page,
'replies_per_page': self._replies_per_page,
}
return request.registry['ir.ui.view'].render(request.cr, request.uid, 'website_mail_group.messages_short', values, engine='ir.qweb', context=request.context)
@http.route("/groups/<model('mail.group'):group>/get_alias_info", type='json', auth='public', website=True)
def get_alias_info(self, group, **post):
return {
'alias_name': group.alias_id and group.alias_id.alias_name and group.alias_id.alias_domain and '%s@%s' % (group.alias_id.alias_name, group.alias_id.alias_domain) or False
}
|
darashenka/aem-cmd
|
refs/heads/master
|
tests/test_deploy.py
|
1
|
# coding: utf-8
import tempfile
from distutils.version import Version
import pkg_resources
from mock import patch
from nose.tools import eq_, ok_
import acmd
from test_utils.compat import StringIO
def test_get_current_version():
v = acmd.get_current_version()
eq_(True, isinstance(v, Version))
eq_(acmd.__version__, str(v))
@patch('sys.stdout', new_callable=StringIO)
@patch('sys.stderr', new_callable=StringIO)
def test_setup_rcfile(stderr, stdout):
_, path = tempfile.mkstemp(suffix='.acmd-test.rc')
acmd.setup_rcfile(path)
template = pkg_resources.resource_string('acmd', "data/acmd.rc.template")
with open(path, 'rb') as f:
content = f.read()
ok_(len(content) > 0)
eq_(template, content)
eq_('', stdout.getvalue())
ok_("warning:" in stderr.getvalue())
@patch('sys.stdout', new_callable=StringIO)
@patch('sys.stderr', new_callable=StringIO)
def test_deploy_bash_completion(stderr, stdout):
path = tempfile.mkdtemp(suffix='.acmd.bash_completion.d')
paths = [path]
ret = acmd.deploy_bash_completion(paths=paths)
eq_(path, ret)
eq_('', stdout.getvalue())
eq_('', stderr.getvalue())
@patch('sys.stdout', new_callable=StringIO)
@patch('sys.stderr', new_callable=StringIO)
def test_no_deploy_dirs(stderr, stdout):
path = '/THIS/IS/A/NON/EXISTING/PATH'
ret = acmd.deploy_bash_completion(paths=[path])
eq_(None, ret)
eq_('', stdout.getvalue())
eq_('Could not find bash completion install dir.', stderr.getvalue())
|
maurofaccenda/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/amazon/ec2_metric_alarm.py
|
57
|
#!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'curated'}
DOCUMENTATION = """
module: ec2_metric_alarm
short_description: "Create/update or delete AWS Cloudwatch 'metric alarms'"
description:
- Can create or delete AWS metric alarms.
- Metrics you wish to alarm on must already exist.
version_added: "1.6"
author: "Zacharie Eakin (@zeekin)"
options:
state:
description:
- register or deregister the alarm
required: true
choices: ['present', 'absent']
name:
description:
- Unique name for the alarm
required: true
metric:
description:
- Name of the monitored metric (e.g. CPUUtilization)
- Metric must already exist
required: false
namespace:
description:
- Name of the appropriate namespace ('AWS/EC2', 'System/Linux', etc.), which determines the category it will appear under in cloudwatch
required: false
statistic:
description:
- Operation applied to the metric
- Works in conjunction with period and evaluation_periods to determine the comparison value
required: false
choices: ['SampleCount','Average','Sum','Minimum','Maximum']
comparison:
description:
- Determines how the threshold value is compared
required: false
choices: ['<=','<','>','>=']
threshold:
description:
- Sets the min/max bound for triggering the alarm
required: false
period:
description:
- The time (in seconds) between metric evaluations
required: false
evaluation_periods:
description:
      - The number of periods over which the metric is evaluated before the alarm state is determined
required: false
unit:
description:
- The threshold's unit of measurement
required: false
choices:
- 'Seconds'
- 'Microseconds'
- 'Milliseconds'
- 'Bytes'
- 'Kilobytes'
- 'Megabytes'
- 'Gigabytes'
- 'Terabytes'
- 'Bits'
- 'Kilobits'
- 'Megabits'
- 'Gigabits'
- 'Terabits'
- 'Percent'
- 'Count'
- 'Bytes/Second'
- 'Kilobytes/Second'
- 'Megabytes/Second'
- 'Gigabytes/Second'
- 'Terabytes/Second'
- 'Bits/Second'
- 'Kilobits/Second'
- 'Megabits/Second'
- 'Gigabits/Second'
- 'Terabits/Second'
- 'Count/Second'
- 'None'
description:
description:
- A longer description of the alarm
required: false
dimensions:
description:
      - A dictionary of dimensions describing what the alarm is applied to (e.g. an instance ID)
required: false
alarm_actions:
description:
      - A list of the names of action(s) to take when the alarm is in the 'alarm' status
required: false
insufficient_data_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'insufficient_data' status
required: false
ok_actions:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
extends_documentation_fragment:
- aws
- ec2
"""
EXAMPLES = '''
- name: create alarm
ec2_metric_alarm:
state: present
region: ap-southeast-2
name: "cpu-low"
metric: "CPUUtilization"
namespace: "AWS/EC2"
statistic: Average
comparison: "<="
threshold: 5.0
period: 300
evaluation_periods: 3
unit: "Percent"
description: "This will alarm when a bamboo slave's cpu usage average is lower than 5% for 15 minutes "
dimensions: {'InstanceId':'i-XXX'}
alarm_actions: ["action1","action2"]
'''
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
from boto.exception import BotoServerError
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
def create_metric_alarm(connection, module):
name = module.params.get('name')
metric = module.params.get('metric')
namespace = module.params.get('namespace')
statistic = module.params.get('statistic')
comparison = module.params.get('comparison')
threshold = module.params.get('threshold')
period = module.params.get('period')
evaluation_periods = module.params.get('evaluation_periods')
unit = module.params.get('unit')
description = module.params.get('description')
dimensions = module.params.get('dimensions')
alarm_actions = module.params.get('alarm_actions')
insufficient_data_actions = module.params.get('insufficient_data_actions')
ok_actions = module.params.get('ok_actions')
alarms = connection.describe_alarms(alarm_names=[name])
if not alarms:
alm = MetricAlarm(
name=name,
metric=metric,
namespace=namespace,
statistic=statistic,
comparison=comparison,
threshold=threshold,
period=period,
evaluation_periods=evaluation_periods,
unit=unit,
description=description,
dimensions=dimensions,
alarm_actions=alarm_actions,
insufficient_data_actions=insufficient_data_actions,
ok_actions=ok_actions
)
try:
connection.create_alarm(alm)
changed = True
alarms = connection.describe_alarms(alarm_names=[name])
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
alarm = alarms[0]
changed = False
for attr in ('comparison','metric','namespace','statistic','threshold','period','evaluation_periods','unit','description'):
if getattr(alarm, attr) != module.params.get(attr):
changed = True
setattr(alarm, attr, module.params.get(attr))
#this is to deal with a current bug where you cannot assign '<=>' to the comparator when modifying an existing alarm
comparison = alarm.comparison
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
if not isinstance(dim1[keys], list):
dim1[keys] = [dim1[keys]]
if keys not in dim2 or dim1[keys] != dim2[keys]:
changed=True
setattr(alarm, 'dimensions', dim1)
for attr in ('alarm_actions','insufficient_data_actions','ok_actions'):
action = module.params.get(attr) or []
# Boto and/or ansible may provide same elements in lists but in different order.
# Compare on sets since they do not need any order.
if set(getattr(alarm, attr)) != set(action):
changed = True
setattr(alarm, attr, module.params.get(attr))
try:
if changed:
connection.create_alarm(alarm)
except BotoServerError as e:
module.fail_json(msg=str(e))
result = alarms[0]
module.exit_json(changed=changed, name=result.name,
actions_enabled=result.actions_enabled,
alarm_actions=result.alarm_actions,
alarm_arn=result.alarm_arn,
comparison=result.comparison,
description=result.description,
dimensions=result.dimensions,
evaluation_periods=result.evaluation_periods,
insufficient_data_actions=result.insufficient_data_actions,
last_updated=result.last_updated,
metric=result.metric,
namespace=result.namespace,
ok_actions=result.ok_actions,
period=result.period,
state_reason=result.state_reason,
state_value=result.state_value,
statistic=result.statistic,
threshold=result.threshold,
unit=result.unit)
def delete_metric_alarm(connection, module):
name = module.params.get('name')
alarms = connection.describe_alarms(alarm_names=[name])
if alarms:
try:
connection.delete_alarms([name])
module.exit_json(changed=True)
except BotoServerError as e:
module.fail_json(msg=str(e))
else:
module.exit_json(changed=False)
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(
dict(
name=dict(required=True, type='str'),
metric=dict(type='str'),
namespace=dict(type='str'),
statistic=dict(type='str', choices=['SampleCount', 'Average', 'Sum', 'Minimum', 'Maximum']),
comparison=dict(type='str', choices=['<=', '<', '>', '>=']),
threshold=dict(type='float'),
period=dict(type='int'),
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes',
'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second',
'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second',
'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
sudkannan/xen-hv
|
refs/heads/master
|
dist/install/usr/lib64/python2.6/site-packages/xen/remus/util.py
|
6
|
# utility functions
import fcntl, os, subprocess
class PipeException(Exception):
def __init__(self, message, errno):
self.errno = errno
message = '%s: %d, %s' % (message, errno, os.strerror(errno))
Exception.__init__(self, message)
class Lock(object):
"""advisory lock"""
def __init__(self, filename):
"""lock using filename for synchronization"""
self.filename = filename + '.lock'
self.fd = None
self.lock()
def __del__(self):
self.unlock()
def lock(self):
if self.fd:
return
self.fd = open(self.filename, 'w')
fcntl.lockf(self.fd, fcntl.LOCK_EX)
def unlock(self):
if not self.fd:
return
fcntl.lockf(self.fd, fcntl.LOCK_UN)
self.fd = None
try:
os.remove(self.filename)
except OSError:
# harmless race
pass
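# Illustrative usage sketch (not part of the original module): the Lock above
# serializes access between cooperating processes via an fcntl lock held on
# "<filename>.lock".  The path below is a hypothetical example.
def _example_lock_usage():
    lock = Lock('/tmp/remus-demo')
    try:
        pass  # critical section protected by the advisory lock
    finally:
        lock.unlock()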
def canonifymac(mac):
return ':'.join(['%02x' % int(field, 16) for field in mac.split(':')])
def checkpid(pid):
"""return True if pid is live"""
try:
os.kill(pid, 0)
return True
except OSError:
return False
def runcmd(args, cwd=None):
# TODO: stdin handling
if type(args) == str:
args = args.split(' ')
try:
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=True,
cwd=cwd)
stdout = proc.stdout.read()
stderr = proc.stderr.read()
proc.wait()
if proc.returncode:
print ' '.join(args)
print stderr.strip()
raise PipeException('%s failed' % args[0], proc.returncode)
return stdout
except (OSError, IOError), inst:
raise PipeException('could not run %s' % args[0], inst.errno)
def modprobe(modname):
"""attempt to load kernel module modname"""
try:
runcmd(['modprobe', '-q', modname])
return True
except PipeException:
return False
|
juanyaw/PTVS
|
refs/heads/master
|
Python/Tests/TestData/VirtualEnv/env/Lib/encodings/mbcs.py
|
103
|
""" Python 'mbcs' Codec for Windows
Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,
which was written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
# Import them explicitly to cause an ImportError
# on non-Windows systems
from codecs import mbcs_encode, mbcs_decode
# for IncrementalDecoder, IncrementalEncoder, ...
import codecs
### Codec APIs
encode = mbcs_encode
def decode(input, errors='strict'):
return mbcs_decode(input, errors, True)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return mbcs_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
_buffer_decode = mbcs_decode
class StreamWriter(codecs.StreamWriter):
encode = mbcs_encode
class StreamReader(codecs.StreamReader):
decode = mbcs_decode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=encode,
decode=decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
|
David-R-Walker/newprofiler
|
refs/heads/master
|
newprofiler.py
|
1
|
# Python Profiler v3
# Copyright (c) 2015-2017 David R Walker
# TODO:
# [x] Record only functions in StackLines
# [ ] Handle per-line hotspots as separate structure (not nested) - ?
# [ ] Handle timeline as separate structure
# [x] Use unique stack IDs to dedupe stack tuples
# [ ] Merge profile data method
# [ ] add custom metadata values to profile data (e.g. url, op, user id) for filtering / grouping
# [ ] filter/merge profile data by metadata
# [x] Expose randomize parameter for stochastic sampling
# [x] Add rate control (remove interval)
# - is this more or less misleading if we don't adjust for profiler overhead to achieve rate?
# - not adjusting for drift might be handy for estimating profiler performance/overheads
# [x] Finish linux platform driver (get thread CPU times seems to be unfinished!!)
# [ ] Windows platform driver
# [ ] Tidy up platform drivers and make a nice platform choosing function
# [ ] Convert into proper Python module + split into submodules
# [ ] Basic (temp) dump function (flat) - replace with proper collated version from stack tree
# [ ] Filter out long tail option (collate items with low ticks as 'Other') to remove noise
# [ ] Post process to build stack/call graph (have exporters work from this graph instead of raw data) - ?
# [ ] Record process ID in addition to thread?
# [ ] Option to merge processes
# [ ] Option to merge threads
# [ ] Test performance / optimize on various platforms
# [ ] Serialize (+append?) to file (lock file?)
# [ ] Load from file
# [ ] HTML5 exporter with drill-down
# [ ] Import/exporter framework
# [ ] Export to standard profiler formats (e.g. python, callgrind, firefox ThreadProfile json)
# [ ] Make Python 3 compatible
# [ ] Decorator to wrap a function with profiler
# [ ] Function to watch a function in profiler? (e.g. store code object in dict and check)
# [ ] Option to filter out standard (and custom) libraries? (path prefixes?)
# [ ] Figure out how to play nicely with time.sleep(), etc. - do we need to patch it?
# - EINTR / silent signal interrupts
# - breaks sleep/timeout behaviour in programs - provide optional monkey patches?
# - or just accept that signals break waits, and is fixed eventually by PEP475
# ('serious' code should be handling EINTR anyway?)
# [ ] Figure out how to avoid having to patch thread, wherever possible
# - maybe spawn a test thread on module import to detect if thread IDs match ?
# [x] Make interval private on profiler (or don't store)
# [x] Move all running time stats etc. into _profile_data - already done
import os
import time
import random
from contextlib import contextmanager
# - Scheduler ------------------------------------------------------------------
# Base class for repeated periodic function call
class IntervalScheduler(object):
default_rate = 1
def __init__(self, interval_func, interval=0.01, stochastic=False, func_args=(), func_kwargs={}):
self.interval = interval
self._random = None
if stochastic:
# Our own Random to avoid side effects on shared PRNG
self._random = random.Random()
self._running = False
self._interval_func = interval_func
self._func_args = func_args
self._func_kwargs = func_kwargs
self._init()
def start(self):
if not self.is_running():
self._start()
self._running = True
def stop(self):
if self.is_running():
self._stop()
self._running = False
def is_running(self):
return self._running
def get_next_interval(self):
if self._random:
return (2.0 * self._random.random() * self.interval)
else:
return self.interval
def tick(self, frame):
self._interval_func(*self._func_args, _interrupted_frame=frame, **self._func_kwargs)
# Sub-classes should override the following methods to implement a scheduler
# that will call self.tick() every self.interval seconds.
# If the scheduler interupts a Python frame, it should pass the frame that was
# interrupted to tick(), otherwise it should pass in None.
def _init(self):
pass
def _start(self):
raise NotImplementedError()
def _stop(self):
raise NotImplementedError()
# Uses a separate sleeping thread, which wakes periodically and calls self.tick()
class ThreadIntervalScheduler(IntervalScheduler):
default_rate = 100
def _init(self):
import threading
self._thread = None
self._stopping = False
self._event = threading.Event()
def _start(self):
import threading
self._event.clear()
def thread_func():
while not self._event.is_set():
self._event.wait(timeout=self.get_next_interval())
self.tick(None)
self._thread = threading.Thread(target=thread_func, name='profiler')
self._thread.daemon = True
self._thread.start()
def _stop(self):
self._event.set()
self._thread.join()
self._stopping = False
import signal
# Signals the main thread every interval, which calls the tick() method when
# the timer event is triggered.
# Note that signal handlers are blocked during system calls, library calls, etc.
# in the main thread.
# We compensate for this by keeping track of real, user cpu, and system cpu
# usage between ticks on each thread.
# We prefer ITIMER_REAL, because that will be triggered immediately upon
# returning from a long-blocking system call, so we can add the ticks to the
# most appropriate function.
# However, if the main thread is blocked for a significant period, this will
# reduce the accuracy of samples in other threads, because only the main
# thread handles signals. In such situations, the ThreadIntervalScheduler might
# be more accurate.
# We don't specify an interval and reschedule the next tick ourselves. This
# allows us to dynamically change the sample interval to avoid aliasing, and
# prevents the signal interrupting itself, which can lead to stack errors,
# some strange behaviour when threads are being join()ed, and polluting the
# profile data with stack data from the profiler.
class SignalIntervalScheduler(IntervalScheduler):
default_rate = 1000
timer = signal.ITIMER_REAL
signal = signal.SIGALRM
def _start(self):
def signal_handler(signum, frame):
self.tick(frame)
if self._run:
signal.setitimer(self.timer, self.get_next_interval(), 0)
signal.signal(self.signal, signal_handler)
signal.siginterrupt(self.signal, False)
self._run = True
signal.setitimer(self.timer, self.get_next_interval(), 0)
def _stop(self):
self._run = False
signal.setitimer(self.timer, 0, 0)
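# Illustrative sketch (not part of the original file): driving one of the
# schedulers above with a plain callback.  The Profiler further down does
# exactly this with its sample() method; the 10ms interval here is arbitrary.
def _example_scheduler_usage(duration=0.1):
    samples = []
    def on_tick(_interrupted_frame=None):
        samples.append(time.time())
    sched = ThreadIntervalScheduler(on_tick, interval=0.01)
    sched.start()
    time.sleep(duration)
    sched.stop()
    return samples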
# - Platform-specific stuff ----------------------------------------------------
import thread
import threading
class ThreadPlatform(object):
def __init__(self):
self.name = ''
self.lock = threading.Lock()
self._registered_threads = {}
self._original_start_new_thread = thread.start_new_thread
self.platform_init()
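    # _patch_thread() maps the main thread's Python ident to its native thread
    # id, then wraps thread.start_new_thread so every later thread records its
    # own native id too; get_thread_cpu_time() needs the native id per thread.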
def _patch_thread(self):
assert threading.current_thread().name == 'MainThread'
with self.lock:
self._registered_threads[threading.current_thread().ident] = self.get_current_thread_id()
def start_new_thread_wrapper(func, args, kwargs={}):
def thread_func(func, args, kwargs):
system_tid = self.get_current_thread_id()
with self.lock:
self._registered_threads[threading.current_thread().ident] = system_tid
return func(*args, **kwargs)
return self._original_start_new_thread(thread_func, (func, args, kwargs))
thread.start_new_thread = start_new_thread_wrapper
threading._start_new_thread = start_new_thread_wrapper
def _unpatch_thread(self):
with self.lock:
self._registered_threads = {}
        thread.start_new_thread = self._original_start_new_thread
        threading._start_new_thread = self._original_start_new_thread
def _get_patched_thread_id(self, python_ident):
#with self.lock:
return self._registered_threads.get(python_ident)
def platform_init(self):
raise NotImplementedError()
def get_thread_id_from_python_ident(self, python_ident):
raise NotImplementedError()
def get_current_thread_id(self):
raise NotImplementedError()
def get_thread_cpu_time(self, thread_id=None):
raise NotImplementedError()
# Single-threaded CPU times using os.times(),
# which actually gives CPU times for the whole
# process.
# Will give bad results if there are actually
# other threads running!
class SingleThreadedPlatform(ThreadPlatform):
def platform_init(self):
pass
    def get_thread_id_from_python_ident(self, python_ident):
return 0
def get_current_thread_id(self):
return 0
def get_thread_cpu_time(self, thread_id=None):
time_info = os.times()
return time_info[0] + time_info[1]
class MacPThreadPlatform(ThreadPlatform):
def platform_init(self):
import ctypes
import ctypes.util
libc = ctypes.CDLL(ctypes.util.find_library('libc'))
self._mach_thread_self = libc.mach_thread_self
self._mach_thread_self.restype = ctypes.c_uint
# TODO: check these field definitions
class time_value_t(ctypes.Structure):
_fields_ = [
("seconds", ctypes.c_int),
("microseconds",ctypes.c_int)
]
class thread_basic_info(ctypes.Structure):
_fields_ = [
("user_time", time_value_t),
("system_time",time_value_t),
("cpu_usage",ctypes.c_int),
("policy",ctypes.c_int),
("run_state",ctypes.c_int),
("flags",ctypes.c_int),
("suspend_count",ctypes.c_int),
("sleep_time",ctypes.c_int)
]
thread_info = libc.thread_info
thread_info.restype = ctypes.c_int
thread_info.argtypes = [
ctypes.c_uint,
ctypes.c_int,
ctypes.POINTER(thread_basic_info),
ctypes.POINTER(ctypes.c_uint)
]
self._thread_info = thread_info
self._THREAD_BASIC_INFO = 3
self._out_info = thread_basic_info()
self._count = ctypes.c_uint(ctypes.sizeof(self._out_info) / ctypes.sizeof(ctypes.c_uint))
self._patch_thread()
def get_thread_id_from_python_ident(self, python_ident):
return self._get_patched_thread_id(python_ident)
def get_current_thread_id(self):
return self._mach_thread_self()
def get_thread_cpu_time(self, python_ident=None):
import ctypes
# TODO: Optimize with shared structs, sizes, to minimize allocs per tick
if python_ident is None:
thread_id = self.get_current_thread_id()
else:
thread_id = self.get_thread_id_from_python_ident(python_ident)
out_info = self._out_info
result = self._thread_info(
thread_id,
self._THREAD_BASIC_INFO,
ctypes.byref(out_info),
ctypes.byref(self._count),
)
if result != 0:
return 0.0
user_time = out_info.user_time.seconds + out_info.user_time.microseconds / 1000000.0
system_time = out_info.system_time.seconds + out_info.system_time.microseconds / 1000000.0
return user_time + system_time
class LinuxPThreadPlatform(ThreadPlatform):
def platform_init(self):
import ctypes
import ctypes.util
pthread = ctypes.CDLL(ctypes.util.find_library('pthread'))
libc = ctypes.CDLL(ctypes.util.find_library('c'))
pthread_t = ctypes.c_ulong
clockid_t = ctypes.c_long
time_t = ctypes.c_long
NANOSEC = 1.0 / 1e9
CLOCK_THREAD_CPUTIME_ID = 3 # from linux/time.h
class timespec(ctypes.Structure):
_fields_ = [
('tv_sec', time_t),
('tv_nsec', ctypes.c_long),
]
# wrap pthread_self()
pthread_self = pthread.pthread_self
pthread.argtypes = []
pthread_self.restype = pthread_t
# wrap pthread_getcpuclockid()
pthread_getcpuclockid = pthread.pthread_getcpuclockid
pthread_getcpuclockid.argtypes = [pthread_t, ctypes.POINTER(clockid_t)]
pthread_getcpuclockid.restype = clockid_t
# wrap clock_gettime()
clock_gettime = libc.clock_gettime
clock_gettime.argtypes = [clockid_t, ctypes.POINTER(timespec)]
clock_gettime.restype = ctypes.c_int
def get_current_thread_id():
return pthread_self()
def get_thread_cpu_time(thread_id=None):
if thread_id is None:
thread_id = pthread_self()
# First, get the thread's CPU clock ID
clock_id = clockid_t()
error = pthread_getcpuclockid(thread_id, ctypes.pointer(clock_id))
if error:
return None
# Now get time from clock...
result = timespec()
error = clock_gettime(clock_id, ctypes.pointer(result))
if error:
return None
cpu_time = result.tv_sec + result.tv_nsec * NANOSEC
return cpu_time
self._get_current_thread_id = get_current_thread_id
self._get_thread_cpu_time = get_thread_cpu_time
def get_current_thread_id(self):
return self._get_current_thread_id()
    def get_thread_cpu_time(self, thread_id=None):
return self._get_thread_cpu_time(thread_id)
import sys
if sys.platform == 'darwin':
thread_platform = MacPThreadPlatform()
elif sys.platform.startswith('linux'):  # 'linux2' on Python 2, 'linux' on Python 3
thread_platform = LinuxPThreadPlatform()
# TODO: Windows support
else:
try:
import thread
except ImportError:
pass
else:
import warnings
warnings.warn('Multi-threaded CPU times not supported on this platform!')
thread_platform = SingleThreadedPlatform()
# - Sample data ----------------------------------------------------------------
import collections
StackLine = collections.namedtuple('StackLine', ['type', 'name', 'file', 'line', 'data'])
def stack_line_from_frame(frame, stype='func', data=None):
code = frame.f_code
return StackLine(stype, code.co_name, code.co_filename, code.co_firstlineno, data)
class SampleData(object):
__slots__ = ['rtime', 'cputime', 'ticks']
def __init__(self):
self.rtime = 0.0 # Real / wall-clock time
self.cputime = 0.0 # User CPU time (single thread)
self.ticks = 0 # Actual number of samples
def __str__(self):
return 'SampleData<r=%.3f, cpu=%.3f, t=%d>' % (
self.rtime,
self.cputime,
self.ticks
)
def __repr__(self):
return str(self)
class RawProfileData(object):
def __init__(self):
self.stack_line_id_map = {} # Maps StackLines to IDs
self.stack_tuple_id_map = {} # Map tuples of StackLine IDs to IDs
self.stack_data = {} # Maps stack ID tuples to SampleData
self.time_running = 0.0 # Total amount of time sampling has been active
self.total_ticks = 0 # Total number of samples we've taken
def add_sample_data(self, stack_list, rtime, cputime, ticks):
sm = self.stack_line_id_map
sd = self.stack_line_id_map.setdefault
stack_tuple = tuple(
sd(stack_line, len(sm))
for stack_line in stack_list
)
stack_tuple_id = self.stack_tuple_id_map.setdefault(
stack_tuple,
len(self.stack_tuple_id_map),
)
if stack_tuple_id in self.stack_data:
sample_data = self.stack_data[stack_tuple_id]
else:
sample_data = self.stack_data[stack_tuple_id] = SampleData()
sample_data.rtime += rtime
sample_data.cputime += cputime
sample_data.ticks += ticks
self.total_ticks += ticks
def dump(self, sort='rtime'):
assert sort in SampleData.__slots__
# Quick util function to dump raw data in a vaguely-useful format
# TODO: replace with proper text exporter with sort parameters, etc.
print '%s:\n\n %d samples taken in %.3fs:\n' % (
self.__class__.__name__,
self.total_ticks,
self.time_running,
)
print ' Ordered by: %s\n' % sort
# Invert stack -> ID map
stack_line_map = dict([
(v, k)
for k, v
in self.stack_line_id_map.items()
])
stack_map = dict([
(v, k)
for k, v
in self.stack_tuple_id_map.items()
])
lines = [
(getattr(sample_data, sort), stack_id, sample_data)
for stack_id, sample_data
in self.stack_data.items()
]
lines.sort()
lines.reverse()
print ' ticks rtime cputime filename:lineno(function)'
for _, stack_id, sample_data in lines:
stack = stack_map[stack_id]
stack_line = stack_line_map[stack[0]]
print ' %7d % 8.3f % 8.3f %s:%d(%s) : %r' % (
sample_data.ticks,
sample_data.rtime,
sample_data.cputime,
os.path.basename(stack_line.file),
stack_line.line,
stack_line.name,
stack,
)
print
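# Illustrative sketch (not part of the original file): how RawProfileData
# dedupes identical stacks through its two id maps, so repeated samples of the
# same call stack only grow one SampleData entry.
def _example_raw_profile_data():
    data = RawProfileData()
    stack = [
        StackLine('func', 'inner', 'demo.py', 10, None),
        StackLine('func', 'outer', 'demo.py', 1, None),
        StackLine('thread', '12345', '', 0, None),
    ]
    data.add_sample_data(stack, rtime=0.01, cputime=0.005, ticks=1)
    data.add_sample_data(stack, rtime=0.01, cputime=0.005, ticks=1)
    assert len(data.stack_data) == 1  # identical stacks share a single entry
    assert data.total_ticks == 2
    return data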
class ThreadClock(object):
__slots__ = ['rtime', 'cputime']
def __init__(self):
self.rtime = 0.0
self.cputime = 0.0
class Profiler(object):
_scheduler_map = {
'signal':SignalIntervalScheduler,
'thread':ThreadIntervalScheduler
}
def __init__(
self,
scheduler_type='signal', # Which scheduler to use
collect_stacks=True, # Collect full call-tree data?
rate=None,
stochastic=False,
):
self.collect_stacks = collect_stacks
assert (
scheduler_type in self._scheduler_map
or isinstance(scheduler_type, IntervalScheduler)
), 'Unknown scheduler type'
self.scheduler_type = scheduler_type
if isinstance(scheduler_type, str):
scheduler_type = self._scheduler_map[scheduler_type]
if rate is None:
rate = scheduler_type.default_rate
self._scheduler = scheduler_type(
self.sample,
interval=1.0/rate,
stochastic=stochastic,
)
self.reset()
def reset(self):
self._profile_data = RawProfileData()
self._thread_clocks = {} # Maps from thread ID to ThreadClock
        self.last_tick = 0.0  # wall-clock time of the previous sample, read by sample()
self.total_samples = 0
self.sampling_time = 0.0
self._empty_stack = [StackLine(None, 'null', '', 0, None)]
self._start_time = 0.0
def sample(self, _interrupted_frame=None):
sample_time = time.time()
current_frames = sys._current_frames()
current_thread = thread.get_ident()
for thread_ident, frame in current_frames.items():
if thread_ident == current_thread:
frame = _interrupted_frame
if frame is not None:
# 1.7 %
stack = [stack_line_from_frame(frame)]
if self.collect_stacks:
frame = frame.f_back
while frame is not None:
stack.append(stack_line_from_frame(frame))
frame = frame.f_back
stack.append(StackLine('thread', str(thread_ident), '', 0, None)) # todo: include thread name?
# todo: include PID?
# todo: include custom metadata/labels?
# 2.0 %
if thread_ident in self._thread_clocks:
thread_clock = self._thread_clocks[thread_ident]
cputime = thread_platform.get_thread_cpu_time(thread_ident)
else:
thread_clock = self._thread_clocks[thread_ident] = ThreadClock()
cputime = thread_platform.get_thread_cpu_time(thread_ident)
# ~5.5%
self._profile_data.add_sample_data(
stack,
sample_time - self.last_tick,
cputime - thread_clock.cputime,
1
)
thread_clock.cputime = cputime
else:
self._profile_data.add_sample_data(
self._empty_stack, sample_time - self.last_tick, 0.0, 1
)
self.last_tick = sample_time
self.total_samples += 1
self.sampling_time += time.time() - sample_time
def start(self):
import threading
# reset thread clocks...
self._thread_clocks = {}
for thread in threading.enumerate():
thread_clock = ThreadClock()
self._thread_clocks[thread.ident] = thread_clock
cputime = thread_platform.get_thread_cpu_time(thread.ident)
thread_clock.cputime = cputime
self._start_time = self.last_tick = time.time()
self._scheduler.start()
@contextmanager
def activated(self):
try:
self.start()
yield self
finally:
self.stop()
def stop(self):
self._scheduler.stop()
self._profile_data.time_running += time.time() - self._start_time
self._start_time = 0.0
def busy(rate=100):
import time
profiler = Profiler(rate=rate)
with profiler.activated():
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
pass
return profiler
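# Illustrative sketch (not part of the original file): profiling an arbitrary
# workload and printing a flat report.  Accessing _profile_data directly is a
# stop-gap, since the module exposes no public accessor yet (see TODOs above).
def _example_profile_workload(rate=200):
    profiler = Profiler(scheduler_type='thread', rate=rate)
    with profiler.activated():
        sum(i * i for i in xrange(2000000))  # hypothetical CPU-bound workload
    profiler._profile_data.dump(sort='ticks')
    return profiler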
|
kvasukib/groupflow_simulator
|
refs/heads/master
|
pox/pox/openflow/discovery.py
|
2
|
# Copyright 2011-2013 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is loosely based on the discovery component in NOX.
"""
This module discovers the connectivity between OpenFlow switches by sending
out LLDP packets. To be notified of this information, listen to LinkEvents
on core.openflow_discovery.
It's possible that some of this should be abstracted out into a generic
Discovery module, or a Discovery superclass.
"""
from pox.lib.revent import *
from pox.lib.recoco import Timer
from pox.lib.util import dpid_to_str, str_to_bool
from pox.core import core
import pox.openflow.libopenflow_01 as of
import pox.lib.packet as pkt
import struct
import time
from collections import namedtuple
from random import shuffle, random
log = core.getLogger()
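# Illustrative sketch (not part of this component): a minimal consumer of the
# LinkEvents raised below.  The handler name and log messages are hypothetical
# examples of how another component would typically hook in.
def _example_link_listener ():
  def _handle_LinkEvent (event):
    if event.added:
      log.info("Link up: %s", event.link)
    elif event.removed:
      log.info("Link down: %s", event.link)
  core.openflow_discovery.addListenerByName("LinkEvent", _handle_LinkEvent)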
class LLDPSender (object):
"""
Sends out discovery packets
"""
SendItem = namedtuple("LLDPSenderItem", ('dpid','port_num','packet'))
#NOTE: This class keeps the packets to send in a flat list, which makes
# adding/removing them on switch join/leave or (especially) port
# status changes relatively expensive. Could easily be improved.
# Maximum times to run the timer per second
_sends_per_sec = 15
def __init__ (self, send_cycle_time, ttl = 120):
"""
Initialize an LLDP packet sender
send_cycle_time is the time (in seconds) that this sender will take to
send every discovery packet. Thus, it should be the link timeout
interval at most.
ttl is the time (in seconds) for which a receiving LLDP agent should
consider the rest of the data to be valid. We don't use this, but
other LLDP agents might. Can't be 0 (this means revoke).
"""
# Packets remaining to be sent in this cycle
self._this_cycle = []
# Packets we've already sent in this cycle
self._next_cycle = []
# Packets to send in a batch
self._send_chunk_size = 1
self._timer = None
self._ttl = ttl
self._send_cycle_time = send_cycle_time
core.listen_to_dependencies(self)
def _handle_openflow_PortStatus (self, event):
"""
Track changes to switch ports
"""
if event.added:
self.add_port(event.dpid, event.port, event.ofp.desc.hw_addr)
elif event.deleted:
self.del_port(event.dpid, event.port)
def _handle_openflow_ConnectionUp (self, event):
self.del_switch(event.dpid, set_timer = False)
ports = [(p.port_no, p.hw_addr) for p in event.ofp.ports]
for port_num, port_addr in ports:
self.add_port(event.dpid, port_num, port_addr, set_timer = False)
self._set_timer()
def _handle_openflow_ConnectionDown (self, event):
self.del_switch(event.dpid)
def del_switch (self, dpid, set_timer = True):
self._this_cycle = [p for p in self._this_cycle if p.dpid != dpid]
self._next_cycle = [p for p in self._next_cycle if p.dpid != dpid]
if set_timer: self._set_timer()
def del_port (self, dpid, port_num, set_timer = True):
if port_num > of.OFPP_MAX: return
self._this_cycle = [p for p in self._this_cycle
if p.dpid != dpid or p.port_num != port_num]
self._next_cycle = [p for p in self._next_cycle
if p.dpid != dpid or p.port_num != port_num]
if set_timer: self._set_timer()
def add_port (self, dpid, port_num, port_addr, set_timer = True):
if port_num > of.OFPP_MAX: return
self.del_port(dpid, port_num, set_timer = False)
self._next_cycle.append(LLDPSender.SendItem(dpid, port_num,
self.create_discovery_packet(dpid, port_num, port_addr)))
if set_timer: self._set_timer()
def _set_timer (self):
if self._timer: self._timer.cancel()
self._timer = None
num_packets = len(self._this_cycle) + len(self._next_cycle)
if num_packets == 0: return
self._send_chunk_size = 1 # One at a time
interval = self._send_cycle_time / float(num_packets)
if interval < 1.0 / self._sends_per_sec:
# Would require too many sends per sec -- send more than one at once
interval = 1.0 / self._sends_per_sec
chunk = float(num_packets) / self._send_cycle_time / self._sends_per_sec
self._send_chunk_size = chunk
self._timer = Timer(interval,
self._timer_handler, recurring=True)
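  # Worked example (illustrative): with the default 10s link timeout the
  # send_cycle_time is 1.0s.  For 60 ports, 1.0/60 is below the 1/15s floor,
  # so the timer fires every 1/15s and each tick sends 60/1.0/15 = 4 packets,
  # still covering all 60 ports once per cycle.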
def _timer_handler (self):
"""
Called by a timer to actually send packets.
Picks the first packet off this cycle's list, sends it, and then puts
it on the next-cycle list. When this cycle's list is empty, starts
the next cycle.
"""
num = int(self._send_chunk_size)
fpart = self._send_chunk_size - num
if random() < fpart: num += 1
for _ in range(num):
if len(self._this_cycle) == 0:
self._this_cycle = self._next_cycle
self._next_cycle = []
#shuffle(self._this_cycle)
item = self._this_cycle.pop(0)
self._next_cycle.append(item)
core.openflow.sendToDPID(item.dpid, item.packet)
def create_discovery_packet (self, dpid, port_num, port_addr):
"""
Build discovery packet
"""
chassis_id = pkt.chassis_id(subtype=pkt.chassis_id.SUB_LOCAL)
chassis_id.id = bytes('dpid:' + hex(long(dpid))[2:-1])
# Maybe this should be a MAC. But a MAC of what? Local port, maybe?
port_id = pkt.port_id(subtype=pkt.port_id.SUB_PORT, id=str(port_num))
ttl = pkt.ttl(ttl = self._ttl)
sysdesc = pkt.system_description()
sysdesc.payload = bytes('dpid:' + hex(long(dpid))[2:-1])
discovery_packet = pkt.lldp()
discovery_packet.tlvs.append(chassis_id)
discovery_packet.tlvs.append(port_id)
discovery_packet.tlvs.append(ttl)
discovery_packet.tlvs.append(sysdesc)
discovery_packet.tlvs.append(pkt.end_tlv())
eth = pkt.ethernet(type=pkt.ethernet.LLDP_TYPE)
eth.src = port_addr
eth.dst = pkt.ETHERNET.NDP_MULTICAST
eth.payload = discovery_packet
po = of.ofp_packet_out(action = of.ofp_action_output(port=port_num))
po.data = eth.pack()
return po.pack()
class LinkEvent (Event):
"""
Link up/down event
"""
def __init__ (self, add, link):
Event.__init__(self)
self.link = link
self.added = add
self.removed = not add
def port_for_dpid (self, dpid):
if self.link.dpid1 == dpid:
return self.link.port1
if self.link.dpid2 == dpid:
return self.link.port2
return None
class Link (namedtuple("LinkBase",("dpid1","port1","dpid2","port2"))):
@property
def uni (self):
"""
Returns a "unidirectional" version of this link
    The unidirectional versions of symmetric links will be equal
"""
pairs = list(self.end)
pairs.sort()
return Link(pairs[0][0],pairs[0][1],pairs[1][0],pairs[1][1])
@property
def end (self):
return ((self[0],self[1]),(self[2],self[3]))
def __str__ (self):
return "%s.%s -> %s.%s" % (dpid_to_str(self[0]),self[1],
dpid_to_str(self[2]),self[3])
def __repr__ (self):
return "Link(dpid1=%s,port1=%s, dpid2=%s,port2=%s)" % (self.dpid1,
self.port1, self.dpid2, self.port2)
class Discovery (EventMixin):
"""
  Component that attempts to discover network topology.
Sends out specially-crafted LLDP packets, and monitors their arrival.
"""
_flow_priority = 65000 # Priority of LLDP-catching flow (if any)
_link_timeout = 10 # How long until we consider a link dead
_timeout_check_period = 5 # How often to check for timeouts
_eventMixin_events = set([
LinkEvent,
])
_core_name = "openflow_discovery" # we want to be core.openflow_discovery
Link = Link
def __init__ (self, install_flow = True, explicit_drop = True,
link_timeout = None, eat_early_packets = False):
self._eat_early_packets = eat_early_packets
self._explicit_drop = explicit_drop
self._install_flow = install_flow
if link_timeout: self._link_timeout = link_timeout
self.adjacency = {} # From Link to time.time() stamp
self._sender = LLDPSender(self.send_cycle_time)
# Listen with a high priority (mostly so we get PacketIns early)
core.listen_to_dependencies(self,
listen_args={'openflow':{'priority':0xffffffff}})
Timer(self._timeout_check_period, self._expire_links, recurring=True)
@property
def send_cycle_time (self):
return self._link_timeout / 10.0
def install_flow (self, con_or_dpid, priority = None):
if priority is None:
priority = self._flow_priority
if isinstance(con_or_dpid, (int,long)):
con = core.openflow.connections.get(con_or_dpid)
if con is None:
log.warn("Can't install flow for %s", dpid_to_str(con_or_dpid))
return False
else:
con = con_or_dpid
match = of.ofp_match(dl_type = pkt.ethernet.LLDP_TYPE,
dl_dst = pkt.ETHERNET.NDP_MULTICAST)
msg = of.ofp_flow_mod()
msg.priority = priority
msg.match = match
msg.actions.append(of.ofp_action_output(port = of.OFPP_CONTROLLER))
con.send(msg)
return True
def _handle_openflow_ConnectionUp (self, event):
if self._install_flow:
# Make sure we get appropriate traffic
log.debug("Installing flow for %s", dpid_to_str(event.dpid))
self.install_flow(event.connection)
def _handle_openflow_ConnectionDown (self, event):
# Delete all links on this switch
self._delete_links([link for link in self.adjacency
if link.dpid1 == event.dpid
or link.dpid2 == event.dpid])
def _expire_links (self):
"""
Remove apparently dead links
"""
now = time.time()
expired = [link for link,timestamp in self.adjacency.iteritems()
if timestamp + self._link_timeout < now]
if expired:
for link in expired:
log.info('link timeout: %s', link)
self._delete_links(expired)
def _handle_openflow_PacketIn (self, event):
"""
Receive and process LLDP packets
"""
packet = event.parsed
if (packet.effective_ethertype != pkt.ethernet.LLDP_TYPE
or packet.dst != pkt.ETHERNET.NDP_MULTICAST):
if not self._eat_early_packets: return
if not event.connection.connect_time: return
enable_time = time.time() - self.send_cycle_time - 1
if event.connection.connect_time > enable_time:
return EventHalt
return
if self._explicit_drop:
if event.ofp.buffer_id is not None:
log.debug("Dropping LLDP packet %i", event.ofp.buffer_id)
msg = of.ofp_packet_out()
msg.buffer_id = event.ofp.buffer_id
msg.in_port = event.port
event.connection.send(msg)
lldph = packet.find(pkt.lldp)
if lldph is None or not lldph.parsed:
log.error("LLDP packet could not be parsed")
return EventHalt
if len(lldph.tlvs) < 3:
log.error("LLDP packet without required three TLVs")
return EventHalt
if lldph.tlvs[0].tlv_type != pkt.lldp.CHASSIS_ID_TLV:
log.error("LLDP packet TLV 1 not CHASSIS_ID")
return EventHalt
if lldph.tlvs[1].tlv_type != pkt.lldp.PORT_ID_TLV:
log.error("LLDP packet TLV 2 not PORT_ID")
return EventHalt
if lldph.tlvs[2].tlv_type != pkt.lldp.TTL_TLV:
log.error("LLDP packet TLV 3 not TTL")
return EventHalt
def lookInSysDesc ():
r = None
for t in lldph.tlvs[3:]:
if t.tlv_type == pkt.lldp.SYSTEM_DESC_TLV:
# This is our favored way...
for line in t.payload.split('\n'):
if line.startswith('dpid:'):
try:
return int(line[5:], 16)
except:
pass
if len(t.payload) == 8:
# Maybe it's a FlowVisor LLDP...
# Do these still exist?
try:
return struct.unpack("!Q", t.payload)[0]
except:
pass
return None
originatorDPID = lookInSysDesc()
if originatorDPID == None:
# We'll look in the CHASSIS ID
if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_LOCAL:
if lldph.tlvs[0].id.startswith('dpid:'):
# This is how NOX does it at the time of writing
try:
originatorDPID = int(lldph.tlvs[0].id[5:], 16)
except:
pass
if originatorDPID == None:
if lldph.tlvs[0].subtype == pkt.chassis_id.SUB_MAC:
# Last ditch effort -- we'll hope the DPID was small enough
# to fit into an ethernet address
if len(lldph.tlvs[0].id) == 6:
try:
s = lldph.tlvs[0].id
originatorDPID = struct.unpack("!Q",'\x00\x00' + s)[0]
except:
pass
if originatorDPID == None:
log.warning("Couldn't find a DPID in the LLDP packet")
return EventHalt
if originatorDPID not in core.openflow.connections:
log.info('Received LLDP packet from unknown switch')
return EventHalt
# Get port number from port TLV
if lldph.tlvs[1].subtype != pkt.port_id.SUB_PORT:
log.warning("Thought we found a DPID, but packet didn't have a port")
return EventHalt
originatorPort = None
if lldph.tlvs[1].id.isdigit():
# We expect it to be a decimal value
originatorPort = int(lldph.tlvs[1].id)
elif len(lldph.tlvs[1].id) == 2:
# Maybe it's a 16 bit port number...
try:
originatorPort = struct.unpack("!H", lldph.tlvs[1].id)[0]
except:
pass
if originatorPort is None:
log.warning("Thought we found a DPID, but port number didn't " +
"make sense")
return EventHalt
if (event.dpid, event.port) == (originatorDPID, originatorPort):
log.warning("Port received its own LLDP packet; ignoring")
return EventHalt
link = Discovery.Link(originatorDPID, originatorPort, event.dpid,
event.port)
if link not in self.adjacency:
self.adjacency[link] = time.time()
log.info('link detected: %s', link)
self.raiseEventNoErrors(LinkEvent, True, link)
else:
# Just update timestamp
self.adjacency[link] = time.time()
return EventHalt # Probably nobody else needs this event
def _delete_links (self, links):
for link in links:
self.raiseEventNoErrors(LinkEvent, False, link)
for link in links:
self.adjacency.pop(link, None)
def is_edge_port (self, dpid, port):
"""
Return True if given port does not connect to another switch
"""
for link in self.adjacency:
if link.dpid1 == dpid and link.port1 == port:
return False
if link.dpid2 == dpid and link.port2 == port:
return False
return True
def launch (no_flow = False, explicit_drop = True, link_timeout = None,
eat_early_packets = False):
explicit_drop = str_to_bool(explicit_drop)
eat_early_packets = str_to_bool(eat_early_packets)
install_flow = not str_to_bool(no_flow)
if link_timeout: link_timeout = int(link_timeout)
core.registerNew(Discovery, explicit_drop=explicit_drop,
install_flow=install_flow, link_timeout=link_timeout,
eat_early_packets=eat_early_packets)
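# --- Usage sketch (not part of this module) ---
# A minimal, hypothetical POX component that consumes the LinkEvent raised by
# the Discovery component above; it would live in its own module and be
# launched alongside openflow.discovery.
from pox.core import core

class LinkLogger (object):
  def __init__ (self):
    # listen_to_dependencies() wires up _handle_openflow_discovery_LinkEvent
    # so this fires whenever openflow_discovery raises a LinkEvent.
    core.listen_to_dependencies(self)

  def _handle_openflow_discovery_LinkEvent (self, event):
    action = "added" if event.added else "removed"
    core.getLogger().info("Link %s: %s", action, event.link)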
|
40123148/w17b
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/this.py
|
948
|
s = """Gur Mra bs Clguba, ol Gvz Crgref
Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
d = {}
for c in (65, 97):
for i in range(26):
d[chr(i+c)] = chr((i+13) % 26 + c)
print("".join([d.get(c, c) for c in s]))
|
Distrotech/scons
|
refs/heads/distrotech-scons
|
src/engine/SCons/Options/EnumOption.py
|
6
|
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables/
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def EnumOption(*args, **kw):
global warned
if not warned:
msg = "The EnumOption() function is deprecated; use the EnumVariable() function instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
return SCons.Variables.EnumVariable(*args, **kw)
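# Usage sketch (illustrative values, not part of the SCons source): calling the
# deprecated name forwards to the new Variables API and warns only once.
#
#   import SCons.Options
#   opt = SCons.Options.EnumOption('debug', 'Debug build', 'no',
#                                  allowed_values=('yes', 'no'))
#   # The first call emits DeprecatedOptionsWarning, then returns the same
#   # tuple that SCons.Variables.EnumVariable() would.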
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
fargalaxy1/geonode-wagtail
|
refs/heads/master
|
geonode/layers/management/commands/importlayers.py
|
13
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from optparse import make_option
from geonode.layers.utils import upload
from geonode.people.utils import get_valid_user
import traceback
import datetime
class Command(BaseCommand):
help = ("Brings a data file or a directory full of data files into a"
" GeoNode site. Layers are added to the Django database, the"
" GeoServer configuration, and the pycsw metadata index.")
args = 'path [path...]'
option_list = BaseCommand.option_list + (
make_option(
'-u',
'--user',
dest="user",
default=None,
help="Name of the user account which should own the imported layers"),
make_option(
'-i',
'--ignore-errors',
action='store_true',
dest='ignore_errors',
default=False,
help='Ignore errors and continue processing the remaining layers.'),
make_option(
'-o',
'--overwrite',
dest='overwrite',
default=False,
action="store_true",
help="Overwrite existing layers if discovered (defaults False)"),
make_option(
'-k',
'--keywords',
dest='keywords',
default="",
help="""The default keywords, separated by comma, for the
imported layer(s). Will be the same for all imported layers
if multiple imports are done in one command"""
),
make_option(
'-c',
'--category',
dest='category',
default=None,
help="""The category for the
imported layer(s). Will be the same for all imported layers
if multiple imports are done in one command"""
),
make_option(
'-r',
'--regions',
dest='regions',
default="",
help="""The default regions, separated by comma, for the
imported layer(s). Will be the same for all imported layers
if multiple imports are done in one command"""
),
make_option(
'-t',
'--title',
dest='title',
default=None,
help="""The title for the
imported layer(s). Will be the same for all imported layers
if multiple imports are done in one command"""
),
make_option(
'-d',
'--date',
dest='date',
default=None,
help=('The date and time for the imported layer(s). Will be the '
'same for all imported layers if multiple imports are done '
'in one command. Use quotes to specify both the date and '
'time in the format \'YYYY-MM-DD HH:MM:SS\'.')
),
make_option(
'-p',
'--private',
dest='private',
default=False,
action="store_true",
help="Make layer viewable only to owner"
),
make_option(
'-m',
'--metadata_uploaded_preserve',
dest='metadata_uploaded_preserve',
default=False,
action="store_true",
help="Force metadata XML to be preserved"
)
)
def handle(self, *args, **options):
verbosity = int(options.get('verbosity'))
# ignore_errors = options.get('ignore_errors')
username = options.get('user')
user = get_valid_user(username)
overwrite = options.get('overwrite')
category = options.get('category', None)
private = options.get('private', False)
title = options.get('title', None)
date = options.get('date', None)
metadata_uploaded_preserve = options.get('metadata_uploaded_preserve',
False)
if verbosity > 0:
console = self.stdout
else:
console = None
if overwrite:
skip = False
else:
skip = True
keywords = options.get('keywords').split(',')
if len(keywords) == 1 and keywords[0] == '':
keywords = []
else:
keywords = map(str.strip, keywords)
regions = options.get('regions').split(',')
if len(regions) == 1 and regions[0] == '':
regions = []
else:
regions = map(str.strip, regions)
start = datetime.datetime.now()
output = []
for path in args:
out = upload(
path,
user=user,
overwrite=overwrite,
skip=skip,
keywords=keywords,
verbosity=verbosity,
console=console,
category=category,
regions=regions,
title=title,
date=date,
private=private,
metadata_uploaded_preserve=metadata_uploaded_preserve)
output.extend(out)
updated = [dict_['file']
for dict_ in output if dict_['status'] == 'updated']
created = [dict_['file']
for dict_ in output if dict_['status'] == 'created']
skipped = [dict_['file']
for dict_ in output if dict_['status'] == 'skipped']
failed = [dict_['file']
for dict_ in output if dict_['status'] == 'failed']
finish = datetime.datetime.now()
td = finish - start
duration = td.microseconds / 1000000 + td.seconds + td.days * 24 * 3600
duration_rounded = round(duration, 2)
if verbosity > 1:
print "\nDetailed report of failures:"
for dict_ in output:
if dict_['status'] == 'failed':
print "\n\n", dict_['file'], "\n================"
traceback.print_exception(dict_['exception_type'],
dict_['error'],
dict_['traceback'])
if verbosity > 0:
print "\n\nFinished processing %d layers in %s seconds.\n" % (
len(output), duration_rounded)
print "%d Created layers" % len(created)
print "%d Updated layers" % len(updated)
print "%d Skipped layers" % len(skipped)
print "%d Failed layers" % len(failed)
if len(output) > 0:
print "%f seconds per layer" % (duration * 1.0 / len(output))
|
wkoathp/glance
|
refs/heads/master
|
glance/tests/functional/test_bin_glance_cache_manage.py
|
5
|
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Functional test case that utilizes the bin/glance-cache-manage CLI tool"""
import datetime
import hashlib
import os
import sys
import httplib2
from oslo.serialization import jsonutils
from oslo_utils import units
# NOTE(jokke): simplified transition to py3, behaves like py2 xrange
from six.moves import range
from glance.tests import functional
from glance.tests.utils import execute
from glance.tests.utils import minimal_headers
FIVE_KB = 5 * units.Ki
class TestBinGlanceCacheManage(functional.FunctionalTest):
"""Functional tests for the bin/glance CLI tool"""
def setUp(self):
self.image_cache_driver = "sqlite"
super(TestBinGlanceCacheManage, self).setUp()
self.api_server.deployment_flavor = "cachemanagement"
# NOTE(sirp): This is needed in case we are running the tests under an
# environment in which OS_AUTH_STRATEGY=keystone. The test server we
# spin up won't have keystone support, so we need to switch to the
# NoAuth strategy.
os.environ['OS_AUTH_STRATEGY'] = 'noauth'
os.environ['OS_AUTH_URL'] = ''
def add_image(self, name):
"""
Adds an image with supplied name and returns the newly-created
image identifier.
"""
image_data = "*" * FIVE_KB
headers = minimal_headers(name)
path = "http://%s:%d/v1/images" % ("127.0.0.1", self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'POST', headers=headers,
body=image_data)
self.assertEqual(201, response.status)
data = jsonutils.loads(content)
self.assertEqual(hashlib.md5(image_data).hexdigest(),
data['image']['checksum'])
self.assertEqual(FIVE_KB, data['image']['size'])
self.assertEqual(name, data['image']['name'])
self.assertTrue(data['image']['is_public'])
return data['image']['id']
def is_image_cached(self, image_id):
"""
Return True if supplied image ID is cached, False otherwise
"""
exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable
cmd = "%s --port=%d list-cached" % (exe_cmd, self.api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
return image_id in out
def iso_date(self, image_id):
"""
Return True if the current UTC date appears in the list-cached output,
False otherwise
"""
exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable
cmd = "%s --port=%d list-cached" % (exe_cmd, self.api_port)
exitcode, out, err = execute(cmd)
return datetime.datetime.utcnow().strftime("%Y-%m-%d") in out
def test_no_cache_enabled(self):
"""
Test the cache index command when cache management is not enabled
"""
self.cleanup()
self.api_server.deployment_flavor = ''
self.start_servers() # Not passing in cache_manage in pipeline...
api_port = self.api_port
# Verify decent error message returned
exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable
cmd = "%s --port=%d list-cached" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(1, exitcode)
self.assertTrue('Cache management middleware not enabled on host'
in out.strip())
self.stop_servers()
def test_cache_index(self):
"""
Test that cache index command works
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
# Verify no cached images
exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable
cmd = "%s --port=%d list-cached" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue('No cached images' in out.strip())
ids = {}
# Add a few images and cache the second one of them
# by GETing the image...
for x in range(4):
ids[x] = self.add_image("Image%s" % x)
path = "http://%s:%d/v1/images/%s" % ("127.0.0.1", api_port,
ids[1])
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
self.assertTrue(self.is_image_cached(ids[1]),
"%s is not cached." % ids[1])
self.assertTrue(self.iso_date(ids[1]))
self.stop_servers()
def test_queue(self):
"""
Test that we can queue and fetch images using the
CLI utility
"""
self.cleanup()
self.start_servers(**self.__dict__.copy())
api_port = self.api_port
# Verify no cached images
exe_cmd = '%s -m glance.cmd.cache_manage' % sys.executable
cmd = "%s --port=%d list-cached" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue('No cached images' in out.strip())
# Verify no queued images
cmd = "%s --port=%d list-queued" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue('No queued images' in out.strip())
ids = {}
# Add a few images; unlike test_cache_index, nothing is cached yet --
# the second image is cached later by queueing it and running the prefetcher.
for x in range(4):
ids[x] = self.add_image("Image%s" % x)
# Queue second image and then cache it
cmd = "%s --port=%d --force queue-image %s" % (
exe_cmd, api_port, ids[1])
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
# Verify queued second image
cmd = "%s --port=%d list-queued" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(ids[1] in out, 'Image %s was not queued!' % ids[1])
# Cache images in the queue by running the prefetcher
cache_config_filepath = os.path.join(self.test_dir, 'etc',
'glance-cache.conf')
cache_file_options = {
'image_cache_dir': self.api_server.image_cache_dir,
'image_cache_driver': self.image_cache_driver,
'registry_port': self.registry_server.bind_port,
'log_file': os.path.join(self.test_dir, 'cache.log'),
'metadata_encryption_key': "012345678901234567890123456789ab"
}
with open(cache_config_filepath, 'w') as cache_file:
cache_file.write("""[DEFAULT]
debug = True
verbose = True
image_cache_dir = %(image_cache_dir)s
image_cache_driver = %(image_cache_driver)s
registry_host = 127.0.0.1
registry_port = %(registry_port)s
metadata_encryption_key = %(metadata_encryption_key)s
log_file = %(log_file)s
""" % cache_file_options)
cmd = ("%s -m glance.cmd.cache_prefetcher --config-file %s" %
(sys.executable, cache_config_filepath))
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertEqual('', out.strip(), out)
# Verify no queued images
cmd = "%s --port=%d list-queued" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue('No queued images' in out.strip())
# Verify second image now cached
cmd = "%s --port=%d list-cached" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(ids[1] in out, 'Image %s was not cached!' % ids[1])
# Queue third image and then delete it from queue
cmd = "%s --port=%d --force queue-image %s" % (
exe_cmd, api_port, ids[2])
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
# Verify queued third image
cmd = "%s --port=%d list-queued" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue(ids[2] in out, 'Image %s was not queued!' % ids[2])
# Delete the image from the queue
cmd = ("%s --port=%d --force "
"delete-queued-image %s") % (exe_cmd, api_port, ids[2])
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
# Verify no queued images
cmd = "%s --port=%d list-queued" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue('No queued images' in out.strip())
# Queue all images
for x in range(4):
cmd = ("%s --port=%d --force "
"queue-image %s") % (exe_cmd, api_port, ids[x])
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
# Verify that the three uncached images were queued
cmd = "%s --port=%d list-queued" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue('Found 3 queued images' in out)
# Delete all images from the queue
cmd = ("%s --port=%d --force "
"delete-all-queued-images") % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
# Verify nothing in queue anymore
cmd = "%s --port=%d list-queued" % (exe_cmd, api_port)
exitcode, out, err = execute(cmd)
self.assertEqual(0, exitcode)
self.assertTrue('No queued images' in out.strip())
# verify two image id when queue-image
cmd = ("%s --port=%d --force "
"queue-image %s %s") % (exe_cmd, api_port, ids[0], ids[1])
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(1, exitcode)
self.assertTrue('Please specify one and only ID of '
'the image you wish to ' in out.strip())
# verify two image id when delete-queued-image
cmd = ("%s --port=%d --force delete-queued-image "
"%s %s") % (exe_cmd, api_port, ids[0], ids[1])
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(1, exitcode)
self.assertTrue('Please specify one and only ID of '
'the image you wish to ' in out.strip())
# verify two image id when delete-cached-image
cmd = ("%s --port=%d --force delete-cached-image "
"%s %s") % (exe_cmd, api_port, ids[0], ids[1])
exitcode, out, err = execute(cmd, raise_error=False)
self.assertEqual(1, exitcode)
self.assertTrue('Please specify one and only ID of '
'the image you wish to ' in out.strip())
self.stop_servers()
|
cschenck/blender_sim
|
refs/heads/master
|
fluid_sim_deps/blender-2.69/2.69/python/lib/python3.3/token.py
|
743
|
"""Token constants (from "token.h")."""
__all__ = ['tok_name', 'ISTERMINAL', 'ISNONTERMINAL', 'ISEOF']
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
LBRACE = 25
RBRACE = 26
EQEQUAL = 27
NOTEQUAL = 28
LESSEQUAL = 29
GREATEREQUAL = 30
TILDE = 31
CIRCUMFLEX = 32
LEFTSHIFT = 33
RIGHTSHIFT = 34
DOUBLESTAR = 35
PLUSEQUAL = 36
MINEQUAL = 37
STAREQUAL = 38
SLASHEQUAL = 39
PERCENTEQUAL = 40
AMPEREQUAL = 41
VBAREQUAL = 42
CIRCUMFLEXEQUAL = 43
LEFTSHIFTEQUAL = 44
RIGHTSHIFTEQUAL = 45
DOUBLESTAREQUAL = 46
DOUBLESLASH = 47
DOUBLESLASHEQUAL = 48
AT = 49
RARROW = 50
ELLIPSIS = 51
OP = 52
ERRORTOKEN = 53
N_TOKENS = 54
NT_OFFSET = 256
#--end constants--
tok_name = {value: name
for name, value in globals().items()
if isinstance(value, int) and not name.startswith('_')}
__all__.extend(tok_name.values())
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
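# Illustration (added comment, not part of the stdlib module): the generated
# constants are typically consumed together with the tokenize module, e.g.
#
#   import io, token, tokenize
#   for tok in tokenize.generate_tokens(io.StringIO("x = 1\n").readline):
#       print(token.tok_name[tok.type], repr(tok.string))
#
# which maps each numeric token type back to its symbolic name (NAME, OP, ...).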
def _main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError as err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = sorted(tokens.keys())
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError as err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
_main()
|
joebowen/landing_zone_project
|
refs/heads/master
|
simple_plot.py
|
1
|
import numpy as np
from matplotlib import pyplot as plt
import orhelper
with orhelper.OpenRocketInstance('/home/jbbowen/Desktop/OpenRocket Stuff/OpenRocket-15.03.jar'):
orh = orhelper.Helper()
# Load document, run simulation and get data and events
doc = orh.load_doc('estes_gnome.ork')
sim = doc.getSimulation(0)
orh.run_simulation(sim)
data = orh.get_timeseries(sim, ['Time', 'Altitude', 'Vertical velocity'] )
events = orh.get_events(sim)
# Make a custom plot of the simulation
events_to_annotate = ['Motor burnout', 'Apogee', 'Launch rod clearance']
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
ax1.plot(data['Time'], data['Altitude'], 'b-')
ax2.plot(data['Time'], data['Vertical velocity'], 'r-')
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Altitude (m)', color='b')
ax2.set_ylabel('Vertical Velocity (m/s)', color='r')
change_color = lambda ax, col : [x.set_color(col) for x in ax.get_yticklabels()]
change_color(ax1, 'b')
change_color(ax2, 'r')
index_at = lambda t : (np.abs(data['Time']-t)).argmin()
for name, time in events.items():
if not name in events_to_annotate: continue
ax1.annotate(name, xy=(time, data['Altitude'][index_at(time)] ), xycoords='data',
xytext=(20, 0), textcoords='offset points',
arrowprops=dict(arrowstyle="->", connectionstyle="arc3")
)
ax1.grid(True)
plt.show()
|
dvitme/odoo-kinesis-athletics
|
refs/heads/master
|
addons/kinesis_athletics_impex_xls/__init__.py
|
2
|
# -*- coding: utf-8 -*-
import report
import wizard
import evaluation
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
spark0001/spark2.1.1
|
refs/heads/master
|
examples/src/main/python/ml/vector_slicer_example.py
|
123
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import VectorSlicer
from pyspark.ml.linalg import Vectors
from pyspark.sql.types import Row
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("VectorSlicerExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
Row(userFeatures=Vectors.sparse(3, {0: -2.0, 1: 2.3})),
Row(userFeatures=Vectors.dense([-2.0, 2.3, 0.0]))])
slicer = VectorSlicer(inputCol="userFeatures", outputCol="features", indices=[1])
output = slicer.transform(df)
output.select("userFeatures", "features").show()
# $example off$
spark.stop()
|
heyglen/MockBlackBox
|
refs/heads/master
|
test/test_device_protocol.py
|
2
|
# -*- coding: utf-8 -*-
from mock_terminal import MockTerminal
from mockblackbox import DeviceConfiguration, DeviceProtocol
from nose.tools import assert_equals, assert_false
from examples import sample_configuration
class TestDeviceProtocol(object):
def setup(self):
self.terminal = MockTerminal()
self.configuration = DeviceConfiguration.get(sample_configuration, is_file_path=False)
def test_default_initialization(self):
protocol = DeviceProtocol(self.terminal, self.configuration)
assert_equals(protocol._login_banner, DeviceProtocol._default_login_banner)
assert_equals(protocol._logout_banner, DeviceProtocol._default_logout_banner)
assert_equals(protocol.context_manager.current.name, DeviceProtocol._default_context)
assert_equals(protocol.context_manager.current.prompt, DeviceProtocol._default_prompt)
def test_initialization(self):
login = 'login01'
logout = 'logout01'
context = 'ctx01'
prompt = 'prompt01'
self.configuration['login_banner'] = login
self.configuration['logout_banner'] = logout
self.configuration['context'] = context
self.configuration['prompt'] = prompt
protocol = DeviceProtocol(self.terminal, self.configuration)
assert_equals(protocol._login_banner, login)
assert_equals(protocol._logout_banner, logout)
assert_equals(protocol.context_manager.current.name, context)
assert_equals(protocol.context_manager.current.prompt, prompt)
assert_equals(id(protocol.terminal), id(self.terminal))
def test_connect(self):
terminal = self.terminal
protocol = DeviceProtocol(self.terminal, self.configuration)
protocol.connect()
assert_equals(terminal.output[-2], DeviceProtocol._default_login_banner)
assert_equals(terminal.output[-1], DeviceProtocol._default_prompt)
assert_equals(len(terminal.output), 2)
def test_logout_on_empty_context(self):
protocol = DeviceProtocol(self.terminal, self.configuration)
protocol.command('exit')
assert_false(self.terminal.connected)
def test_logout(self):
terminal = self.terminal
protocol = DeviceProtocol(self.terminal, self.configuration)
protocol._logout()
assert_equals(terminal.output[-1], DeviceProtocol._default_logout_banner)
assert_false(terminal.connected)
|
aldian/tensorflow
|
refs/heads/master
|
tensorflow/python/training/saver_large_partitioned_variable_test.py
|
141
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for tensorflow.python.training.saver.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
class SaverLargePartitionedVariableTest(test.TestCase):
# Need to do this in a separate test because of the amount of memory needed
# to run this test.
def testLargePartitionedVariables(self):
save_path = os.path.join(self.get_temp_dir(), "large_variable")
var_name = "my_var"
# Saving large partition variable.
with session.Session("", graph=ops.Graph()) as sess:
with ops.device("/cpu:0"):
# Create a partitioned variable which is larger than int32 size but
# split into smaller sized variables.
init = lambda shape, dtype, partition_info: constant_op.constant(
True, dtype, shape)
partitioned_var = partitioned_variables.create_partitioned_variables(
[1 << 31], [4], init, dtype=dtypes.bool, name=var_name)
variables.global_variables_initializer().run()
save = saver.Saver(partitioned_var)
val = save.save(sess, save_path)
self.assertEqual(save_path, val)
if __name__ == "__main__":
test.main()
|
mdaniel/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyCompatibilityInspection/unpackingInNonParenthesizedTuplesInReturnAndYield.py
|
10
|
def gen(xs):
yield 42, <warning descr="Python version 2.7 does not support starred expressions in tuples, lists, and sets"><warning descr="Python versions 3.5, 3.6, 3.7 do not support unpacking without parentheses in yield statements">*xs</warning></warning>
<warning descr="Python version 2.7 does not support this syntax. Delegating to a subgenerator is available since Python 3.3; use explicit iteration over subgenerator instead.">yield from 42, <error descr="Can't use starred expression here"><warning descr="Python version 2.7 does not support starred expressions in tuples, lists, and sets">*xs</warning></error></warning>
def func(xs):
return 42, <warning descr="Python version 2.7 does not support starred expressions in tuples, lists, and sets"><warning descr="Python versions 3.5, 3.6, 3.7 do not support unpacking without parentheses in return statements">*xs</warning></warning>
|
arghasen/Artificial_Intelligience
|
refs/heads/master
|
tutorial/projectParams.py
|
5
|
# projectParams.py
# ----------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
STUDENT_CODE_DEFAULT = 'addition.py,buyLotsOfFruit.py,shopSmart.py'
PROJECT_TEST_CLASSES = 'tutorialTestClasses.py'
PROJECT_NAME = 'Project 0: Tutorial'
BONUS_PIC = False
|
ixiom/phantomjs
|
refs/heads/master
|
src/qt/qtwebkit/Tools/QueueStatusServer/loggers/__init__.py
|
6014
|
# Required for Python to search this directory for module files
|
dimagol/trex-core
|
refs/heads/master
|
scripts/external_libs/urllib3/urllib3/__init__.py
|
7
|
"""
urllib3 - Thread-safe connection pooling and re-using.
"""
from __future__ import absolute_import
import warnings
from .connectionpool import (
HTTPConnectionPool,
HTTPSConnectionPool,
connection_from_url
)
from . import exceptions
from .filepost import encode_multipart_formdata
from .poolmanager import PoolManager, ProxyManager, proxy_from_url
from .response import HTTPResponse
from .util.request import make_headers
from .util.url import get_host
from .util.timeout import Timeout
from .util.retry import Retry
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
__author__ = 'Andrey Petrov (andrey.petrov@shazow.net)'
__license__ = 'MIT'
__version__ = 'dev'
__all__ = (
'HTTPConnectionPool',
'HTTPSConnectionPool',
'PoolManager',
'ProxyManager',
'HTTPResponse',
'Retry',
'Timeout',
'add_stderr_logger',
'connection_from_url',
'disable_warnings',
'encode_multipart_formdata',
'get_host',
'make_headers',
'proxy_from_url',
)
logging.getLogger(__name__).addHandler(NullHandler())
def add_stderr_logger(level=logging.DEBUG):
"""
Helper for quickly adding a StreamHandler to the logger. Useful for
debugging.
Returns the handler after adding it.
"""
# This method needs to be in this __init__.py to get the __name__ correct
# even if urllib3 is vendored within another package.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
logger.addHandler(handler)
logger.setLevel(level)
logger.debug('Added a stderr logging handler to logger: %s', __name__)
return handler
# ... Clean up.
del NullHandler
# All warning filters *must* be appended unless you're really certain that they
# shouldn't be: otherwise, it's very hard for users to use most Python
# mechanisms to silence them.
# SecurityWarning's always go off by default.
warnings.simplefilter('always', exceptions.SecurityWarning, append=True)
# SubjectAltNameWarning's should go off once per host
warnings.simplefilter('default', exceptions.SubjectAltNameWarning, append=True)
# InsecurePlatformWarning's don't vary between requests, so we keep it default.
warnings.simplefilter('default', exceptions.InsecurePlatformWarning,
append=True)
# SNIMissingWarnings should go off only once.
warnings.simplefilter('default', exceptions.SNIMissingWarning, append=True)
def disable_warnings(category=exceptions.HTTPWarning):
"""
Helper for quickly disabling all urllib3 warnings.
"""
warnings.simplefilter('ignore', category)
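# Usage sketch (illustrative, not part of urllib3 itself):
#
#   import logging
#   import urllib3
#
#   handler = urllib3.add_stderr_logger(logging.INFO)   # returns the handler
#   urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
#
# The first call attaches a StreamHandler to the "urllib3" logger for quick
# debugging; the second silences a single warning category rather than every
# HTTPWarning.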
|
ccnmtl/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.3/tests/regressiontests/get_or_create_regress/tests.py
|
88
|
from django.test import TestCase
from models import Author, Publisher
class GetOrCreateTests(TestCase):
def test_related(self):
p = Publisher.objects.create(name="Acme Publishing")
# Create a book through the publisher.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertTrue(created)
# The publisher should have one book.
self.assertEqual(p.books.count(), 1)
# Try get_or_create again, this time nothing should be created.
book, created = p.books.get_or_create(name="The Book of Ed & Fred")
self.assertFalse(created)
# And the publisher should still have one book.
self.assertEqual(p.books.count(), 1)
# Add an author to the book.
ed, created = book.authors.get_or_create(name="Ed")
self.assertTrue(created)
# The book should have one author.
self.assertEqual(book.authors.count(), 1)
# Try get_or_create again, this time nothing should be created.
ed, created = book.authors.get_or_create(name="Ed")
self.assertFalse(created)
# And the book should still have one author.
self.assertEqual(book.authors.count(), 1)
# Add a second author to the book.
fred, created = book.authors.get_or_create(name="Fred")
self.assertTrue(created)
# The book should have two authors now.
self.assertEqual(book.authors.count(), 2)
# Create an Author not tied to any books.
Author.objects.create(name="Ted")
# There should be three Authors in total. The book object should have two.
self.assertEqual(Author.objects.count(), 3)
self.assertEqual(book.authors.count(), 2)
# Try creating a book through an author.
_, created = ed.books.get_or_create(name="Ed's Recipes", publisher=p)
self.assertTrue(created)
# Now Ed has two Books, Fred just one.
self.assertEqual(ed.books.count(), 2)
self.assertEqual(fred.books.count(), 1)
# Use the publisher's primary key value instead of a model instance.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertTrue(created)
# Try get_or_create again, this time nothing should be created.
_, created = ed.books.get_or_create(name='The Great Book of Ed', publisher_id=p.id)
self.assertFalse(created)
# The publisher should have three books.
self.assertEqual(p.books.count(), 3)
|
40223147/finalexam
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/base.py
|
603
|
#!/usr/bin/env python
## https://bitbucket.org/pygame/pygame/raw/2383b8ab0e2273bc83c545ab9c18fee1f3459c64/pygame/base.py
'''Pygame core routines
Contains the core routines that are used by the rest of the
pygame modules. Its routines are merged directly into the pygame
namespace. This mainly includes the auto-initialization `init` and
`quit` routines.
There is a small module named `locals` that also gets merged into
this namespace. This contains all the constants needed by pygame.
Object constructors also get placed into this namespace, you can
call functions like `Rect` and `Surface` to create objects of
that type. As a convenience, you can import the members of
pygame.locals directly into your module's namespace with::
from pygame.locals import *
Most of the pygame examples do this if you'd like to take a look.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import atexit
import sys
#import SDL
_quitfunctions = []
class error(RuntimeError):
pass
def init():
'''Autoinitialize all imported pygame modules.
Initialize all imported pygame modules. Includes pygame modules
that are not part of the base modules (like font and image).
It does not raise exceptions, but instead silently counts which
modules have failed to init. The return argument contains a count
of the number of modules initialized, and the number of modules
that failed to initialize.
You can always initialize the modules you want by hand. The
modules that need it have an `init` and `quit` routine built in,
which you can call directly. They also have a `get_init` routine
which you can use to double-check the initialization. Note that
the manual `init` routines will raise an exception on error. Be
aware that most platforms require the display module to be
initialized before others. This `init` will handle that for you,
but if you initialize by hand, be aware of this constraint.
As with the manual `init` routines. It is safe to call this
`init` as often as you like.
:rtype: int, int
:return: (count_passed, count_failed)
'''
success = 0
fail = 0
#SDL.SDL_Init(SDL.SDL_INIT_EVENTTHREAD | SDL.SDL_INIT_TIMER)
if _video_autoinit():
success += 1
else:
fail += 1
for mod in sys.modules.values():
if hasattr(mod, '__PYGAMEinit__') and callable(mod.__PYGAMEinit__):
try:
mod.__PYGAMEinit__()
success += 1
except:
fail += 1
return success, fail
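# Illustration (added comment, not part of pygame): typical use of the
# auto-initialization above.
#
#   import pygame
#   passed, failed = pygame.init()
#   if failed:
#       print('%d pygame modules failed to initialize' % failed)
#
# Individual modules can still be initialized by hand (e.g. pygame.font.init()),
# in which case failures raise exceptions instead of being counted.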
def register_quit(func):
'''Routine to call when pygame quits.
The given callback routine will be called when pygame is
quitting. Quit callbacks are served on a 'last in, first out'
basis.
'''
_quitfunctions.append(func)
def _video_autoquit():
if SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
SDL.SDL_QuitSubSystem(SDL.SDL_INIT_VIDEO)
def _video_autoinit():
return 1
#if not SDL.SDL_WasInit(SDL.SDL_INIT_VIDEO):
# SDL.SDL_InitSubSystem(SDL.SDL_INIT_VIDEO)
# SDL.SDL_EnableUNICODE(1)
#return 1
def _atexit_quit():
while _quitfunctions:
func = _quitfunctions.pop()
func()
_video_autoquit()
#SDL.SDL_Quit()
def get_sdl_version():
'''Get the version of the linked SDL runtime.
:rtype: int, int, int
:return: major, minor, patch
'''
#v = SDL.SDL_Linked_Version()
#return v.major, v.minor, v.patch
return None, None, None
def quit():
'''Uninitialize all pygame modules.
Uninitialize all pygame modules that have been initialized. Even
if you initialized the module by hand, this `quit` will
uninitialize it for you.
All the pygame modules are uninitialized automatically when your
program exits, so you will usually not need this routine. If your
program plans to keep running after it is done with pygame, then this
would be a good time to make this call.
'''
_atexit_quit()
def get_error():
'''Get current error message.
SDL maintains an internal current error message. This message is
usually given to you when an SDL related exception occurs, but
sometimes you may want to call this directly yourself.
:rtype: str
'''
#return SDL.SDL_GetError()
return ''
def _rgba_from_obj(obj):
if not type(obj) in (tuple, list):
return None
if len(obj) == 1:
return _rgba_from_obj(obj[0])
elif len(obj) == 3:
return (int(obj[0]), int(obj[1]), int(obj[2]), 255)
elif len(obj) == 4:
return obj
else:
return None
atexit.register(_atexit_quit)
|
llhe/tensorflow
|
refs/heads/master
|
tensorflow/contrib/distributions/python/kernel_tests/vector_student_t_test.py
|
75
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for MultivariateStudentsT Distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import linalg
from scipy import special
from tensorflow.contrib.distributions.python.ops.vector_student_t import _VectorStudentT
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class _FakeVectorStudentT(object):
"""Fake scipy implementation for Multivariate Student's t-distribution.
Technically we don't need to test the `Vector Student's t-distribution` since
it's composed of only unit-tested parts. However, this _FakeVectorStudentT
serves as something like an end-to-end test of the
`TransformedDistribution + Affine` API.
Other `Vector*` implementations need only test new code. That we don't need
to test every Vector* distribution is good because there aren't SciPy
analogs and reimplementing everything in NumPy sort of defeats the point of
having the `TransformedDistribution + Affine` API.
"""
def __init__(self, df, loc, scale_tril):
self._df = np.asarray(df)
self._loc = np.asarray(loc)
self._scale_tril = np.asarray(scale_tril)
def log_prob(self, x):
def _compute(df, loc, scale_tril, x):
k = scale_tril.shape[-1]
ildj = np.sum(np.log(np.abs(np.diag(scale_tril))), axis=-1)
logz = ildj + k * (0.5 * np.log(df) +
0.5 * np.log(np.pi) +
special.gammaln(0.5 * df) -
special.gammaln(0.5 * (df + 1.)))
y = linalg.solve_triangular(scale_tril, np.matrix(x - loc).T,
lower=True, overwrite_b=True)
logs = -0.5 * (df + 1.) * np.sum(np.log1p(y**2. / df), axis=-2)
return logs - logz
if not self._df.shape:
return _compute(self._df, self._loc, self._scale_tril, x)
return np.concatenate([
[_compute(self._df[i], self._loc[i], self._scale_tril[i], x[:, i, :])]
for i in range(len(self._df))]).T
def prob(self, x):
return np.exp(self.log_prob(x))
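# For reference (added comment, not in the original test): _compute() above
# evaluates the log-density of independent standard Student's t variables
# pushed through the affine map x = loc + scale_tril.dot(y), i.e.
#   log p(x) = sum_i log t_df(y_i) - log|det(scale_tril)|
# with y = solve(scale_tril, x - loc); the gammaln/log terms form the
# normalizer and the log1p term the unnormalized t kernel.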
class VectorStudentTTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testProbStaticScalar(self):
with self.test_session():
# Scalar batch_shape.
df = np.asarray(3., dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1], dtype=np.float32)
scale_diag = np.asarray([2.], dtype=np.float32)
scale_tril = np.diag(scale_diag)
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
x = 2. * self._rng.rand(4, 1).astype(np.float32) - 1.
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbStatic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2, 3], dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
with self.test_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbDynamic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2, 3], dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df, loc=loc, scale_tril=scale_tril)
with self.test_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
def testProbScalarBaseDistributionNonScalarTransform(self):
# Scalar batch_shape.
df = np.asarray(2., dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=np.tile(df, reps=len(scale_diag)),
loc=loc,
scale_tril=scale_tril)
with self.test_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbScalarBaseDistributionNonScalarTransformDynamic(self):
# Scalar batch_shape.
df = np.asarray(2., dtype=np.float32)
# Non-scalar batch_shape.
loc = np.asarray([[0., 0, 0],
[1, 2, 3],
[1, 0, 1]],
dtype=np.float32)
scale_diag = np.asarray([[1., 2, 3],
[2, 3, 4],
[4, 5, 6]],
dtype=np.float32)
scale_tril = np.concatenate([[np.diag(scale_diag[i])]
for i in range(len(scale_diag))])
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=np.tile(df, reps=len(scale_diag)),
loc=loc,
scale_tril=scale_tril)
with self.test_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
def testProbNonScalarBaseDistributionScalarTransform(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2., 3.], dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1, 2, 3], dtype=np.float32)
scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
scale_tril = np.diag(scale_diag)
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df,
loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
reps=[len(df), 1, 1]))
with self.test_session():
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(),
rtol=0., atol=1e-5)
def testProbNonScalarBaseDistributionScalarTransformDynamic(self):
# Non-scalar batch_shape.
df = np.asarray([1., 2., 3.], dtype=np.float32)
# Scalar batch_shape.
loc = np.asarray([1, 2, 3], dtype=np.float32)
scale_diag = np.asarray([2, 3, 4], dtype=np.float32)
scale_tril = np.diag(scale_diag)
x = 2. * self._rng.rand(4, 3, 3).astype(np.float32) - 1.
expected_mst = _FakeVectorStudentT(
df=df,
loc=np.tile(loc[array_ops.newaxis, :], reps=[len(df), 1]),
scale_tril=np.tile(scale_tril[array_ops.newaxis, :, :],
reps=[len(df), 1, 1]))
with self.test_session():
df_pl = array_ops.placeholder(dtypes.float32, name="df")
loc_pl = array_ops.placeholder(dtypes.float32, name="loc")
scale_diag_pl = array_ops.placeholder(dtypes.float32, name="scale_diag")
feed_dict = {df_pl: df, loc_pl: loc, scale_diag_pl: scale_diag}
actual_mst = _VectorStudentT(df=df, loc=loc, scale_diag=scale_diag,
validate_args=True)
self.assertAllClose(expected_mst.log_prob(x),
actual_mst.log_prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
self.assertAllClose(expected_mst.prob(x),
actual_mst.prob(x).eval(feed_dict=feed_dict),
rtol=0., atol=1e-5)
if __name__ == "__main__":
test.main()
|
rossgoodwin/musapaedia
|
refs/heads/master
|
musapaedia/muse/lib/python2.7/site-packages/pip/commands/list.py
|
84
|
from __future__ import absolute_import
import logging
import warnings
from pip.basecommand import Command
from pip.exceptions import DistributionNotFound
from pip.index import PackageFinder
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions, dist_is_editable
from pip.utils.deprecation import RemovedInPip7Warning
from pip.cmdoptions import make_option_group, index_group
logger = logging.getLogger(__name__)
class ListCommand(Command):
"""
List installed packages, including editables.
Packages are listed in a case-insensitive sorted order.
"""
name = 'list'
usage = """
%prog [options]"""
summary = 'List installed packages.'
def __init__(self, *args, **kw):
super(ListCommand, self).__init__(*args, **kw)
cmd_opts = self.cmd_opts
cmd_opts.add_option(
'-o', '--outdated',
action='store_true',
default=False,
help='List outdated packages (excluding editables)')
cmd_opts.add_option(
'-u', '--uptodate',
action='store_true',
default=False,
help='List uptodate packages (excluding editables)')
cmd_opts.add_option(
'-e', '--editable',
action='store_true',
default=False,
help='List editable projects.')
cmd_opts.add_option(
'-l', '--local',
action='store_true',
default=False,
help=('If in a virtualenv that has global access, do not list '
'globally-installed packages.'),
)
self.cmd_opts.add_option(
'--user',
dest='user',
action='store_true',
default=False,
help='Only output packages installed in user-site.')
cmd_opts.add_option(
'--pre',
action='store_true',
default=False,
help=("Include pre-release and development versions. By default, "
"pip only finds stable versions."),
)
index_opts = make_option_group(index_group, self.parser)
self.parser.insert_option_group(0, index_opts)
self.parser.insert_option_group(0, cmd_opts)
def _build_package_finder(self, options, index_urls, session):
"""
Create a package finder appropriate to this list command.
"""
return PackageFinder(
find_links=options.find_links,
index_urls=index_urls,
allow_external=options.allow_external,
allow_unverified=options.allow_unverified,
allow_all_external=options.allow_all_external,
allow_all_prereleases=options.pre,
trusted_hosts=options.trusted_hosts,
process_dependency_links=options.process_dependency_links,
session=session,
)
def run(self, options, args):
if options.outdated:
self.run_outdated(options)
elif options.uptodate:
self.run_uptodate(options)
elif options.editable:
self.run_editables(options)
else:
self.run_listing(options)
def run_outdated(self, options):
for dist, version in self.find_packages_latests_versions(options):
if version > dist.parsed_version:
logger.info(
'%s (Current: %s Latest: %s)',
dist.project_name, dist.version, version,
)
def find_packages_latests_versions(self, options):
index_urls = [options.index_url] + options.extra_index_urls
if options.no_index:
logger.info('Ignoring indexes: %s', ','.join(index_urls))
index_urls = []
if options.use_mirrors:
warnings.warn(
"--use-mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
if options.mirrors:
warnings.warn(
"--mirrors has been deprecated and will be removed in the "
"future. Explicit uses of --index-url and/or --extra-index-url"
" is suggested.",
RemovedInPip7Warning,
)
index_urls += options.mirrors
dependency_links = []
for dist in get_installed_distributions(local_only=options.local,
user_only=options.user):
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt'),
)
with self._build_session(options) as session:
finder = self._build_package_finder(options, index_urls, session)
finder.add_dependency_links(dependency_links)
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
include_editables=False,
)
for dist in installed_packages:
req = InstallRequirement.from_line(
dist.key, None, isolated=options.isolated_mode,
)
try:
link = finder.find_requirement(req, True)
                    # If link is None, the installed version is already the
                    # most up-to-date one
if link is None:
continue
except DistributionNotFound:
continue
else:
remote_version = finder._link_package_versions(
link, req.name
).version
yield dist, remote_version
def run_listing(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
)
self.output_package_listing(installed_packages)
def run_editables(self, options):
installed_packages = get_installed_distributions(
local_only=options.local,
user_only=options.user,
editables_only=True,
)
self.output_package_listing(installed_packages)
def output_package_listing(self, installed_packages):
installed_packages = sorted(
installed_packages,
key=lambda dist: dist.project_name.lower(),
)
for dist in installed_packages:
if dist_is_editable(dist):
line = '%s (%s, %s)' % (
dist.project_name,
dist.version,
dist.location,
)
else:
line = '%s (%s)' % (dist.project_name, dist.version)
logger.info(line)
def run_uptodate(self, options):
uptodate = []
for dist, version in self.find_packages_latests_versions(options):
if dist.parsed_version == version:
uptodate.append(dist)
self.output_package_listing(uptodate)
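# Usage sketch (added for illustration; not part of upstream pip): the class is
# normally dispatched through pip's CLI, but it can be driven directly, roughly
# equivalent to `pip list --outdated`. Assumes basecommand.Command.main() accepts
# a list of CLI arguments; the helper below is hypothetical and never called.
def _example_run_list_outdated():
    return ListCommand().main(['--outdated'])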
|
alianmohammad/pd-gem5
|
refs/heads/master
|
src/arch/arm/ArmSystem.py
|
14
|
# Copyright (c) 2009, 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
from m5.params import *
from System import System
class ArmMachineType(Enum):
map = {'RealView_EB' : 827,
'RealView_PBX' : 1901,
'VExpress_EMM' : 2272,
'VExpress_EMM64' : 2272}
class ArmSystem(System):
type = 'ArmSystem'
cxx_header = "arch/arm/system.hh"
load_addr_mask = 0xffffffff
multi_proc = Param.Bool(True, "Multiprocessor system?")
boot_loader = Param.String("", "File that contains the boot loader code if any")
    gic_cpu_addr = Param.Addr(0, "Address of the GIC CPU interface")
flags_addr = Param.Addr(0, "Address of the flags register for MP booting")
have_security = Param.Bool(False,
"True if Security Extensions are implemented")
have_virtualization = Param.Bool(False,
"True if Virtualization Extensions are implemented")
have_lpae = Param.Bool(False, "True if LPAE is implemented")
highest_el_is_64 = Param.Bool(False,
"True if the register width of the highest implemented exception level "
"is 64 bits (ARMv8)")
reset_addr_64 = Param.Addr(0x0,
"Reset address if the highest implemented exception level is 64 bits "
"(ARMv8)")
phys_addr_range_64 = Param.UInt8(40,
"Supported physical address range in bits when using AArch64 (ARMv8)")
have_large_asid_64 = Param.Bool(False,
"True if ASID is 16 bits in AArch64 (ARMv8)")
class GenericArmSystem(ArmSystem):
type = 'GenericArmSystem'
cxx_header = "arch/arm/system.hh"
load_addr_mask = 0x0fffffff
machine_type = Param.ArmMachineType('VExpress_EMM',
"Machine id from http://www.arm.linux.org.uk/developer/machines/")
atags_addr = Param.Addr("Address where default atags structure should " \
"be written")
dtb_filename = Param.String("",
"File that contains the Device Tree Blob. Don't use DTB if empty.")
early_kernel_symbols = Param.Bool(False,
"enable early kernel symbol tables before MMU")
enable_context_switch_stats_dump = Param.Bool(False, "enable stats/task info dumping at context switch boundaries")
panic_on_panic = Param.Bool(False, "Trigger a gem5 panic if the " \
"guest kernel panics")
panic_on_oops = Param.Bool(False, "Trigger a gem5 panic if the " \
"guest kernel oopses")
class LinuxArmSystem(GenericArmSystem):
type = 'LinuxArmSystem'
cxx_header = "arch/arm/linux/system.hh"
class FreebsdArmSystem(GenericArmSystem):
type = 'FreebsdArmSystem'
cxx_header = "arch/arm/freebsd/system.hh"
|
anchor/ceilometer-publisher-vaultaire
|
refs/heads/master
|
ceilometer_publisher_vaultaire/siphash.py
|
1
|
r'''
<MIT License>
Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
</MIT License>
SipHash-2-4 implementation, following the 'hashlib' API:
>>> key = b'0123456789ABCDEF'
>>> SipHash_2_4(key, b'a').hexdigest()
b'864c339cb0dc0fac'
>>> SipHash_2_4(key, b'a').digest()
b'\x86L3\x9c\xb0\xdc\x0f\xac'
>>> SipHash_2_4(key, b'a').hash()
12398370950267227270
>>> SipHash_2_4(key).update(b'a').hash()
12398370950267227270
>>> key = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
>>> SipHash_2_4(key, b'').hash()
8246050544436514353
>>> SipHash_2_4(key, b'').hexdigest()
b'310e0edd47db6f72'
'''
import struct
import binascii
def _doublesipround(v, m):
'''
Internal helper. Xors 'm' to 'v3', runs two rounds of siphash on
vector 'v' and xors 'm' to 'v0'.
>>> _doublesipround((1,2,3,4),0)
(9263201270060220426, 2307743542053503000, 5255419393243893904, 10208987565802066018)
>>> _doublesipround((1,2,3,4),0xff)
(11557575153743626750, 2307780510495171325, 7519994316568162407, 5442382437785464174)
>>> _doublesipround((0,0,0,0),0)
(0, 0, 0, 0)
>>> _doublesipround((0,0,0,0),0xff)
(2368684213854535680, 36416423977725, 2305811110491594975, 15626573430810475768)
'''
a, b, c, d = v
d ^= m
e = (a + b) & 0xffffffffffffffff
i = (((b & 0x7ffffffffffff) << 13) | (b >> 51)) ^ e
f = c + d
j = ((((d) << 16) | (d >> 48)) ^ f ) & 0xffffffffffffffff
h = (f + i) & 0xffffffffffffffff
k = ((e << 32) | (e >> 32)) + j
l = (((i & 0x7fffffffffff) << 17) | (i >> 47)) ^ h
o = (((j << 21) | (j >> 43)) ^ k) & 0xffffffffffffffff
p = (k + l) & 0xffffffffffffffff
q = (((l & 0x7ffffffffffff) << 13) | (l >> 51)) ^ p
r = ((h << 32) | (h >> 32)) + o
s = (((o << 16) | (o >> 48)) ^ r) & 0xffffffffffffffff
t = (r + q) & 0xffffffffffffffff
u = (((p << 32) | (p >> 32)) + s) & 0xffffffffffffffff
return (u ^ m,
(((q & 0x7fffffffffff) << 17) | (q >> 47)) ^ t,
((t & 0xffffffff) << 32) | (t >> 32),
(((s & 0x7ffffffffff) << 21) | (s >> 43)) ^ u)
_zeroes = b'\x00\x00\x00\x00\x00\x00\x00\x00'
_oneQ = struct.Struct('<Q')
_twoQ = struct.Struct('<QQ')
class SipHash_2_4(object):
r'''
>>> SipHash_2_4(b'0123456789ABCDEF', b'a').hash()
12398370950267227270
>>> SipHash_2_4(b'0123456789ABCDEF', b'').hash()
3627314469837380007
>>> SipHash_2_4(b'FEDCBA9876543210', b'').hash()
2007056766899708634
>>> SipHash_2_4(b'FEDCBA9876543210').update(b'').update(b'').hash()
2007056766899708634
>>> SipHash_2_4(b'FEDCBA9876543210', b'a').hash()
6581475155582014123
>>> SipHash_2_4(b'FEDCBA9876543210').update(b'a').hash()
6581475155582014123
>>> SipHash_2_4(b'FEDCBA9876543210').update(b'a').update(b'').hash()
6581475155582014123
>>> SipHash_2_4(b'FEDCBA9876543210').update(b'').update(b'a').hash()
6581475155582014123
>>> a = SipHash_2_4(b'FEDCBA9876543210').update(b'a')
>>> a.hash()
6581475155582014123
>>> b = a.copy()
>>> a.hash(), b.hash()
(6581475155582014123, 6581475155582014123)
>>> a.update(b'a') and None
>>> a.hash(), b.hash()
(3258273892680892829, 6581475155582014123)
'''
digest_size = 16
block_size = 64
s = b''
b = 0
def __init__(self, secret, s=b''):
# key's encoded as little endian
k0, k1 = _twoQ.unpack(secret)
self.v = (0x736f6d6570736575 ^ k0,
0x646f72616e646f6d ^ k1,
0x6c7967656e657261 ^ k0,
0x7465646279746573 ^ k1)
self.update(s)
def update(self, s):
s = self.s + s
lim = (len(s)//8)*8
v = self.v
off = 0
for off in range(0, lim, 8):
m, = _oneQ.unpack_from(s, off)
# print 'v0 %016x' % v[0]
# print 'v1 %016x' % v[1]
# print 'v2 %016x' % v[2]
# print 'v3 %016x' % v[3]
# print 'compress %016x' % m
v = _doublesipround(v, m)
self.v = v
self.b += lim
self.s = s[lim:]
return self
def hash(self):
l = len(self.s)
assert l < 8
b = (((self.b + l) & 0xff) << 56)
b |= _oneQ.unpack_from(self.s+_zeroes)[0]
v = self.v
# print 'v0 %016x' % v[0]
# print 'v1 %016x' % v[1]
# print 'v2 %016x' % v[2]
# print 'v3 %016x' % v[3]
# print 'padding %016x' % b
v = _doublesipround(v, b)
# print 'v0 %016x' % v0
# print 'v1 %016x' % v1
# print 'v2 %016x' % v2
# print 'v3 %016x' % v3
v = list(v)
v[2] ^= 0xff
v = _doublesipround(_doublesipround(v, 0), 0)
return v[0] ^ v[1] ^ v[2] ^ v[3]
def digest(self):
return _oneQ.pack(self.hash())
def hexdigest(self):
return binascii.hexlify(self.digest())
def copy(self):
n = SipHash_2_4(_zeroes * 2)
n.v, n.s, n.b = self.v, self.s, self.b
return n
siphash24 = SipHash_2_4
SipHash24 = SipHash_2_4
if __name__ == "__main__":
# Test vectors as per spec
vectors = [c.encode('utf-8') for c in [
"310e0edd47db6f72", "fd67dc93c539f874", "5a4fa9d909806c0d", "2d7efbd796666785",
"b7877127e09427cf", "8da699cd64557618", "cee3fe586e46c9cb", "37d1018bf50002ab",
"6224939a79f5f593", "b0e4a90bdf82009e", "f3b9dd94c5bb5d7a", "a7ad6b22462fb3f4",
"fbe50e86bc8f1e75", "903d84c02756ea14", "eef27a8e90ca23f7", "e545be4961ca29a1",
"db9bc2577fcc2a3f", "9447be2cf5e99a69", "9cd38d96f0b3c14b", "bd6179a71dc96dbb",
"98eea21af25cd6be", "c7673b2eb0cbf2d0", "883ea3e395675393", "c8ce5ccd8c030ca8",
"94af49f6c650adb8", "eab8858ade92e1bc", "f315bb5bb835d817", "adcf6b0763612e2f",
"a5c91da7acaa4dde", "716595876650a2a6", "28ef495c53a387ad", "42c341d8fa92d832",
"ce7cf2722f512771", "e37859f94623f3a7", "381205bb1ab0e012", "ae97a10fd434e015",
"b4a31508beff4d31", "81396229f0907902", "4d0cf49ee5d4dcca", "5c73336a76d8bf9a",
"d0a704536ba93e0e", "925958fcd6420cad", "a915c29bc8067318", "952b79f3bc0aa6d4",
"f21df2e41d4535f9", "87577519048f53a9", "10a56cf5dfcd9adb", "eb75095ccd986cd0",
"51a9cb9ecba312e6", "96afadfc2ce666c7", "72fe52975a4364ee", "5a1645b276d592a1",
"b274cb8ebf87870a", "6f9bb4203de7b381", "eaecb2a30b22a87f", "9924a43cc1315724",
"bd838d3aafbf8db7", "0b1a2a3265d51aea", "135079a3231ce660", "932b2846e4d70666",
"e1915f5cb1eca46c", "f325965ca16d629f", "575ff28e60381be5", "724506eb4c328a95",
]]
key = ''.join(chr(i) for i in range(16)).encode('utf-8')
plaintext = ''.join(chr(i) for i in range(64)).encode('utf-8')
for i in range(64):
assert SipHash_2_4(key, plaintext[:i]).hexdigest() == vectors[i], \
'failed on test no %i' % i
# Internal doctests
#
# To maintain compatibility with both python 2.x and 3.x in tests
# we need to do a trick. Python 2.x doesn't like b'' notation,
# Python 3.x doesn't have 2222L long integers notation. To
# overcome that we'll pipe both results as well as the intended
# doctest output through an `eval` function before comparison. To
# do it we need to monkeypatch the OutputChecker:
import doctest
EVAL_FLAG = doctest.register_optionflag("EVAL")
OrigOutputChecker = doctest.OutputChecker
def relaxed_eval(s):
if s.strip():
return eval(s)
else:
return None
class MyOutputChecker:
def __init__(self):
self.orig = OrigOutputChecker()
def check_output(self, want, got, optionflags):
if optionflags & EVAL_FLAG:
return relaxed_eval(got) == relaxed_eval(want)
else:
return self.orig.check_output(want, got, optionflags)
def output_difference(self, example, got, optionflags):
return self.orig.output_difference(example, got, optionflags)
doctest.OutputChecker = MyOutputChecker
# Monkey patching done. Go for doctests:
if doctest.testmod(optionflags=EVAL_FLAG)[0] == 0: print("all tests ok")
|
mrunge/openstack_horizon
|
refs/heads/master
|
openstack_horizon/dashboards/project/data_processing/cluster_templates/forms.py
|
1
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.utils.translation import ugettext_lazy as _
from horizon_lib import exceptions
from horizon_lib import forms
from openstack_horizon.api import sahara as saharaclient
from openstack_horizon.dashboards.project.data_processing. \
utils import workflow_helpers
LOG = logging.getLogger(__name__)
class UploadFileForm(forms.SelfHandlingForm,
workflow_helpers.PluginAndVersionMixin):
template_name = forms.CharField(max_length=80,
label=_("Cluster Template Name"))
def __init__(self, request, *args, **kwargs):
super(UploadFileForm, self).__init__(request, *args, **kwargs)
sahara = saharaclient.client(request)
self._generate_plugin_version_fields(sahara)
self.fields['template_file'] = forms.FileField(label=_("Template"))
def handle(self, request, data):
try:
# we can set a limit on file size, but should we?
filecontent = self.files['template_file'].read()
plugin_name = data['plugin_name']
hadoop_version = data.get(plugin_name + "_version")
saharaclient.plugin_convert_to_template(request,
plugin_name,
hadoop_version,
data['template_name'],
filecontent)
return True
except Exception:
exceptions.handle(request,
_("Unable to upload cluster template file"))
return False
|
krzysztofwos/BitcoinUnlimited
|
refs/heads/dev
|
qa/rpc-tests/test_framework/mininode.py
|
8
|
#!/usr/bin/env python3
# Copyright (c) 2010 ArtForz -- public domain half-a-node
# Copyright (c) 2012 Jeff Garzik
# Copyright (c) 2010-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# mininode.py - Bitcoin P2P network half-a-node
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import time
import sys
import random
from binascii import hexlify, unhexlify
from io import BytesIO
from codecs import encode
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
BIP0031_VERSION = 60000
MY_VERSION = 60001 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"
MAX_INV_SZ = 50000
MAX_BLOCK_SIZE = 1000000
COIN = 100000000 # 1 btc in satoshis
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to work around an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
def deser_uint256(f):
r = 0
for i in range(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = b""
for i in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
def uint256_from_str(s):
r = 0
t = struct.unpack("<IIIIIIII", s[:32])
for i in range(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFF) << (8 * (nbytes - 3))
return v
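# Worked example (added for illustration; helper is hypothetical and never called):
# the compact nBits encoding expands to mantissa * 256^(exponent - 3), so the
# well-known value 0x1d00ffff becomes 0x00ffff shifted left by 8 * (0x1d - 3) bits.
def _example_compact_target():
    target = uint256_from_compact(0x1d00ffff)
    assert target == 0x00ffff * 2 ** (8 * (0x1d - 3))
    return target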
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
for i in l:
r += struct.pack("<i", i)
return r
# Deserialize from a hex string representation (eg from RPC)
def FromHex(obj, hex_string):
obj.deserialize(BytesIO(unhexlify(hex_string.encode('ascii'))))
return obj
# Convert a binary-serializable object to hex (eg for submission via RPC)
def ToHex(obj):
return hexlify(obj.serialize()).decode('ascii')
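# Usage sketch (added for illustration; helper is hypothetical and never called):
# ToHex() and FromHex() are inverses for any serializable object. CTransaction is
# defined further below in this module.
def _example_hex_roundtrip():
    tx = CTransaction()
    hex_str = ToHex(tx)
    assert ToHex(FromHex(CTransaction(), hex_str)) == hex_str
    return hex_str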
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = b"\x00" * 10 + b"\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = MY_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig=b"", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = b""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=b""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // COIN, self.nValue % COIN,
hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.nVersion = 1
self.vin = []
self.vout = []
self.nLockTime = 0
self.sha256 = None
self.hash = None
else:
self.nVersion = tx.nVersion
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = encode(hash256(self.serialize())[::-1], 'hex_codec').decode('ascii')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000 * COIN:
return False
return True
def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 1
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
self.hash = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
return r
def calc_sha256(self):
if self.sha256 is None:
r = b""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += struct.pack("<I", self.nNonce)
self.sha256 = uint256_from_str(hash256(r))
self.hash = encode(hash256(r)[::-1], 'hex_codec').decode('ascii')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce)
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in range(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self):
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self):
self.rehash()
target = uint256_from_compact(self.nBits)
while self.sha256 > target:
self.nNonce += 1
self.rehash()
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x nTime=%s nBits=%08x nNonce=%08x vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.vtx))
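# Usage sketch (added; the arguments are hypothetical): a test typically fills in
# the header fields, recomputes the merkle root from vtx, and grinds the nonce
# with solve() until the header hash meets the target implied by nBits.
def _example_build_block(prev_hash, ntime, nbits, coinbase_tx):
    block = CBlock()
    block.nVersion = 1
    block.hashPrevBlock = prev_hash
    block.nTime = ntime
    block.nBits = nbits
    block.vtx = [coinbase_tx]
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    return block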
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = b""
self.strStatusBar = b""
self.strReserved = b""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = b""
self.vchSig = b""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = b""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = b"version"
def __init__(self):
self.nVersion = MY_VERSION
self.nServices = 1
self.nTime = int(time.time())
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = b""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = b"verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = b"addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = b"alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = b""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = b"inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = b"getdata"
def __init__(self, inv=None):
        self.inv = inv if inv is not None else []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = b"getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = b"tx"
    def __init__(self, tx=None):
        # Use a fresh CTransaction per message instead of a shared mutable default.
        self.tx = tx if tx is not None else CTransaction()
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = b"block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = b"getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = b"ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = b"ping"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = b"pong"
def __init__(self, nonce=0):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = b""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = b"mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_mempool()"
class msg_sendheaders(object):
command = b"sendheaders"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return b""
def __repr__(self):
return "msg_sendheaders()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = b"getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = b""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
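# Usage sketch (added; known_hash is a hypothetical block hash): request the
# headers that follow a known block; hashstop=0 asks for as many headers as the
# peer will send. The helper is never called.
def _example_getheaders(known_hash):
    m = msg_getheaders()
    m.locator.vHave = [known_hash]
    m.hashstop = 0
    return m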
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = b"headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = b"reject"
REJECT_MALFORMED = 1
def __init__(self):
self.message = b""
self.code = 0
self.reason = b""
self.data = 0
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.code != self.REJECT_MALFORMED and
(self.message == b"block" or self.message == b"tx")):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
# Helper function
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
attempt = 0
elapsed = 0
while attempt < attempts and elapsed < timeout:
with mininode_lock:
if predicate():
return True
attempt += 1
elapsed += 0.05
time.sleep(0.05)
return False
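# Usage sketch (added; callback_obj is a hypothetical NodeConnCB-style object):
# tests typically poll a flag set by one of the on_* handlers with a bounded timeout.
def _example_wait_for_verack_flag(callback_obj):
    return wait_until(lambda: callback_obj.verack_received, timeout=30)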
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# deliver_sleep_time is helpful for debugging race conditions in p2p
# tests; it causes message delivery to sleep for the specified time
# before acquiring the global lock and delivering the next message.
self.deliver_sleep_time = None
def set_deliver_sleep_time(self, value):
with mininode_lock:
self.deliver_sleep_time = value
def get_deliver_sleep_time(self):
with mininode_lock:
return self.deliver_sleep_time
# Spin until verack message is received from the node.
# Tests may want to use this as a signal that the test can begin.
# This can be called from the testing thread, so it needs to acquire the
# global lock.
def wait_for_verack(self):
while True:
with mininode_lock:
if self.verack_received:
return
time.sleep(0.05)
def deliver(self, conn, message):
deliver_sleep = self.get_deliver_sleep_time()
if deliver_sleep is not None:
time.sleep(deliver_sleep)
with mininode_lock:
try:
getattr(self, 'on_' + message.command.decode('ascii'))(conn, message)
except:
print("ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0]))
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(MY_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
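# Illustrative subclass (added, hypothetical): the pattern described above is to
# reimplement on_* handlers, e.g. to record every inventory announcement while
# keeping the default getdata reply.
class _ExampleInvLogger(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.seen_invs = []
    def on_inv(self, conn, message):
        self.seen_invs.extend(message.inv)
        NodeConnCB.on_inv(self, conn, message)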
# More useful callbacks and functions for NodeConnCBs which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
self.ping_counter = 1
self.last_pong = msg_pong()
def add_connection(self, conn):
self.connection = conn
# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)
def on_pong(self, conn, message):
self.last_pong = message
# Sync up with the node
def sync_with_ping(self, timeout=30):
def received_pong():
return (self.last_pong.nonce == self.ping_counter)
self.send_message(msg_ping(nonce=self.ping_counter))
success = wait_until(received_pong, timeout)
self.ping_counter += 1
return success
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
b"version": msg_version,
b"verack": msg_verack,
b"addr": msg_addr,
b"alert": msg_alert,
b"inv": msg_inv,
b"getdata": msg_getdata,
b"getblocks": msg_getblocks,
b"tx": msg_tx,
b"block": msg_block,
b"getaddr": msg_getaddr,
b"ping": msg_ping,
b"pong": msg_pong,
b"headers": msg_headers,
b"getheaders": msg_getheaders,
b"reject": msg_reject,
b"mempool": msg_mempool,
}
MAGIC_BYTES = {
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
"testnet3": b"\x0b\x11\x09\x07", # testnet3
"regtest": b"\xfa\xbf\xb5\xda" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", services=1):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = b""
self.recvbuf = b""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version()
vt.nServices = services
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print('MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport))
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
self.sendbuf = b""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
try:
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split(b"\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = BytesIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
except Exception as e:
print('got_data:', repr(e))
# import traceback
# traceback.print_tb(sys.exc_info()[2])
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += b"\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == b"version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap[b'ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap[b'ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
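# End-to-end usage sketch (added; the address, port and missing rpc handle are
# assumptions): the usual wiring is a callback object, a NodeConn bound to it,
# and a NetworkThread driving the asyncore loop. The helper is never called.
def _example_wiring():
    callback = SingleNodeConnCB()
    conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=callback, net="regtest")
    callback.add_connection(conn)
    NetworkThread().start()
    callback.wait_for_verack()
    return callback.sync_with_ping()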
|
rvanlaar/easy-transifex
|
refs/heads/master
|
src/transifex/transifex/addons/autofetch/forms.py
|
3
|
from django import forms
from autofetch.models import URLInfo
class URLInfoForm(forms.ModelForm):
class Meta:
model = URLInfo
fields = ('source_file_url','auto_update', )
|
janebeckman/gpdb
|
refs/heads/master
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/fts/fts_transitions/test_fts_transitions_01.py
|
7
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import tinctest
from mpp.gpdb.tests.storage.fts.fts_transitions.fts_transitions import FTSTestCase
class FtsTransitionsPart01(FTSTestCase):
''' State of FTS at different fault points
'''
def __init__(self, methodName):
super(FtsTransitionsPart01,self).__init__(methodName)
def test_filerep_sync_ct(self):
'''
@data_provider sync_ct_tests
'''
fault_name = self.test_data[1][0]
fault_type = self.test_data[1][1]
fault_role = self.test_data[1][2]
filerep_state = self.test_data[1][3]
filerep_role = self.test_data[1][4]
tinctest.logger.info("\n ===============================================")
tinctest.logger.info("\n Starting New Test: %s " % self.test_data[0][1])
tinctest.logger.info("\n ===============================================")
self.filerep_sync_ct(fault_name, fault_type, fault_role, filerep_state, filerep_role)
@tinctest.dataProvider('sync_ct_tests')
def test_sync_ct():
data = {'test_01_mirror_sync_postmaster_reset_filerep_sender': ['filerep_sender','panic','mirror','ct','mirror'],
'test_02_mirror_sync_postmaster_reset_filerep_receiver': ['filerep_receiver','panic','mirror','ct','mirror'],
'test_03_mirror_sync_postmaster_reset_filerep_flush': ['filerep_flush','panic','mirror','ct','mirror'],
'test_04_mirror_sync_postmaster_reset_filerep_consumer': ['filerep_consumer','panic','mirror','ct','mirror'],
'test_10_mirror_sync_filerep_process_error_failover': ['filerep_sender','error','mirror','ct','primary'],
'test_11_primary_sync_filerep_process_error_failover': ['filerep_sender','error','primary','ct','mirror'],
            'test_14_mirror_sync_deadlock_failover': ['filerep_sender','infinite_loop','mirror','ct','primary'],
'test_15_primary_sync_deadlock_failover': ['filerep_sender','infinite_loop','primary','ct','mirror'],
'test_16_primary_sync_filerep_network_failover': ['filerep_receiver','fault','primary','ct','mirror'],
'test_18_primary_sync_process_missing_failover': ['postmaster','panic','primary','ct','mirror'],
'test_42_mirror_sync_filerep_network': ['filerep_receiver','fault','mirror','ct','mirror'],
'test_43_mirror_sync_system_io_failover': ['filerep_flush', 'error', 'mirror','ct','mirror'],
'test_44_mirror_sync_postmaster_missing_failover': ['postmaster', 'panic', 'mirror','ct','mirror'],
'test_postmaster_reset_mpp13689': ['filerep_receiver','fatal','mirror','ct','primary']}
return data
|
jusdng/odoo
|
refs/heads/8.0
|
addons/procurement_jit_stock/__init__.py
|
242
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import procurement_jit_stock
|
blmorris/micropython
|
refs/heads/master
|
tests/basics/try_finally1.py
|
100
|
print("noexc-finally")
try:
print("try")
finally:
print("finally")
print("noexc-finally-finally")
try:
print("try1")
try:
print("try2")
finally:
print("finally2")
finally:
print("finally1")
print()
print("noexc-finally-func-finally")
def func2():
try:
print("try2")
finally:
print("finally2")
try:
print("try1")
func2()
finally:
print("finally1")
print()
print("exc-finally-except")
try:
print("try1")
try:
print("try2")
foo()
except:
print("except2")
finally:
print("finally1")
print()
print("exc-finally-except-filter")
try:
print("try1")
try:
print("try2")
foo()
except NameError:
print("except2")
finally:
print("finally1")
print()
print("exc-except-finally-finally")
try: # top-level catch-all except to not fail script
try:
print("try1")
try:
print("try2")
foo()
finally:
print("finally2")
finally:
print("finally1")
except:
print("catch-all except")
print()
|
laperry1/android_external_chromium_org
|
refs/heads/cm-12.1
|
chrome/common/extensions/PRESUBMIT.py
|
56
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting extensions.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
import fnmatch
import os
import re
EXTENSIONS_PATH = os.path.join('chrome', 'common', 'extensions')
DOCS_PATH = os.path.join(EXTENSIONS_PATH, 'docs')
SERVER2_PATH = os.path.join(DOCS_PATH, 'server2')
API_PATH = os.path.join(EXTENSIONS_PATH, 'api')
TEMPLATES_PATH = os.path.join(DOCS_PATH, 'templates')
PRIVATE_TEMPLATES_PATH = os.path.join(TEMPLATES_PATH, 'private')
PUBLIC_TEMPLATES_PATH = os.path.join(TEMPLATES_PATH, 'public')
INTROS_PATH = os.path.join(TEMPLATES_PATH, 'intros')
ARTICLES_PATH = os.path.join(TEMPLATES_PATH, 'articles')
LOCAL_PUBLIC_TEMPLATES_PATH = os.path.join('docs',
'templates',
'public')
EXTENSIONS_TO_REMOVE_FOR_CLEAN_URLS = ('.md', '.html')
def _ReadFile(filename):
with open(filename) as f:
return f.read()
def _ListFilesInPublic():
all_files = []
for path, dirs, files in os.walk(LOCAL_PUBLIC_TEMPLATES_PATH):
all_files.extend(
os.path.join(path, filename)[len(LOCAL_PUBLIC_TEMPLATES_PATH + os.sep):]
for filename in files)
return all_files
def _UnixName(name):
name = os.path.splitext(name)[0]
s1 = re.sub('([a-z])([A-Z])', r'\1_\2', name)
s2 = re.sub('([A-Z]+)([A-Z][a-z])', r'\1_\2', s1)
return s2.replace('.', '_').lower()
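# Worked examples (added note): _UnixName('fontSettings.html') -> 'font_settings',
# and _UnixName('WebRequest.json') -> 'web_request'.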
def _FindMatchingTemplates(template_name, template_path_list):
matches = []
unix_name = _UnixName(template_name)
for template in template_path_list:
if unix_name == _UnixName(template.split(os.sep)[-1]):
basename, ext = os.path.splitext(template)
# The docserver expects clean (extensionless) template URLs, so we
# strip some extensions here when generating the list of matches.
if ext in EXTENSIONS_TO_REMOVE_FOR_CLEAN_URLS:
matches.append(basename)
else:
matches.append(template)
return matches
def _SanitizeAPIName(name, api_path):
if not api_path.endswith(os.sep):
api_path += os.sep
filename = os.path.splitext(name)[0][len(api_path):].replace(os.sep, '_')
if 'experimental' in filename:
filename = 'experimental_' + filename.replace('experimental_', '')
return filename
def _CreateIntegrationTestArgs(affected_files):
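  # Changes under the server code or private templates affect every generated
  # page, so the full docserver suite is requested via '-a' (presumably the
  # run-everything flag); otherwise only templates matching the affected
  # files are passed as arguments.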
if (any(fnmatch.fnmatch(name, '%s*.py' % SERVER2_PATH)
for name in affected_files) or
any(fnmatch.fnmatch(name, '%s*' % PRIVATE_TEMPLATES_PATH)
for name in affected_files)):
return ['-a']
args = []
for name in affected_files:
if (fnmatch.fnmatch(name, '%s*' % PUBLIC_TEMPLATES_PATH) or
fnmatch.fnmatch(name, '%s*' % INTROS_PATH) or
fnmatch.fnmatch(name, '%s*' % ARTICLES_PATH)):
args.extend(_FindMatchingTemplates(name.split(os.sep)[-1],
_ListFilesInPublic()))
if fnmatch.fnmatch(name, '%s*' % API_PATH):
args.extend(_FindMatchingTemplates(_SanitizeAPIName(name, API_PATH),
_ListFilesInPublic()))
return args
def _CheckHeadingIDs(input_api):
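  # Flag intro and article files in which the number of <h2>/<h3> headings
  # does not match the number of headings carrying an id= attribute.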
ids_re = re.compile('<h[23].*id=.*?>')
headings_re = re.compile('<h[23].*?>')
bad_files = []
for name in input_api.AbsoluteLocalPaths():
if not os.path.exists(name):
continue
if (fnmatch.fnmatch(name, '*%s*' % INTROS_PATH) or
fnmatch.fnmatch(name, '*%s*' % ARTICLES_PATH)):
contents = input_api.ReadFile(name)
if (len(re.findall(headings_re, contents)) !=
len(re.findall(ids_re, contents))):
bad_files.append(name)
return bad_files
def _CheckLinks(input_api, output_api, results):
for affected_file in input_api.AffectedFiles():
name = affected_file.LocalPath()
absolute_path = affected_file.AbsoluteLocalPath()
if not os.path.exists(absolute_path):
continue
if (fnmatch.fnmatch(name, '%s*' % PUBLIC_TEMPLATES_PATH) or
fnmatch.fnmatch(name, '%s*' % INTROS_PATH) or
fnmatch.fnmatch(name, '%s*' % ARTICLES_PATH) or
fnmatch.fnmatch(name, '%s*' % API_PATH)):
contents = _ReadFile(absolute_path)
args = []
if input_api.platform == 'win32':
args = [input_api.python_executable]
args.extend([os.path.join('docs', 'server2', 'link_converter.py'),
'-o',
'-f',
absolute_path])
output = input_api.subprocess.check_output(
args,
cwd=input_api.PresubmitLocalPath(),
universal_newlines=True)
if output != contents:
changes = ''
for i, (line1, line2) in enumerate(
zip(contents.split('\n'), output.split('\n'))):
if line1 != line2:
changes = ('%s\nLine %d:\n-%s\n+%s\n' %
(changes, i + 1, line1, line2))
if changes:
results.append(output_api.PresubmitPromptWarning(
'File %s may have an old-style <a> link to an API page. Please '
'run docs/server2/link_converter.py to convert the link[s], or '
'convert them manually.\n\nSuggested changes are: %s' %
(name, changes)))
def _CheckChange(input_api, output_api):
results = [
output_api.PresubmitError('File %s needs an id for each heading.' % name)
for name in _CheckHeadingIDs(input_api)]
try:
integration_test = []
# From depot_tools/presubmit_canned_checks.py:529
if input_api.platform == 'win32':
integration_test = [input_api.python_executable]
integration_test.append(
os.path.join('docs', 'server2', 'integration_test.py'))
integration_test.extend(_CreateIntegrationTestArgs(input_api.LocalPaths()))
input_api.subprocess.check_call(integration_test,
cwd=input_api.PresubmitLocalPath())
except input_api.subprocess.CalledProcessError:
results.append(output_api.PresubmitError('IntegrationTest failed!'))
# TODO(kalman): Re-enable this check, or decide to delete it forever. Now
# that we have multiple directories it no longer works.
# See http://crbug.com/297178.
#_CheckLinks(input_api, output_api, results)
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results += input_api.canned_checks.CheckPatchFormatted(input_api, output_api)
results += _CheckChange(input_api, output_api)
return results
def CheckChangeOnCommit(input_api, output_api):
return _CheckChange(input_api, output_api)
|
cyanna/edx-platform
|
refs/heads/master
|
lms/djangoapps/courseware/features/homepage.py
|
140
|
# pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
from lettuce import world, step
from nose.tools import assert_equals, assert_greater # pylint: disable=no-name-in-module
@step(u'I should see the following links and ids')
def should_see_a_link_called(step):
for link_id_pair in step.hashes:
link_id = link_id_pair['id']
text = link_id_pair['Link']
link = world.browser.find_by_id(link_id)
assert_greater(
len(link),
0,
"Link length is less than 1. ID: {id} Text: {text}".format(id=link_id, text=text)
)
assert_equals(link.text, text)
|
RydrDojo/Ridr_app
|
refs/heads/master
|
pylotVenv/lib/python2.7/site-packages/alembic/templates/generic/env.py
|
76
|
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = None
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url, target_metadata=target_metadata, literal_binds=True)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
|
mrquim/mrquimrepo
|
refs/heads/master
|
plugin.program.indigo/libs/requests/packages/chardet/compat.py
|
2942
|
######################## BEGIN LICENSE BLOCK ########################
# Contributor(s):
# Ian Cordasco - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
if sys.version_info < (3, 0):
base_str = (str, unicode)
else:
base_str = (bytes, str)
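# wrap_ord returns ord(a) for a one-character string on Python 2 and passes
# the value through unchanged otherwise (e.g. indexing bytes on Python 3
# already yields an int).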
def wrap_ord(a):
if sys.version_info < (3, 0) and isinstance(a, base_str):
return ord(a)
else:
return a
|
rjeli/scikit-image
|
refs/heads/master
|
skimage/_shared/tests/test_utils.py
|
24
|
from skimage._shared.utils import copy_func
import numpy.testing as npt
def test_copyfunc():
def foo(a):
return a
bar = copy_func(foo, name='bar')
other = copy_func(foo)
npt.assert_equal(bar.__name__, 'bar')
npt.assert_equal(other.__name__, 'foo')
other.__name__ = 'other'
npt.assert_equal(foo.__name__, 'foo')
if __name__ == "__main__":
npt.run_module_suite()
|
se4u/pylearn2
|
refs/heads/master
|
pylearn2/scripts/plot_monitor.py
|
37
|
#!/usr/bin/env python
"""
usage:
plot_monitor.py model_1.pkl model_2.pkl ... model_n.pkl
Loads any number of .pkl files produced by train.py. Extracts
all of their monitoring channels and prompts the user to select
a subset of them to be plotted.
"""
from __future__ import print_function
__authors__ = "Ian Goodfellow, Harm Aarts"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
import gc
import numpy as np
import sys
from theano.compat.six.moves import input, xrange
from pylearn2.utils import serial
from theano.printing import _TagGenerator
from pylearn2.utils.string_utils import number_aware_alphabetical_key
from pylearn2.utils import contains_nan, contains_inf
import argparse
channels = {}
def unique_substring(s, other, min_size=1):
"""
    Return the shortest substring of `s`, of length at least `min_size`, that
    does not occur in any of the strings in `other`; if every substring also
    occurs in `other`, return `s` itself.
"""
size = min(len(s), min_size)
while size <= len(s):
for pos in xrange(0,len(s)-size+1):
rval = s[pos:pos+size]
fail = False
for o in other:
if o.find(rval) != -1:
fail = True
break
if not fail:
return rval
size += 1
# no unique substring
return s
def unique_substrings(l, min_size=1):
"""
    Return, for each string in `l`, a substring that does not occur in any of
    the other strings (see `unique_substring`).
"""
return [unique_substring(s, [x for x in l if x is not s], min_size)
for s in l]
def main():
"""
    Parse the command-line arguments, load the monitor channels from each
    model file, and interactively prompt for the channels to plot.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--out")
parser.add_argument("model_paths", nargs='+')
parser.add_argument("--yrange", help='The y-range to be used for plotting, e.g. 0:1')
options = parser.parse_args()
model_paths = options.model_paths
if options.out is not None:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
print('generating names...')
model_names = [model_path.replace('.pkl', '!') for model_path in
model_paths]
model_names = unique_substrings(model_names, min_size=10)
model_names = [model_name.replace('!','') for model_name in
model_names]
print('...done')
for i, arg in enumerate(model_paths):
try:
model = serial.load(arg)
except Exception:
if arg.endswith('.yaml'):
                print(arg + " is a yaml config file," +
                      " you need to load a trained model.", file=sys.stderr)
quit(-1)
raise
this_model_channels = model.monitor.channels
if len(sys.argv) > 2:
postfix = ":" + model_names[i]
else:
postfix = ""
for channel in this_model_channels:
channels[channel+postfix] = this_model_channels[channel]
del model
gc.collect()
while True:
# Make a list of short codes for each channel so user can specify them
# easily
tag_generator = _TagGenerator()
codebook = {}
sorted_codes = []
for channel_name in sorted(channels,
key = number_aware_alphabetical_key):
code = tag_generator.get_tag()
codebook[code] = channel_name
codebook['<'+channel_name+'>'] = channel_name
sorted_codes.append(code)
x_axis = 'example'
print('set x_axis to example')
if len(channels.values()) == 0:
print("there are no channels to plot")
break
# If there is more than one channel in the monitor ask which ones to
# plot
prompt = len(channels.values()) > 1
if prompt:
# Display the codebook
for code in sorted_codes:
print(code + '. ' + codebook[code])
print()
print("Put e, b, s or h in the list somewhere to plot " +
"epochs, batches, seconds, or hours, respectively.")
response = input('Enter a list of channels to plot ' + \
'(example: A, C,F-G, h, <test_err>) or q to quit' + \
' or o for options: ')
if response == 'o':
print('1: smooth all channels')
print('any other response: do nothing, go back to plotting')
response = input('Enter your choice: ')
if response == '1':
for channel in channels.values():
k = 5
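                        # smooth each channel with a trailing moving average
                        # over up to the last k+1 recorded values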
new_val_record = []
for i in xrange(len(channel.val_record)):
new_val = 0.
count = 0.
for j in xrange(max(0, i-k), i+1):
new_val += channel.val_record[j]
count += 1.
new_val_record.append(new_val / count)
channel.val_record = new_val_record
continue
if response == 'q':
break
#Remove spaces
response = response.replace(' ','')
#Split into list
codes = response.split(',')
final_codes = set([])
for code in codes:
if code == 'e':
x_axis = 'epoch'
continue
elif code == 'b':
x_axis = 'batche'
elif code == 's':
x_axis = 'second'
elif code == 'h':
x_axis = 'hour'
elif code.startswith('<'):
assert code.endswith('>')
final_codes.add(code)
elif code.find('-') != -1:
#The current list element is a range of codes
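                    # e.g. 'B-E' expands to every code from 'B' through 'E'
                    # in sorted codebook order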
rng = code.split('-')
if len(rng) != 2:
print("Input not understood: "+code)
quit(-1)
found = False
for i in xrange(len(sorted_codes)):
if sorted_codes[i] == rng[0]:
found = True
break
if not found:
print("Invalid code: "+rng[0])
quit(-1)
found = False
for j in xrange(i,len(sorted_codes)):
if sorted_codes[j] == rng[1]:
found = True
break
if not found:
print("Invalid code: "+rng[1])
quit(-1)
final_codes = final_codes.union(set(sorted_codes[i:j+1]))
else:
#The current list element is just a single code
final_codes = final_codes.union(set([code]))
# end for code in codes
else:
            final_codes = set(codebook.keys())
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
styles = list(colors)
styles += [color+'--' for color in colors]
styles += [color+':' for color in colors]
fig = plt.figure()
ax = plt.subplot(1,1,1)
# plot the requested channels
for idx, code in enumerate(sorted(final_codes)):
channel_name= codebook[code]
channel = channels[channel_name]
y = np.asarray(channel.val_record)
if contains_nan(y):
print(channel_name + ' contains NaNs')
if contains_inf(y):
                print(channel_name + ' contains infinite values')
if x_axis == 'example':
x = np.asarray(channel.example_record)
elif x_axis == 'batche':
x = np.asarray(channel.batch_record)
elif x_axis == 'epoch':
try:
x = np.asarray(channel.epoch_record)
except AttributeError:
# older saved monitors won't have epoch_record
x = np.arange(len(channel.batch_record))
elif x_axis == 'second':
x = np.asarray(channel.time_record)
elif x_axis == 'hour':
x = np.asarray(channel.time_record) / 3600.
else:
assert False
ax.plot( x,
y,
styles[idx % len(styles)],
                     marker = '.', # add point markers to lines
label = channel_name)
plt.xlabel('# '+x_axis+'s')
ax.ticklabel_format( scilimits = (-3,3), axis = 'both')
handles, labels = ax.get_legend_handles_labels()
lgd = ax.legend(handles, labels, loc = 'upper left',
bbox_to_anchor = (1.05, 1.02))
# Get the axis positions and the height and width of the legend
plt.draw()
ax_pos = ax.get_position()
pad_width = ax_pos.x0 * fig.get_size_inches()[0]
pad_height = ax_pos.y0 * fig.get_size_inches()[1]
dpi = fig.get_dpi()
lgd_width = ax.get_legend().get_frame().get_width() / dpi
lgd_height = ax.get_legend().get_frame().get_height() / dpi
# Adjust the bounding box to encompass both legend and axis. Axis should be 3x3 inches.
# I had trouble getting everything to align vertically.
ax_width = 3
ax_height = 3
total_width = 2*pad_width + ax_width + lgd_width
total_height = 2*pad_height + np.maximum(ax_height, lgd_height)
fig.set_size_inches(total_width, total_height)
ax.set_position([pad_width/total_width, 1-6*pad_height/total_height, ax_width/total_width, ax_height/total_height])
if(options.yrange is not None):
ymin, ymax = map(float, options.yrange.split(':'))
plt.ylim(ymin, ymax)
if options.out is None:
plt.show()
else:
plt.savefig(options.out)
if not prompt:
break
if __name__ == "__main__":
main()
|
m-housh/phonevalidator
|
refs/heads/master
|
phonevalidator/phonevalidator.py
|
1
|
# -*- coding: utf-8 -*-
from os import environ
from phonenumbers import (
format_number,
is_valid_number,
parse,
NumberParseException,
PhoneNumberFormat,
SUPPORTED_REGIONS,
)
from cerberus import Validator as SuperValidator
class ValidatorMixin(object):
""" A custom cerberus.Validator subclass adding the `phonenumber` constraint
to Cerberus validation's.
:Example:
.. code-block:: python
>>> from phonevalidator import ValidatorMixin
>>> from eve.io.mongo import Validator
>>> from eve import Eve
>>> class MyValidator(Validator, ValidatorMixin):
... ''' Custom validator that adds phone number
... validations.
... '''
... pass
>>> settings = {'DOMAIN': {}}
>>> app = Eve(validator=MyValidator, settings=settings)
"""
def _default_region(self):
""" Return's a default region string.
We will try to get a region from the environment variable
'DEFAULT_PHONE_REGION' if not found or not valid, then
we default to 'US'
"""
region = environ.get('DEFAULT_PHONE_REGION')
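        # e.g. exporting DEFAULT_PHONE_REGION=GB would make 'GB' the fallback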
if region and region.upper() in SUPPORTED_REGIONS:
return region.upper()
return 'US'
def _is_valid_region(self, region):
""" Return's True if region is valid, False otherwise
"""
if isinstance(region, str) and region.upper() in SUPPORTED_REGIONS:
return True
return False
def _set_region(self, region=None):
""" Set's a region to use to validate phone numbers for this
instance.
"""
if self._is_valid_region(region):
self.region = region.upper()
else:
self.region = self._default_region()
def _set_default_formatter(self):
""" Try's to get a formatter string from the environment
or defaults to `phonenumbers.PhoneNumberFormat.NATIONAL`
"""
env_format = environ.get('DEFAULT_PHONE_FORMAT')
if env_format is not None:
self._set_formatter(
formatter=env_format
)
else:
self.formatter = PhoneNumberFormat.NATIONAL
def _set_formatter(self, formatter=None):
""" Set's formatter for this instance, or defaults to
`phonenumbers.PhoneNumberFormat.NATIONAL`
"""
if formatter is None or not isinstance(formatter, str):
self._set_default_formatter()
else:
self.formatter = getattr(
PhoneNumberFormat,
formatter.upper(),
PhoneNumberFormat.NATIONAL
)
def _validate_formatPhoneNumber(self, formatPhoneNumber, field, value):
""" Fake validate function to let cerberus accept "formatPhoneNumber"
as a keyword in the schema.
"""
pass
def _validate_phoneNumberFormat(self, phoneNumberFormat, field, value):
""" Validates a phoneNumberFormat for a phone number.
:param phoneNumberFormat: a string for accepted format.
:accepted formats: ['NATIONAL',
'INTERNATIONAL',
'RFC3966',
'E164'
]
"""
keys = PhoneNumberFormat.__dict__.keys()
valids = [key for key in keys if not key.startswith('_')]
if phoneNumberFormat.upper() not in valids:
self._error(field,
'Not a valid phone number format: {}'.format(value))
def _validate_region(self, region, field, value):
""" Validates a region for a phone number.
:param region: a region.
"""
if self._is_valid_region(region) is False:
self._error(field,
'Region not valid: {}'.format(region))
def _validate_type_phonenumber(self, field, value):
""" Validates a phone number is valid. Optionally formatting the number.
:param field: field name.
:param value: field value.
"""
# get the region from schema for this field or use default
self._set_region(self.schema[field].get('region'))
try:
phone_number = parse(value, self.region)
# check that it's valid number
if not is_valid_number(phone_number):
self._error(field, 'Phone Number not valid: {}'.format(value))
elif self.schema[field].get('formatPhoneNumber'):
# if the schema's 'formatPhoneNumber' is set to True,
# format the phone number using a formatter derived from
                # the schema's 'phoneNumberFormat' value, then from the
                # environment variable 'DEFAULT_PHONE_FORMAT',
# or defaults to 'NATIONAL'.
formatter = self.schema[field].get('phoneNumberFormat')
self._set_formatter(
formatter=formatter
)
self.document[field] = format_number(phone_number,
self.formatter)
except NumberParseException:
self._error(field, 'Phone Number not valid: {}'.format(value))
class Validator(SuperValidator, ValidatorMixin):
""" Extends `cerberus.Validator` and adds the `phonenumber` constraint
    to Cerberus validations.
:Example:
.. code-block:: python
>>> from phonevalidator import Validator
>>> schema = {
... 'phone': {
... 'type': 'phonenumber',
... 'formatPhoneNumber': True,
... 'phoneNumberFormat': 'NATIONAL',
... 'region': 'US'
... }
... }
>>> doc = {'phone': '5135555555'}
>>> v = Validator(schema)
>>> v.validate(doc)
True
>>> v.document
{'phone': '(513) 555-5555'}
>>> doc = {'phone': 'gibberish'}
>>> v.validate(doc)
False
"""
pass
|
joshisa/taiga-back
|
refs/heads/master
|
taiga/timeline/permissions.py
|
20
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base.api.permissions import (TaigaResourcePermission, HasProjectPerm,
AllowAny)
class UserTimelinePermission(TaigaResourcePermission):
retrieve_perms = AllowAny()
class ProjectTimelinePermission(TaigaResourcePermission):
retrieve_perms = HasProjectPerm('view_project')
|