| repo_name | ref | path | copies | content |
|---|---|---|---|---|
Cinntax/home-assistant
|
refs/heads/dev
|
homeassistant/components/rainbird/switch.py
|
6
|
"""Support for Rain Bird Irrigation system LNK WiFi Module."""
import logging
from pyrainbird import AvailableStations, RainbirdController
import voluptuous as vol
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import ATTR_ENTITY_ID, CONF_FRIENDLY_NAME, CONF_TRIGGER_TIME
from homeassistant.helpers import config_validation as cv
from . import CONF_ZONES, DATA_RAINBIRD, DOMAIN, RAINBIRD_CONTROLLER
_LOGGER = logging.getLogger(__name__)
ATTR_DURATION = "duration"
SERVICE_START_IRRIGATION = "start_irrigation"
SERVICE_SCHEMA_IRRIGATION = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_DURATION): vol.All(vol.Coerce(float), vol.Range(min=0)),
}
)
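# A sketch of the service payload this schema validates (illustrative values,
# not from the original source):
#   {"entity_id": "switch.sprinkler_1", "duration": 6}
# start_irrigation() below routes such a call to the matching RainBirdSwitch.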
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Rain Bird switches over a Rain Bird controller."""
if discovery_info is None:
return
controller: RainbirdController = hass.data[DATA_RAINBIRD][
discovery_info[RAINBIRD_CONTROLLER]
]
available_stations: AvailableStations = controller.get_available_stations()
if not (available_stations and available_stations.stations):
return
devices = []
for zone in range(1, available_stations.stations.count + 1):
if available_stations.stations.active(zone):
zone_config = discovery_info.get(CONF_ZONES, {}).get(zone, {})
time = zone_config.get(CONF_TRIGGER_TIME, discovery_info[CONF_TRIGGER_TIME])
name = zone_config.get(CONF_FRIENDLY_NAME)
devices.append(
RainBirdSwitch(
controller,
zone,
time,
name if name else "Sprinkler {}".format(zone),
)
)
add_entities(devices, True)
def start_irrigation(service):
entity_id = service.data[ATTR_ENTITY_ID]
duration = service.data[ATTR_DURATION]
for device in devices:
if device.entity_id == entity_id:
device.turn_on(duration=duration)
hass.services.register(
DOMAIN,
SERVICE_START_IRRIGATION,
start_irrigation,
schema=SERVICE_SCHEMA_IRRIGATION,
)
class RainBirdSwitch(SwitchDevice):
"""Representation of a Rain Bird switch."""
def __init__(self, controller: RainbirdController, zone, time, name):
"""Initialize a Rain Bird Switch Device."""
self._rainbird = controller
self._zone = zone
self._name = name
self._state = None
self._duration = time
self._attributes = {ATTR_DURATION: self._duration, "zone": self._zone}
@property
def device_state_attributes(self):
"""Return state attributes."""
return self._attributes
@property
def name(self):
"""Get the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Turn the switch on."""
if self._rainbird.irrigate_zone(
int(self._zone),
int(kwargs[ATTR_DURATION] if ATTR_DURATION in kwargs else self._duration),
):
self._state = True
def turn_off(self, **kwargs):
"""Turn the switch off."""
if self._rainbird.stop_irrigation():
self._state = False
def update(self):
"""Update switch status."""
self._state = self._rainbird.get_zone_state(self._zone)
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
|
unreal666/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/xxxymovies.py
|
38
|
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
int_or_none,
)
class XXXYMoviesIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?xxxymovies\.com/videos/(?P<id>\d+)/(?P<display_id>[^/]+)'
_TEST = {
'url': 'http://xxxymovies.com/videos/138669/ecstatic-orgasm-sofcore/',
'md5': '810b1bdbbffff89dd13bdb369fe7be4b',
'info_dict': {
'id': '138669',
'display_id': 'ecstatic-orgasm-sofcore',
'ext': 'mp4',
'title': 'Ecstatic Orgasm Sofcore',
'duration': 931,
'categories': list,
'view_count': int,
'like_count': int,
'dislike_count': int,
'age_limit': 18,
}
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, display_id)
video_url = self._search_regex(
r"video_url\s*:\s*'([^']+)'", webpage, 'video URL')
title = self._html_search_regex(
[r'<div[^>]+\bclass="block_header"[^>]*>\s*<h1>([^<]+)<',
r'<title>(.*?)\s*-\s*(?:XXXYMovies\.com|XXX\s+Movies)</title>'],
webpage, 'title')
thumbnail = self._search_regex(
r"preview_url\s*:\s*'([^']+)'",
webpage, 'thumbnail', fatal=False)
categories = self._html_search_meta(
'keywords', webpage, 'categories', default='').split(',')
duration = parse_duration(self._search_regex(
r'<span>Duration:</span>\s*(\d+:\d+)',
webpage, 'duration', fatal=False))
view_count = int_or_none(self._html_search_regex(
r'<div class="video_views">\s*(\d+)',
webpage, 'view count', fatal=False))
like_count = int_or_none(self._search_regex(
r'>\s*Likes? <b>\((\d+)\)',
webpage, 'like count', fatal=False))
dislike_count = int_or_none(self._search_regex(
r'>\s*Dislike <b>\((\d+)\)</b>',
webpage, 'dislike count', fatal=False))
age_limit = self._rta_search(webpage)
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'categories': categories,
'duration': duration,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'age_limit': age_limit,
}
|
prezi/gunicorn
|
refs/heads/master
|
tests/requests/valid/005.py
|
40
|
request = {
"method": "GET",
"uri": uri("/forums/1/topics/2375?page=1#posts-17408"),
"version": (1, 1),
"headers": [],
"body": b""
}
|
Shouqun/node-gn
|
refs/heads/master
|
tools/depot_tools/third_party/boto/roboto/awsqueryrequest.py
|
70
|
# Copyright (c) 2010 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2010, Eucalyptus Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import sys
import os
import boto
import optparse
import copy
import boto.exception
import boto.jsonresponse
import boto.utils
import boto.roboto.awsqueryservice
import bdb
import traceback
try:
import epdb as debugger
except ImportError:
import pdb as debugger
def boto_except_hook(debugger_flag, debug_flag):
def excepthook(typ, value, tb):
if typ is bdb.BdbQuit:
sys.exit(1)
sys.excepthook = sys.__excepthook__
if debugger_flag and sys.stdout.isatty() and sys.stdin.isatty():
if debugger.__name__ == 'epdb':
debugger.post_mortem(tb, typ, value)
else:
debugger.post_mortem(tb)
elif debug_flag:
traceback.print_tb(tb)
sys.exit(1)
else:
print value
sys.exit(1)
return excepthook
class Line(object):
def __init__(self, fmt, data, label):
self.fmt = fmt
self.data = data
self.label = label
self.line = '%s\t' % label
self.printed = False
def append(self, datum):
self.line += '%s\t' % datum
def print_it(self):
if not self.printed:
print self.line
self.printed = True
class RequiredParamError(boto.exception.BotoClientError):
def __init__(self, required):
self.required = required
s = 'Required parameters are missing: %s' % self.required
boto.exception.BotoClientError.__init__(self, s)
class EncoderError(boto.exception.BotoClientError):
def __init__(self, error_msg):
s = 'Error encoding value (%s)' % error_msg
boto.exception.BotoClientError.__init__(self, s)
class FilterError(boto.exception.BotoClientError):
def __init__(self, filters):
self.filters = filters
s = 'Unknown filters: %s' % self.filters
boto.exception.BotoClientError.__init__(self, s)
class Encoder:
@classmethod
def encode(cls, p, rp, v, label=None):
if p.name.startswith('_'):
return
try:
mthd = getattr(cls, 'encode_'+p.ptype)
mthd(p, rp, v, label)
except AttributeError:
raise EncoderError('Unknown type: %s' % p.ptype)
@classmethod
def encode_string(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
rp[label] = v
encode_file = encode_string
encode_enum = encode_string
@classmethod
def encode_integer(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
rp[label] = '%d' % v
@classmethod
def encode_boolean(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
if v:
v = 'true'
else:
v = 'false'
rp[label] = v
@classmethod
def encode_datetime(cls, p, rp, v, l):
if l:
label = l
else:
label = p.name
rp[label] = v
@classmethod
def encode_array(cls, p, rp, v, l):
v = boto.utils.mklist(v)
if l:
label = l
else:
label = p.name
label = label + '.%d'
for i, value in enumerate(v):
rp[label%(i+1)] = value
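# For illustration (hypothetical param and values, not from the original
# source): encoding an array param named 'InstanceId' with
# v = ['i-123', 'i-456'] fills rp with
# {'InstanceId.1': 'i-123', 'InstanceId.2': 'i-456'}.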
class AWSQueryRequest(object):
ServiceClass = None
Description = ''
Params = []
Args = []
Filters = []
Response = {}
CLITypeMap = {'string' : 'string',
'integer' : 'int',
'int' : 'int',
'enum' : 'choice',
'datetime' : 'string',
'dateTime' : 'string',
'file' : 'string',
'boolean' : None}
@classmethod
def name(cls):
return cls.__name__
def __init__(self, **args):
self.args = args
self.parser = None
self.cli_options = None
self.cli_args = None
self.cli_output_format = None
self.connection = None
self.list_markers = []
self.item_markers = []
self.request_params = {}
self.connection_args = None
def __repr__(self):
return self.name()
def get_connection(self, **args):
if self.connection is None:
self.connection = self.ServiceClass(**args)
return self.connection
@property
def status(self):
retval = None
if self.http_response is not None:
retval = self.http_response.status
return retval
@property
def reason(self):
retval = None
if self.http_response is not None:
retval = self.http_response.reason
return retval
@property
def request_id(self):
retval = None
if self.aws_response is not None:
retval = getattr(self.aws_response, 'requestId')
return retval
def process_filters(self):
filters = self.args.get('filters', [])
filter_names = [f['name'] for f in self.Filters]
unknown_filters = [f for f in filters if f not in filter_names]
if unknown_filters:
raise FilterError(unknown_filters)
for i, filter in enumerate(self.Filters):
name = filter['name']
if name in filters:
self.request_params['Filter.%d.Name' % (i+1)] = name
for j, value in enumerate(boto.utils.mklist(filters[name])):
Encoder.encode(filter, self.request_params, value,
'Filter.%d.Value.%d' % (i+1, j+1))
def process_args(self, **args):
"""
Responsible for walking through Params defined for the request and:
* Matching them with keyword parameters passed to the request
constructor or via the command line.
* Checking to see if all required parameters have been specified
and raising an exception, if not.
* Encoding each value into the set of request parameters that will
be sent in the request to the AWS service.
"""
self.args.update(args)
self.connection_args = copy.copy(self.args)
if 'debug' in self.args and self.args['debug'] >= 2:
boto.set_stream_logger(self.name())
required = [p.name for p in self.Params+self.Args if not p.optional]
for param in self.Params+self.Args:
if param.long_name:
python_name = param.long_name.replace('-', '_')
else:
python_name = boto.utils.pythonize_name(param.name, '_')
value = None
if python_name in self.args:
value = self.args[python_name]
if value is None:
value = param.default
if value is not None:
if param.name in required:
required.remove(param.name)
if param.request_param:
if param.encoder:
param.encoder(param, self.request_params, value)
else:
Encoder.encode(param, self.request_params, value)
if python_name in self.args:
del self.connection_args[python_name]
if required:
l = []
for p in self.Params+self.Args:
if p.name in required:
if p.short_name and p.long_name:
l.append('(%s, %s)' % (p.optparse_short_name,
p.optparse_long_name))
elif p.short_name:
l.append('(%s)' % p.optparse_short_name)
else:
l.append('(%s)' % p.optparse_long_name)
raise RequiredParamError(','.join(l))
boto.log.debug('request_params: %s' % self.request_params)
self.process_markers(self.Response)
def process_markers(self, fmt, prev_name=None):
if fmt and fmt['type'] == 'object':
for prop in fmt['properties']:
self.process_markers(prop, fmt['name'])
elif fmt and fmt['type'] == 'array':
self.list_markers.append(prev_name)
self.item_markers.append(fmt['name'])
def send(self, verb='GET', **args):
self.process_args(**args)
self.process_filters()
conn = self.get_connection(**self.connection_args)
self.http_response = conn.make_request(self.name(),
self.request_params,
verb=verb)
self.body = self.http_response.read()
boto.log.debug(self.body)
if self.http_response.status == 200:
self.aws_response = boto.jsonresponse.Element(list_marker=self.list_markers,
item_marker=self.item_markers)
h = boto.jsonresponse.XmlHandler(self.aws_response, self)
h.parse(self.body)
return self.aws_response
else:
boto.log.error('%s %s' % (self.http_response.status,
self.http_response.reason))
boto.log.error('%s' % self.body)
raise conn.ResponseError(self.http_response.status,
self.http_response.reason,
self.body)
def add_standard_options(self):
group = optparse.OptionGroup(self.parser, 'Standard Options')
# add standard options that all commands get
group.add_option('-D', '--debug', action='store_true',
help='Turn on all debugging output')
group.add_option('--debugger', action='store_true',
default=False,
help='Enable interactive debugger on error')
group.add_option('-U', '--url', action='store',
help='Override service URL with value provided')
group.add_option('--region', action='store',
help='Name of the region to connect to')
group.add_option('-I', '--access-key-id', action='store',
help='Override access key value')
group.add_option('-S', '--secret-key', action='store',
help='Override secret key value')
group.add_option('--version', action='store_true',
help='Display version string')
if self.Filters:
group.add_option('--help-filters', action='store_true',
help='Display list of available filters')
group.add_option('--filter', action='append',
metavar=' name=value',
help='A filter for limiting the results')
self.parser.add_option_group(group)
def process_standard_options(self, options, args, d):
if hasattr(options, 'help_filters') and options.help_filters:
print 'Available filters:'
for filter in self.Filters:
print '%s\t%s' % (filter.name, filter.doc)
sys.exit(0)
if options.debug:
self.args['debug'] = 2
if options.url:
self.args['url'] = options.url
if options.region:
self.args['region'] = options.region
if options.access_key_id:
self.args['aws_access_key_id'] = options.access_key_id
if options.secret_key:
self.args['aws_secret_access_key'] = options.secret_key
if options.version:
# TODO - Where should the version # come from?
print 'version x.xx'
exit(0)
sys.excepthook = boto_except_hook(options.debugger,
options.debug)
def get_usage(self):
s = 'usage: %prog [options] '
l = [ a.long_name for a in self.Args ]
s += ' '.join(l)
for a in self.Args:
if a.doc:
s += '\n\n\t%s - %s' % (a.long_name, a.doc)
return s
def build_cli_parser(self):
self.parser = optparse.OptionParser(description=self.Description,
usage=self.get_usage())
self.add_standard_options()
for param in self.Params:
ptype = action = choices = None
if param.ptype in self.CLITypeMap:
ptype = self.CLITypeMap[param.ptype]
action = 'store'
if param.ptype == 'boolean':
action = 'store_true'
elif param.ptype == 'array':
if len(param.items) == 1:
ptype = param.items[0]['type']
action = 'append'
elif param.cardinality != 1:
action = 'append'
if ptype or action == 'store_true':
if param.short_name:
self.parser.add_option(param.optparse_short_name,
param.optparse_long_name,
action=action, type=ptype,
choices=param.choices,
help=param.doc)
elif param.long_name:
self.parser.add_option(param.optparse_long_name,
action=action, type=ptype,
choices=param.choices,
help=param.doc)
def do_cli(self):
if not self.parser:
self.build_cli_parser()
self.cli_options, self.cli_args = self.parser.parse_args()
d = {}
self.process_standard_options(self.cli_options, self.cli_args, d)
for param in self.Params:
if param.long_name:
p_name = param.long_name.replace('-', '_')
else:
p_name = boto.utils.pythonize_name(param.name)
value = getattr(self.cli_options, p_name)
if param.ptype == 'file' and value:
if value == '-':
value = sys.stdin.read()
else:
path = os.path.expanduser(value)
path = os.path.expandvars(path)
if os.path.isfile(path):
fp = open(path)
value = fp.read()
fp.close()
else:
self.parser.error('Unable to read file: %s' % path)
d[p_name] = value
for arg in self.Args:
if arg.long_name:
p_name = arg.long_name.replace('-', '_')
else:
p_name = boto.utils.pythonize_name(arg.name)
value = None
if arg.cardinality == 1:
if len(self.cli_args) >= 1:
value = self.cli_args[0]
else:
value = self.cli_args
d[p_name] = value
self.args.update(d)
if hasattr(self.cli_options, 'filter') and self.cli_options.filter:
d = {}
for filter in self.cli_options.filter:
name, value = filter.split('=')
d[name] = value
if 'filters' in self.args:
self.args['filters'].update(d)
else:
self.args['filters'] = d
try:
response = self.main()
self.cli_formatter(response)
except RequiredParamError, e:
print e
sys.exit(1)
except self.ServiceClass.ResponseError, err:
print 'Error(%s): %s' % (err.error_code, err.error_message)
sys.exit(1)
except boto.roboto.awsqueryservice.NoCredentialsError, err:
print 'Unable to find credentials.'
sys.exit(1)
except Exception, e:
print e
sys.exit(1)
def _generic_cli_formatter(self, fmt, data, label=''):
if fmt['type'] == 'object':
for prop in fmt['properties']:
if 'name' in fmt:
if fmt['name'] in data:
data = data[fmt['name']]
if fmt['name'] in self.list_markers:
label = fmt['name']
if label[-1] == 's':
label = label[0:-1]
label = label.upper()
self._generic_cli_formatter(prop, data, label)
elif fmt['type'] == 'array':
for item in data:
line = Line(fmt, item, label)
if isinstance(item, dict):
for field_name in item:
line.append(item[field_name])
elif isinstance(item, basestring):
line.append(item)
line.print_it()
def cli_formatter(self, data):
"""
This method is responsible for formatting the output for the
command line interface. The default behavior is to call the
generic CLI formatter which attempts to print something
reasonable. If you want specific formatting, you should
override this method and do your own thing.
:type data: dict
:param data: The data returned by AWS.
"""
if data:
self._generic_cli_formatter(self.Response, data)
|
swcurran/tfrs
|
refs/heads/master
|
backend/api/models/OrganizationActionsType.py
|
1
|
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline compliance reporting for transportation fuel suppliers in accordance with the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from auditable.models import Auditable
class OrganizationActionsType(Auditable):
the_type = models.CharField(max_length=25)
description = models.CharField(max_length=1000, blank=True, null=True)
effective_date = models.DateField(blank=True, null=True)
expiration_date = models.DateField(blank=True, null=True)
display_order = models.IntegerField()
class Meta:
db_table = 'organization_actions_type'
|
dfalt974/SickRage
|
refs/heads/master
|
lib/past/builtins/__init__.py
|
62
|
"""
A resurrection of some old functions from Python 2 for use in Python 3. These
should be used sparingly, to help with porting efforts, since code using them
is no longer standard Python 3 code.
This module provides the following:
1. Implementations of these builtin functions which have no equivalent on Py3:
- apply
- chr
- cmp
- execfile
2. Aliases:
- intern <- sys.intern
- raw_input <- input
- reduce <- functools.reduce
- reload <- imp.reload
- unichr <- chr
- unicode <- str
- xrange <- range
3. List-producing versions of the corresponding Python 3 iterator-producing functions:
- filter
- map
- range
- zip
4. Forward-ported Py2 types:
- basestring
- dict
- str
- long
- unicode
"""
from future.utils import PY3
from past.builtins.noniterators import (filter, map, range, reduce, zip)
# from past.builtins.misc import (ascii, hex, input, oct, open)
if PY3:
from past.types import (basestring,
olddict as dict,
oldstr as str,
long,
unicode)
else:
from __builtin__ import (basestring, dict, str, long, unicode)
from past.builtins.misc import (apply, chr, cmp, execfile, intern, oct,
raw_input, reload, unichr, unicode, xrange)
from past import utils
if utils.PY3:
# We only import names that shadow the builtins on Py3. No other namespace
# pollution on Py3.
# Only shadow builtins on Py3; no new names
__all__ = ['filter', 'map', 'range', 'reduce', 'zip',
'basestring', 'dict', 'str', 'long', 'unicode',
'apply', 'chr', 'cmp', 'execfile', 'intern', 'raw_input',
'reload', 'unichr', 'xrange'
]
else:
# No namespace pollution on Py2
__all__ = []
|
keithroe/vtkoptix
|
refs/heads/master
|
ThirdParty/Twisted/twisted/web/http_headers.py
|
44
|
# -*- test-case-name: twisted.web.test.test_http_headers
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
An API for storing HTTP header names and values.
"""
from __future__ import division, absolute_import
from collections import MutableMapping
from twisted.python.compat import comparable, cmp
def _dashCapitalize(name):
"""
Return a byte string which is capitalized using '-' as a word separator.
@param name: The name of the header to capitalize.
@type name: C{bytes}
@return: The given header capitalized using '-' as a word separator.
@rtype: C{bytes}
"""
return b'-'.join([word.capitalize() for word in name.split(b'-')])
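# e.g. _dashCapitalize(b'x-forwarded-for') returns b'X-Forwarded-For'; names
# whose canonical form isn't dash-capitalized (ETag, WWW-Authenticate, ...)
# are special-cased in Headers._caseMappings below.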
class _DictHeaders(MutableMapping):
"""
A C{dict}-like wrapper around L{Headers} to provide backwards compatibility
for L{twisted.web.http.Request.received_headers} and
L{twisted.web.http.Request.headers} which used to be plain C{dict}
instances.
@type _headers: L{Headers}
@ivar _headers: The real header storage object.
"""
def __init__(self, headers):
self._headers = headers
def __getitem__(self, key):
"""
Return the last value for header of C{key}.
"""
if self._headers.hasHeader(key):
return self._headers.getRawHeaders(key)[-1]
raise KeyError(key)
def __setitem__(self, key, value):
"""
Set the given header.
"""
self._headers.setRawHeaders(key, [value])
def __delitem__(self, key):
"""
Delete the given header.
"""
if self._headers.hasHeader(key):
self._headers.removeHeader(key)
else:
raise KeyError(key)
def __iter__(self):
"""
Return an iterator of the lowercase name of each header present.
"""
for k, v in self._headers.getAllRawHeaders():
yield k.lower()
def __len__(self):
"""
Return the number of distinct headers present.
"""
# XXX Too many _
return len(self._headers._rawHeaders)
# Extra methods that MutableMapping doesn't care about but that we do.
def copy(self):
"""
Return a C{dict} mapping each header name to the last corresponding
header value.
"""
return dict(self.items())
def has_key(self, key):
"""
Return C{True} if C{key} is a header in this collection, C{False}
otherwise.
"""
return key in self
@comparable
class Headers(object):
"""
This class stores the HTTP headers as both a parsed representation
and the raw string representation. It converts between the two on
demand.
@cvar _caseMappings: A C{dict} that maps lowercase header names
to their canonicalized representation.
@ivar _rawHeaders: A C{dict} mapping header names as C{bytes} to C{lists} of
header values as C{bytes}.
"""
_caseMappings = {
b'content-md5': b'Content-MD5',
b'dnt': b'DNT',
b'etag': b'ETag',
b'p3p': b'P3P',
b'te': b'TE',
b'www-authenticate': b'WWW-Authenticate',
b'x-xss-protection': b'X-XSS-Protection'}
def __init__(self, rawHeaders=None):
self._rawHeaders = {}
if rawHeaders is not None:
for name, values in rawHeaders.items():
self.setRawHeaders(name, values[:])
def __repr__(self):
"""
Return a string fully describing the headers set on this object.
"""
return '%s(%r)' % (self.__class__.__name__, self._rawHeaders,)
def __cmp__(self, other):
"""
Define L{Headers} instances as being equal to each other if they have
the same raw headers.
"""
if isinstance(other, Headers):
return cmp(
sorted(self._rawHeaders.items()),
sorted(other._rawHeaders.items()))
return NotImplemented
def copy(self):
"""
Return a copy of itself with the same headers set.
"""
return self.__class__(self._rawHeaders)
def hasHeader(self, name):
"""
Check for the existence of a given header.
@type name: C{bytes}
@param name: The name of the HTTP header to check for.
@rtype: C{bool}
@return: C{True} if the header exists, otherwise C{False}.
"""
return name.lower() in self._rawHeaders
def removeHeader(self, name):
"""
Remove the named header from this header object.
@type name: C{bytes}
@param name: The name of the HTTP header to remove.
@return: C{None}
"""
self._rawHeaders.pop(name.lower(), None)
def setRawHeaders(self, name, values):
"""
Sets the raw representation of the given header.
@type name: C{bytes}
@param name: The name of the HTTP header to set the values for.
@type values: C{list}
@param values: A list of strings each one being a header value of
the given name.
@return: C{None}
"""
if not isinstance(values, list):
raise TypeError("Header entry %r should be list but found "
"instance of %r instead" % (name, type(values)))
self._rawHeaders[name.lower()] = values
def addRawHeader(self, name, value):
"""
Add a new raw value for the given header.
@type name: C{bytes}
@param name: The name of the header for which to set the value.
@type value: C{bytes}
@param value: The value to set for the named header.
"""
values = self.getRawHeaders(name)
if values is None:
self.setRawHeaders(name, [value])
else:
values.append(value)
def getRawHeaders(self, name, default=None):
"""
Returns a list of headers matching the given name as the raw string
given.
@type name: C{bytes}
@param name: The name of the HTTP header to get the values of.
@param default: The value to return if no header with the given C{name}
exists.
@rtype: C{list}
@return: A C{list} of values for the given header.
"""
return self._rawHeaders.get(name.lower(), default)
def getAllRawHeaders(self):
"""
Return an iterator of key, value pairs of all headers contained in this
object, as strings. The keys are capitalized in canonical
capitalization.
"""
for k, v in self._rawHeaders.items():
yield self._canonicalNameCaps(k), v
def _canonicalNameCaps(self, name):
"""
Return the canonical name for the given header.
@type name: C{bytes}
@param name: The all-lowercase header name to capitalize in its
canonical form.
@rtype: C{bytes}
@return: The canonical name of the header.
"""
return self._caseMappings.get(name, _dashCapitalize(name))
__all__ = ['Headers']
|
jmgc/myhdl-numeric
|
refs/heads/numeric
|
example/cookbook/bitonic/bitonic.py
|
1
|
import subprocess
from myhdl import *
from myhdl.conversion import analyze
DESCENDING, ASCENDING = False, True
def compare(a1, a2, z1, z2, dir):
@always_comb
def logic():
z1.next = a1
z2.next = a2
if dir == (a1 > a2):
z1.next = a2
z2.next = a1
return logic
def feedthru(a, z):
@always_comb
def logic():
z.next = a
return logic
def bitonicMerge(a, z, dir):
n = len(a)
k = n//2
w = len(a[0])
if n > 1:
t = [Signal(intbv(0)[w:]) for i in range(n)]
comp = [compare(a[i], a[i+k], t[i], t[i+k], dir) for i in range(k)]
loMerge = bitonicMerge(t[:k], z[:k], dir)
hiMerge = bitonicMerge(t[k:], z[k:], dir)
return comp, loMerge, hiMerge
else:
feed = feedthru(a[0], z[0])
return feed
def bitonicSort(a, z, dir):
n = len(a)
k = n//2
w = len(a[0])
if n > 1:
t = [Signal(intbv(0)[w:]) for i in range(n)]
loSort = bitonicSort(a[:k], t[:k], ASCENDING)
hiSort = bitonicSort(a[k:], t[k:], DESCENDING)
merge = bitonicMerge(t, z, dir)
return loSort, hiSort, merge
else:
feed = feedthru(a[0], z[0])
return feed
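# Structure note (illustrative, not from the original source): for n = 8,
# bitonicSort sorts the lower half ascending and the upper half descending,
# producing a bitonic sequence that bitonicMerge then resolves with
# comparator stages of span k = 4, 2, 1.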
def Array8Sorter(a0, a1, a2, a3, a4, a5, a6, a7,
z0, z1, z2, z3, z4, z5, z6, z7):
a = [a0, a1, a2, a3, a4, a5, a6, a7]
z = [z0, z1, z2, z3, z4, z5, z6, z7]
sort = bitonicSort(a, z, ASCENDING)
return sort
def Array8Sorter_v(a0, a1, a2, a3, a4, a5, a6, a7,
z0, z1, z2, z3, z4, z5, z6, z7):
analyze.simulator = 'iverilog'
toVerilog(Array8Sorter, a0, a1, a2, a3, a4, a5, a6, a7,
z0, z1, z2, z3, z4, z5, z6, z7)
analyze(Array8Sorter, a0, a1, a2, a3, a4, a5, a6, a7,
z0, z1, z2, z3, z4, z5, z6, z7)
# cmd = "cver -q +loadvpi=../../../cosimulation/cver/myhdl_vpi:vpi_compat_bootstrap " + \
# "Array8Sorter.v tb_Array8Sorter.v"
subprocess.call("iverilog -o Array8Sorter.o Array8Sorter.v tb_Array8Sorter.v", shell=True)
cmd = "vvp -m ../../../cosimulation/icarus/myhdl.vpi Array8Sorter.o"
return Cosimulation(cmd, **locals())
|
sinhrks/scikit-learn
|
refs/heads/master
|
sklearn/datasets/setup.py
|
306
|
import numpy
import os
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('datasets', parent_package, top_path)
config.add_data_dir('data')
config.add_data_dir('descr')
config.add_data_dir('images')
config.add_data_dir(os.path.join('tests', 'data'))
config.add_extension('_svmlight_format',
sources=['_svmlight_format.c'],
include_dirs=[numpy.get_include()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
jackytu/newbrandx
|
refs/heads/rankx
|
src/oscar/apps/checkout/session.py
|
4
|
from decimal import Decimal as D
from django import http
from django.contrib import messages
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from oscar.core import prices
from oscar.core.loading import get_class, get_model
from . import exceptions
Repository = get_class('shipping.repository', 'Repository')
OrderTotalCalculator = get_class(
'checkout.calculators', 'OrderTotalCalculator')
CheckoutSessionData = get_class(
'checkout.utils', 'CheckoutSessionData')
ShippingAddress = get_model('order', 'ShippingAddress')
BillingAddress = get_model('order', 'BillingAddress')
UserAddress = get_model('address', 'UserAddress')
class CheckoutSessionMixin(object):
"""
Mixin to provide common functionality shared between checkout views.
All checkout views subclass this mixin. It ensures that all relevant
checkout information is available in the template context.
"""
# A pre-condition is a condition that MUST be met in order for a view
# to be available. If it isn't then the customer should be redirected
# to a view *earlier* in the chain.
# pre_conditions is a list of method names that get executed before the
# normal flow of the view. Each method should check some condition has been
# met. If not, then an exception is raised that indicates the URL the
# customer will be redirected to.
pre_conditions = None
# A *skip* condition is a condition that MUST NOT be met in order for a
# view to be available. If the condition is met, this means the view MUST
# be skipped and the customer should be redirected to a view *later* in
# the chain.
# Skip conditions work similar to pre-conditions, and get evaluated after
# pre-conditions have been evaluated.
skip_conditions = None
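# A hypothetical subclass (not part of this module) might declare:
#
#     class PaymentDetailsView(CheckoutSessionMixin, generic.TemplateView):
#         pre_conditions = ['check_basket_is_not_empty',
#                           'check_shipping_data_is_captured']
#         skip_conditions = ['skip_unless_payment_is_required']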
def dispatch(self, request, *args, **kwargs):
# Assign the checkout session manager so it's available in all checkout
# views.
self.checkout_session = CheckoutSessionData(request)
# Enforce any pre-conditions for the view.
try:
self.check_pre_conditions(request)
except exceptions.FailedPreCondition as e:
for message in e.messages:
messages.warning(request, message)
return http.HttpResponseRedirect(e.url)
# Check if this view should be skipped
try:
self.check_skip_conditions(request)
except exceptions.PassedSkipCondition as e:
return http.HttpResponseRedirect(e.url)
return super(CheckoutSessionMixin, self).dispatch(
request, *args, **kwargs)
def check_pre_conditions(self, request):
pre_conditions = self.get_pre_conditions(request)
for method_name in pre_conditions:
if not hasattr(self, method_name):
raise ImproperlyConfigured(
"There is no method '%s' to call as a pre-condition" % (
method_name))
getattr(self, method_name)(request)
def get_pre_conditions(self, request):
"""
Return the pre-condition method names to run for this view
"""
if self.pre_conditions is None:
return []
return self.pre_conditions
def check_skip_conditions(self, request):
skip_conditions = self.get_skip_conditions(request)
for method_name in skip_conditions:
if not hasattr(self, method_name):
raise ImproperlyConfigured(
"There is no method '%s' to call as a skip-condition" % (
method_name))
getattr(self, method_name)(request)
def get_skip_conditions(self, request):
"""
Return the skip-condition method names to run for this view
"""
if self.skip_conditions is None:
return []
return self.skip_conditions
# Re-usable pre-condition validators
def check_basket_is_not_empty(self, request):
if request.basket.is_empty:
raise exceptions.FailedPreCondition(
url=reverse('basket:summary'),
message=_(
"You need to add some items to your basket to checkout")
)
def check_basket_is_valid(self, request):
"""
Check that the basket is permitted to be submitted as an order. That
is, all the basket lines are available to buy - nothing has gone out of
stock since it was added to the basket.
"""
messages = []
strategy = request.strategy
for line in request.basket.all_lines():
result = strategy.fetch_for_line(line)
is_permitted, reason = result.availability.is_purchase_permitted(
line.quantity)
if not is_permitted:
# Create a more meaningful message to show on the basket page
msg = _(
"'%(title)s' is no longer available to buy (%(reason)s). "
"Please adjust your basket to continue"
) % {
'title': line.product.get_title(),
'reason': reason}
messages.append(msg)
if messages:
raise exceptions.FailedPreCondition(
url=reverse('basket:summary'),
messages=messages
)
def check_user_email_is_captured(self, request):
if not request.user.is_authenticated() \
and not self.checkout_session.get_guest_email():
raise exceptions.FailedPreCondition(
url=reverse('checkout:index'),
message=_(
"Please either sign in or enter your email address")
)
def check_shipping_data_is_captured(self, request):
if not request.basket.is_shipping_required():
# Even without shipping being required, we still need to check that
# a shipping method code has been set.
if not self.checkout_session.is_shipping_method_set(
self.request.basket):
raise exceptions.FailedPreCondition(
url=reverse('checkout:shipping-method'),
)
return
# Basket requires shipping: check address and method are captured and
# valid.
self.check_a_valid_shipping_address_is_captured()
self.check_a_valid_shipping_method_is_captured()
def check_a_valid_shipping_address_is_captured(self):
# Check that shipping address has been completed
if not self.checkout_session.is_shipping_address_set():
raise exceptions.FailedPreCondition(
url=reverse('checkout:shipping-address'),
message=_("Please choose a shipping address")
)
# Check that the previously chosen shipping address is still valid
shipping_address = self.get_shipping_address(
basket=self.request.basket)
if not shipping_address:
raise exceptions.FailedPreCondition(
url=reverse('checkout:shipping-address'),
message=_("Your previously chosen shipping address is "
"no longer valid. Please choose another one")
)
def check_a_valid_shipping_method_is_captured(self):
# Check that shipping method has been set
if not self.checkout_session.is_shipping_method_set(
self.request.basket):
raise exceptions.FailedPreCondition(
url=reverse('checkout:shipping-method'),
message=_("Please choose a shipping method")
)
# Check that a *valid* shipping method has been set
shipping_address = self.get_shipping_address(
basket=self.request.basket)
shipping_method = self.get_shipping_method(
basket=self.request.basket,
shipping_address=shipping_address)
if not shipping_method:
raise exceptions.FailedPreCondition(
url=reverse('checkout:shipping-method'),
message=_("Your previously chosen shipping method is "
"no longer valid. Please choose another one")
)
def check_payment_data_is_captured(self, request):
# We don't collect payment data by default so we don't have anything to
# validate here. If your shop requires forms to be submitted on the
# payment details page, then override this method to check that the
# relevant data is available. Often just enforcing that the preview
# view is only accessible from a POST request is sufficient.
pass
# Re-usable skip conditions
def skip_unless_basket_requires_shipping(self, request):
# Check to see that a shipping address is actually required. It may
# not be if the basket is purely downloads
if not request.basket.is_shipping_required():
raise exceptions.PassedSkipCondition(
url=reverse('checkout:shipping-method')
)
def skip_unless_payment_is_required(self, request):
# Check to see if payment is actually required for this order.
shipping_address = self.get_shipping_address(request.basket)
shipping_method = self.get_shipping_method(
request.basket, shipping_address)
if shipping_method:
shipping_charge = shipping_method.calculate(request.basket)
else:
# It's unusual to get here as a shipping method should be set by
# the time this skip-condition is called. In the absence of any
# other evidence, we assume the shipping charge is zero.
shipping_charge = prices.Price(excl_tax=D('0.00'), tax=D('0.00'))
total = self.get_order_totals(request.basket, shipping_charge)
if total.excl_tax == D('0.00'):
raise exceptions.PassedSkipCondition(
url=reverse('checkout:preview')
)
# Helpers
def get_context_data(self, **kwargs):
# Use the proposed submission as template context data. Flatten the
# order kwargs so they are easily available too.
ctx = self.build_submission(**kwargs)
ctx.update(kwargs)
ctx.update(ctx['order_kwargs'])
return ctx
def build_submission(self, **kwargs):
"""
Return a dict of data that contains everything required for an order
submission. This includes payment details (if any).
This can be the right place to perform tax lookups and apply them to
the basket.
"""
basket = kwargs.get('basket', self.request.basket)
shipping_address = self.get_shipping_address(basket)
shipping_method = self.get_shipping_method(
basket, shipping_address)
billing_address = self.get_billing_address(shipping_address)
if not shipping_method:
total = shipping_charge = None
else:
shipping_charge = shipping_method.calculate(basket)
total = self.get_order_totals(
basket, shipping_charge=shipping_charge)
submission = {
'user': self.request.user,
'basket': basket,
'shipping_address': shipping_address,
'shipping_method': shipping_method,
'shipping_charge': shipping_charge,
'billing_address': billing_address,
'order_total': total,
'order_kwargs': {},
'payment_kwargs': {}}
# If there is a billing address, add it to the payment kwargs as calls
# to payment gateways generally require the billing address. Note, that
# it normally makes sense to pass the form instance that captures the
# billing address information. That way, if payment fails, you can
# render bound forms in the template to make re-submission easier.
if billing_address:
submission['payment_kwargs']['billing_address'] = billing_address
# Allow overrides to be passed in
submission.update(kwargs)
# Set guest email after overrides as we need to update the order_kwargs
# entry.
if (not submission['user'].is_authenticated() and
'guest_email' not in submission['order_kwargs']):
email = self.checkout_session.get_guest_email()
submission['order_kwargs']['guest_email'] = email
return submission
def get_shipping_address(self, basket):
"""
Return the (unsaved) shipping address for this checkout session.
If the shipping address was entered manually, then we instantiate a
``ShippingAddress`` model with the appropriate form data (which is
saved in the session).
If the shipping address was selected from the user's address book,
then we convert the ``UserAddress`` to a ``ShippingAddress``.
The ``ShippingAddress`` instance is not saved as sometimes you need a
shipping address instance before the order is placed. For example, if
you are submitting fraud information as part of a payment request.
The ``OrderPlacementMixin.create_shipping_address`` method is
responsible for saving a shipping address when an order is placed.
"""
if not basket.is_shipping_required():
return None
addr_data = self.checkout_session.new_shipping_address_fields()
if addr_data:
# Load address data into a blank shipping address model
return ShippingAddress(**addr_data)
addr_id = self.checkout_session.shipping_user_address_id()
if addr_id:
try:
address = UserAddress._default_manager.get(pk=addr_id)
except UserAddress.DoesNotExist:
# An address was selected but now it has disappeared. This can
# happen if the customer flushes their address book midway
# through checkout. No idea why they would do this but it can
# happen. Checkouts are highly vulnerable to race conditions
# like this.
return None
else:
# Copy user address data into a blank shipping address instance
shipping_addr = ShippingAddress()
address.populate_alternative_model(shipping_addr)
return shipping_addr
def get_shipping_method(self, basket, shipping_address=None, **kwargs):
"""
Return the selected shipping method instance from this checkout session
The shipping address is passed as we need to check that the method
stored in the session is still valid for the shipping address.
"""
code = self.checkout_session.shipping_method_code(basket)
methods = Repository().get_shipping_methods(
basket=basket, user=self.request.user,
shipping_addr=shipping_address, request=self.request)
for method in methods:
if method.code == code:
return method
def get_billing_address(self, shipping_address):
"""
Return an unsaved instance of the billing address (if one exists)
This method only returns a billing address if the session has been used
to store billing address information. It's also possible to capture
billing address information as part of the payment details forms, which
never get stored in the session. In that circumstance, the billing
address can be set directly in the build_submission dict (see Oscar's
demo site for an example of this approach).
"""
if not self.checkout_session.is_billing_address_set():
return None
if self.checkout_session.is_billing_address_same_as_shipping():
if shipping_address:
address = BillingAddress()
shipping_address.populate_alternative_model(address)
return address
addr_data = self.checkout_session.new_billing_address_fields()
if addr_data:
# A new billing address has been entered - load address data into a
# blank billing address model.
return BillingAddress(**addr_data)
addr_id = self.checkout_session.billing_user_address_id()
if addr_id:
# An address from the user's address book has been selected as the
# billing address - load it and convert it into a billing address
# instance.
try:
user_address = UserAddress._default_manager.get(pk=addr_id)
except UserAddress.DoesNotExist:
# An address was selected but now it has disappeared. This can
# happen if the customer flushes their address book midway
# through checkout. No idea why they would do this but it can
# happen. Checkouts are highly vulnerable to race conditions
# like this.
return None
else:
# Copy user address data into a blank shipping address instance
billing_address = BillingAddress()
user_address.populate_alternative_model(billing_address)
return billing_address
def get_order_totals(self, basket, shipping_charge, **kwargs):
"""
Returns the total for the order with and without tax
"""
return OrderTotalCalculator(self.request).calculate(
basket, shipping_charge, **kwargs)
|
vmendez/DIRAC
|
refs/heads/integration
|
FrameworkSystem/private/logging/backends/RemoteBackend.py
|
13
|
# $HeadURL$
__RCSID__ = "$Id$"
"""This Backend sends the Log Messages to a Log Server
It only reports ERROR, EXCEPTION, FATAL and ALWAYS messages to the server.
"""
import threading
import Queue
from DIRAC.Core.Utilities import Time, Network
from DIRAC.FrameworkSystem.private.logging.backends.BaseBackend import BaseBackend
from DIRAC.FrameworkSystem.private.logging.LogLevels import LogLevels
class RemoteBackend( BaseBackend, threading.Thread ):
def __init__( self, optionsDictionary ):
BaseBackend.__init__(self, optionsDictionary)
threading.Thread.__init__( self )
self.__interactive = optionsDictionary[ 'Interactive' ]
self.__sleep = optionsDictionary[ 'SleepTime' ]
self._messageQueue = Queue.Queue()
self._Transactions = []
self._alive = True
self._site = optionsDictionary[ 'Site' ]
self._hostname = Network.getFQDN()
self._logLevels = LogLevels()
self._negativeLevel = self._logLevels.getLevelValue( 'ERROR' )
self._positiveLevel = self._logLevels.getLevelValue( 'ALWAYS' )
self._maxBundledMessages = 20
self.setDaemon(1)
self.start()
def doMessage( self, messageObject ):
self._messageQueue.put( messageObject )
def run( self ):
import time
while self._alive:
self._bundleMessages()
time.sleep( self.__sleep )
def _bundleMessages( self ):
while not self._messageQueue.empty():
bundle = []
while ( len( bundle ) < self._maxBundledMessages ) and \
( not self._messageQueue.empty() ):
message = self._messageQueue.get()
if self._testLevel( message.getLevel() ):
bundle.append( message.toTuple() )
if len( bundle ):
self._sendMessageToServer( bundle )
if len( self._Transactions ):
self._sendMessageToServer()
def _sendMessageToServer( self, messageBundle=None ):
from DIRAC.Core.DISET.RPCClient import RPCClient
if messageBundle:
self._Transactions.append( messageBundle )
TransactionsLength = len( self._Transactions )
if TransactionsLength > 100:
del self._Transactions[:TransactionsLength-100]
TransactionsLength = 100
try:
oSock = RPCClient( "Framework/SystemLogging" )
except Exception,v:
return False
while TransactionsLength:
result = oSock.addMessages( self._Transactions[0],
self._site, self._hostname )
if result['OK']:
TransactionsLength = TransactionsLength - 1
self._Transactions.pop(0)
else:
return False
return True
def _testLevel( self, sLevel ):
messageLevel = self._logLevels.getLevelValue( sLevel )
return messageLevel <= self._negativeLevel or \
messageLevel >= self._positiveLevel
def flush( self ):
self._alive = False
if not self.__interactive and self._sendMessageToServer():
while not self._messageQueue.empty():
self._bundleMessages()
|
molokov/mezzanine
|
refs/heads/master
|
mezzanine/core/checks.py
|
5
|
from __future__ import unicode_literals
import pprint
from django import VERSION as DJANGO_VERSION
from django.conf import global_settings
from django.core.checks import Warning, register
from mezzanine.conf import settings
from mezzanine.utils.conf import middlewares_or_subclasses_installed
from mezzanine.utils.sites import SITE_PERMISSION_MIDDLEWARE
@register()
def check_template_settings(app_configs, **kwargs):
issues = []
if not settings.TEMPLATES:
suggested_config = _build_suggested_template_config(settings)
declaration = 'TEMPLATES = '
config_formatted = pprint.pformat(suggested_config)
config_formatted = "\n".join(' ' * len(declaration) + line
for line in config_formatted.splitlines())
config_formatted = declaration + config_formatted[len(declaration):]
issues.append(Warning(
"Please update your settings to use the TEMPLATES setting rather "
"than the deprecated individual TEMPLATE_ settings. The latter "
"are unsupported and correct behaviour is not guaranteed. Here's "
"a suggestion based on on your existing configuration:\n\n%s\n"
% config_formatted,
id="mezzanine.core.W01"
))
if settings.DEBUG != settings.TEMPLATE_DEBUG:
issues.append(Warning(
"TEMPLATE_DEBUG and DEBUG settings have different values, "
"which may not be what you want. Mezzanine used to fix this "
"for you, but doesn't any more. Update your settings.py to "
"use the TEMPLATES setting to have template debugging "
"controlled by the DEBUG setting.",
id="mezzanine.core.W02"
))
else:
loader_tags_built_in = any(
'mezzanine.template.loader_tags'
in config.get('OPTIONS', {}).get('builtins', {})
for config in settings.TEMPLATES
)
if DJANGO_VERSION >= (1, 9) and not loader_tags_built_in:
issues.append(Warning(
"You haven't included 'mezzanine.template.loader_tags' as a "
"builtin in any of your template configurations. Mezzanine's "
"'overextends' tag will not be available in your templates.",
id="mezzanine.core.W03"
))
return issues
def _build_suggested_template_config(settings):
suggested_templates_config = {
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"builtins": [
"mezzanine.template.loader_tags",
],
},
}
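# By way of example (hypothetical output, assuming a legacy project that
# only set TEMPLATE_DIRS and used the default loaders), the builder below
# would yield roughly:
#   [{'BACKEND': 'django.template.backends.django.DjangoTemplates',
#     'DIRS': [...], 'APP_DIRS': True,
#     'OPTIONS': {'builtins': ['mezzanine.template.loader_tags'],
#                 'context_processors': [...]}}]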
def set_setting(name, value, unconditional=False):
if value or unconditional:
suggested_templates_config[name] = value
def set_option(name, value):
if value:
suggested_templates_config["OPTIONS"][name.lower()] = value
def get_debug(_):
if settings.TEMPLATE_DEBUG != settings.DEBUG:
return settings.TEMPLATE_DEBUG
def get_default(default):
def getter(name):
value = getattr(settings, name)
if value == getattr(global_settings, name):
value = default
return value
return getter
default_context_processors = [
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
]
def get_loaders(_):
"""
Django's default TEMPLATES setting doesn't specify loaders, instead
dynamically sets a default based on whether or not APP_DIRS is True.
We check here if the existing TEMPLATE_LOADERS setting matches one
of those default cases, and omit the 'loaders' option if so.
"""
template_loaders = list(settings.TEMPLATE_LOADERS)
default_loaders = list(global_settings.TEMPLATE_LOADERS)
if template_loaders == default_loaders:
# Equivalent to Django's default with APP_DIRS True
template_loaders = None
app_dirs = True
elif template_loaders == default_loaders[:1]:
# Equivalent to Django's default with APP_DIRS False
template_loaders = None
app_dirs = False
else:
# This project has a custom loaders setting, which we'll use.
# Custom loaders are incompatible with APP_DIRS.
app_dirs = False
return template_loaders, app_dirs
def set_loaders(name, value):
template_loaders, app_dirs = value
set_option(name, template_loaders)
set_setting('APP_DIRS', app_dirs, unconditional=True)
old_settings = [
('ALLOWED_INCLUDE_ROOTS', settings.__getattr__, set_option),
('TEMPLATE_STRING_IF_INVALID', settings.__getattr__, set_option),
('TEMPLATE_DIRS', settings.__getattr__, set_setting),
('TEMPLATE_CONTEXT_PROCESSORS',
get_default(default_context_processors), set_option),
('TEMPLATE_DEBUG', get_debug, set_option),
('TEMPLATE_LOADERS', get_loaders, set_loaders),
]
def convert_setting_name(old_name):
return old_name.rpartition('TEMPLATE_')[2]
for setting_name, getter, setter in old_settings:
value = getter(setting_name)
new_setting_name = convert_setting_name(setting_name)
setter(new_setting_name, value)
return [suggested_templates_config]
@register()
def check_sites_middleware(app_configs, **kwargs):
if not middlewares_or_subclasses_installed([SITE_PERMISSION_MIDDLEWARE]):
return [Warning(SITE_PERMISSION_MIDDLEWARE +
" missing from settings.MIDDLEWARE - per site"
" permissions not applied",
id="mezzanine.core.W04")]
return []
|
ativelkov/murano-api
|
refs/heads/master
|
murano/db/services/sessions.py
|
1
|
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from murano.common.helpers import token_sanitizer
from murano.common import rpc
from murano.db import models
from murano.db import session as db_session
SessionState = collections.namedtuple('SessionState', [
'open', 'deploying', 'deployed'
])(
open='open', deploying='deploying', deployed='deployed'
)
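# Note: SessionState is a namedtuple *instance*, so attribute access yields
# the state strings (SessionState.open == 'open') and iterating over it
# yields all three values; the .in_(SessionState) filter in get_sessions()
# relies on that.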
class SessionServices(object):
@staticmethod
def get_sessions(environment_id, state=None):
"""
Get list of sessions for specified environment
:param environment_id: Environment Id
:param state: glazierapi.db.services.environments.EnvironmentStatus
:return: Sessions for the specified environment; if no state is
given, sessions in all states are returned.
"""
unit = db_session.get_session()
# Duplicate the validation logic here to reduce database calls;
# the checks are the same as in validate().
environment = unit.query(models.Environment).get(environment_id)
return unit.query(models.Session).filter(
#Get all sessions for this environment
models.Session.environment_id == environment_id,
#in this state; if state is not specified, return sessions in all states
models.Session.state.in_(SessionState
if state is None else [state]),
#Only sessions with the same version as the current env version are valid
models.Session.version == environment.version
).all()
@staticmethod
def create(environment_id, user_id):
"""
Creates session object for specific environment for specified user.
:param environment_id: Environment Id
:param user_id: User Id
:return: Created session
"""
unit = db_session.get_session()
environment = unit.query(models.Environment).get(environment_id)
session = models.Session()
session.environment_id = environment.id
session.user_id = user_id
session.state = SessionState.open
# used for checking whether other sessions were deployed before this one
session.version = environment.version
# all changes to the environment are stored here and applied to the
# environment only after deployment completes
session.description = environment.description
with unit.begin():
unit.add(session)
return session
@staticmethod
def validate(session):
"""
A session is valid only if no other session for the same
environment has already been deployed or is currently deploying.
:param session: Session for validation
"""
unit = db_session.get_session()
#if the environment version is higher than the version this session was
#created at, another session has already been deployed
current_env = unit.query(models.Environment).\
get(session.environment_id)
if current_env.version > session.version:
return False
#if another session is deploying right now, the current session is invalid
other_is_deploying = unit.query(models.Session).filter_by(
environment_id=session.environment_id, state=SessionState.deploying
).count() > 0
if session.state == SessionState.open and other_is_deploying:
return False
return True
@staticmethod
def deploy(session, unit, token):
"""
Prepares environment for deployment and send deployment command to
orchestration engine
:param session: session that is going to be deployed
:param unit: SQLalchemy session
:param token: auth token that is going to be used by orchestration
"""
#Set X-Auth-Token for conductor
environment = unit.query(models.Environment).get(
session.environment_id)
task = {
'action': {
'object_id': environment.id,
'method': 'deploy',
'args': {}
},
'model': session.description,
'token': token,
'tenant_id': environment.tenant_id
}
task['model']['Objects']['?']['id'] = environment.id
task['model']['Objects']['applications'] = \
task['model']['Objects'].get('services', [])
if 'services' in task['model']['Objects']:
del task['model']['Objects']['services']
session.state = SessionState.deploying
deployment = models.Deployment()
deployment.environment_id = session.environment_id
deployment.description = token_sanitizer.TokenSanitizer().sanitize(
dict(session.description.get('Objects')))
status = models.Status()
status.text = "Deployment scheduled"
status.level = "info"
deployment.statuses.append(status)
with unit.begin():
unit.add(session)
unit.add(deployment)
rpc.engine().handle_task(task)
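# Editor's note: a minimal, hypothetical sketch of how the three static
# methods above chain together. It assumes the enclosing class (defined
# earlier in this module, outside this excerpt) is named SessionServices;
# environment_id, user_id and token are caller-supplied placeholders.
def _example_deploy_flow(environment_id, user_id, token):
    session = SessionServices.create(environment_id, user_id)
    if not SessionServices.validate(session):
        raise RuntimeError('another session was deployed or is deploying')
    unit = db_session.get_session()
    SessionServices.deploy(session, unit, token)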
|
zephirefaith/AI_Fall15_Assignments
|
refs/heads/master
|
A2/lib/networkx/readwrite/json_graph/tests/test_tree.py
|
63
|
import json
from nose.tools import assert_equal, assert_raises, assert_not_equal, assert_true, raises
import networkx as nx
from networkx.readwrite.json_graph import *
class TestTree:
def test_graph(self):
G=nx.DiGraph()
G.add_nodes_from([1,2,3],color='red')
G.add_edge(1,2,foo=7)
G.add_edge(1,3,foo=10)
G.add_edge(3,4,foo=10)
H = tree_graph(tree_data(G,1))
assert_true(nx.is_isomorphic(G, H))
def test_graph_attributes(self):
G=nx.DiGraph()
G.add_nodes_from([1,2,3],color='red')
G.add_edge(1,2,foo=7)
G.add_edge(1,3,foo=10)
G.add_edge(3,4,foo=10)
H = tree_graph(tree_data(G,1))
assert_equal(H.node[1]['color'],'red')
d = json.dumps(tree_data(G,1))
H = tree_graph(json.loads(d))
assert_equal(H.node[1]['color'],'red')
@raises(nx.NetworkXError)
def test_exception(self):
G = nx.MultiDiGraph()
G.add_node(0)
attrs = dict(id='node', children='node')
tree_data(G, 0, attrs)
|
gossion/azure-quickstart-templates
|
refs/heads/master
|
cloudera-director-on-centos/scripts/director_user_passwd.py
|
102
|
#! /usr/bin/env python
# Copyright (c) 2016 Cloudera, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Simple script that shows how to use the Cloudera Director API to initialize
# the environment and instance templates
from urllib2 import HTTPError
from cloudera.director.latest.models import Login, User
from cloudera.director.common.client import ApiClient
from cloudera.director.latest import AuthenticationApi, UsersApi
import sys
import logging
# logging starts
logging.basicConfig(filename='/var/log/cloudera-azure-initialize.log', level=logging.DEBUG)
logging.info('started')
class ExitCodes(object):
"""
Exit code definition
"""
OK = 0
ERROR = 1
def prepare_user(username, password):
"""
Create a new user account (admin) for Cloudera Director Server
:param username: Username for the new account
:param password: Password for the new account
:return: API exit code
"""
# Cloudera Director server runs at http://127.0.0.1:7189
try:
logging.info('Creating new admin user for Cloudera Director Server')
client = ApiClient("http://localhost:7189")
AuthenticationApi(client).login(
Login(username="admin", password="admin")) # create new login base on user input
users_api = UsersApi(client)
# Admin user by default has both roles
users_api.create(User(username=username, password=password, enabled=True,
roles=["ROLE_ADMIN", "ROLE_READONLY"]))
logging.info('Successfully created new admin user %s.' % username)
except HTTPError, e:
logging.error("Failed to create user '%s'. %s" % (username, e.msg))
return ExitCodes.ERROR
# delete existing admin user using the new account
try:
logging.info("Deleting default user 'admin' for Cloudera Director Server")
client = ApiClient("http://localhost:7189")
AuthenticationApi(client).login(Login(username=username, password=password))
users_api = UsersApi(client)
users_api.delete("admin")
logging.info("Successfully deleted default user 'admin'")
return ExitCodes.OK
except HTTPError, e:
logging.error("Failed to delete default user 'admin'. %s" % e.msg)
return ExitCodes.ERROR
dirUsername = sys.argv[1]
dirPassword = sys.argv[2]
sys.exit(prepare_user(dirUsername, dirPassword))
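# Editor's note: invocation sketch -- the script takes the new admin
# username and password as positional arguments (values are illustrative):
#
#     python director_user_passwd.py newadmin 's3cretPassw0rd'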
|
ChenJunor/hue
|
refs/heads/master
|
desktop/core/ext-py/pysqlite/doc/includes/sqlite3/executescript.py
|
49
|
from pysqlite2 import dbapi2 as sqlite3
con = sqlite3.connect(":memory:")
cur = con.cursor()
cur.executescript("""
create table person(
firstname,
lastname,
age
);
create table book(
title,
author,
published
);
insert into book(title, author, published)
values (
'Dirk Gently''s Holistic Detective Agency',
'Douglas Adams',
1987
);
""")
|
DarthMaulware/EquationGroupLeaks
|
refs/heads/master
|
Leak #5 - Lost In Translation/windows/Resources/Python/Core/Lib/lib2to3/pgen2/grammar.py
|
1
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: grammar.py
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
import pickle
from . import token, tokenize
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
states, each state is a list of arcs, and each
arc is a (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [
(0, 'EMPTY')]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, 'wb')
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, 'rb')
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def copy(self):
"""
Copy the grammar.
"""
new = self.__class__()
for dict_attr in ('symbol2number', 'number2symbol', 'dfas', 'keywords', 'tokens',
'symbol2label'):
setattr(new, dict_attr, getattr(self, dict_attr).copy())
new.labels = self.labels[:]
new.states = self.states[:]
new.start = self.start
return new
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print 's2n'
pprint(self.symbol2number)
print 'n2s'
pprint(self.number2symbol)
print 'states'
pprint(self.states)
print 'dfas'
pprint(self.dfas)
print 'labels'
pprint(self.labels)
print 'start', self.start
opmap_raw = '\n( LPAR\n) RPAR\n[ LSQB\n] RSQB\n: COLON\n, COMMA\n; SEMI\n+ PLUS\n- MINUS\n* STAR\n/ SLASH\n| VBAR\n& AMPER\n< LESS\n> GREATER\n= EQUAL\n. DOT\n% PERCENT\n` BACKQUOTE\n{ LBRACE\n} RBRACE\n@ AT\n== EQEQUAL\n!= NOTEQUAL\n<> NOTEQUAL\n<= LESSEQUAL\n>= GREATEREQUAL\n~ TILDE\n^ CIRCUMFLEX\n<< LEFTSHIFT\n>> RIGHTSHIFT\n** DOUBLESTAR\n+= PLUSEQUAL\n-= MINEQUAL\n*= STAREQUAL\n/= SLASHEQUAL\n%= PERCENTEQUAL\n&= AMPEREQUAL\n|= VBAREQUAL\n^= CIRCUMFLEXEQUAL\n<<= LEFTSHIFTEQUAL\n>>= RIGHTSHIFTEQUAL\n**= DOUBLESTAREQUAL\n// DOUBLESLASH\n//= DOUBLESLASHEQUAL\n-> RARROW\n'
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
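# Editor's sketch of the dump()/load() round trip described in the class
# docstring. It assumes `grammar` was already populated by a subclass (see
# the conv and pgen modules); the filename is illustrative.
def _example_roundtrip(grammar, filename='grammar.pickle'):
    grammar.dump(filename)
    restored = Grammar()
    restored.load(filename)
    return restored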
|
goldcase/systemd
|
refs/heads/master
|
tools/make-man-index.py
|
125
|
# -*- Mode: python; coding: utf-8; indent-tabs-mode: nil -*- */
#
# This file is part of systemd.
#
# Copyright 2012 Lennart Poettering
# Copyright 2013 Zbigniew Jędrzejewski-Szmek
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# systemd is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with systemd; If not, see <http://www.gnu.org/licenses/>.
import collections
import sys
import re
from xml_helper import *
MDASH = ' — ' if sys.version_info.major >= 3 else ' -- '
TEMPLATE = '''\
<refentry id="systemd.index" conditional="HAVE_PYTHON">
<refentryinfo>
<title>systemd.index</title>
<productname>systemd</productname>
<authorgroup>
<author>
<contrib>Developer</contrib>
<firstname>Lennart</firstname>
<surname>Poettering</surname>
<email>lennart@poettering.net</email>
</author>
</authorgroup>
</refentryinfo>
<refmeta>
<refentrytitle>systemd.index</refentrytitle>
<manvolnum>7</manvolnum>
</refmeta>
<refnamediv>
<refname>systemd.index</refname>
<refpurpose>List all manpages from the systemd project</refpurpose>
</refnamediv>
</refentry>
'''
SUMMARY = '''\
<refsect1>
<title>See Also</title>
<para>
<citerefentry><refentrytitle>systemd.directives</refentrytitle><manvolnum>7</manvolnum></citerefentry>
</para>
<para id='counts' />
</refsect1>
'''
COUNTS = '\
This index contains {count} entries, referring to {pages} individual manual pages.'
def check_id(page, t):
id = t.getroot().get('id')
if not re.search('/' + id + '[.]', page):
raise ValueError("id='{}' is not the same as page name '{}'".format(id, page))
def make_index(pages):
index = collections.defaultdict(list)
for p in pages:
t = xml_parse(p)
check_id(p, t)
section = t.find('./refmeta/manvolnum').text
refname = t.find('./refnamediv/refname').text
purpose = ' '.join(t.find('./refnamediv/refpurpose').text.split())
for f in t.findall('./refnamediv/refname'):
infos = (f.text, section, purpose, refname)
index[f.text[0].upper()].append(infos)
return index
def add_letter(template, letter, pages):
refsect1 = tree.SubElement(template, 'refsect1')
title = tree.SubElement(refsect1, 'title')
title.text = letter
para = tree.SubElement(refsect1, 'para')
for info in sorted(pages, key=lambda info: str.lower(info[0])):
refname, section, purpose, realname = info
b = tree.SubElement(para, 'citerefentry')
c = tree.SubElement(b, 'refentrytitle')
c.text = refname
d = tree.SubElement(b, 'manvolnum')
d.text = section
b.tail = MDASH + purpose # + ' (' + p + ')'
tree.SubElement(para, 'sbr')
def add_summary(template, indexpages):
count = 0
pages = set()
for group in indexpages:
count += len(group)
for info in group:
refname, section, purpose, realname = info
pages.add((realname, section))
refsect1 = tree.fromstring(SUMMARY)
template.append(refsect1)
para = template.find(".//para[@id='counts']")
para.text = COUNTS.format(count=count, pages=len(pages))
def make_page(*xml_files):
template = tree.fromstring(TEMPLATE)
index = make_index(xml_files)
for letter in sorted(index):
add_letter(template, letter, index[letter])
add_summary(template, index.values())
return template
if __name__ == '__main__':
with open(sys.argv[1], 'wb') as f:
f.write(xml_print(make_page(*sys.argv[2:])))
|
hectoruelo/scrapy
|
refs/heads/master
|
scrapy/http/common.py
|
188
|
def obsolete_setter(setter, attrname):
def newsetter(self, value):
c = self.__class__.__name__
msg = "%s.%s is not modifiable, use %s.replace() instead" % (c, attrname, c)
raise AttributeError(msg)
return newsetter
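# Editor's addition: a runnable sketch of how obsolete_setter is meant to
# be used -- guarding an attribute behind a read-only property. The example
# class and attribute names are illustrative, not part of this module.
class _ExampleRequest(object):
    def __init__(self, url):
        self._url = url

    def _get_url(self):
        return self._url

    def _set_url(self, value):
        self._url = value

    url = property(_get_url, obsolete_setter(_set_url, 'url'))

# _ExampleRequest('http://example.com').url = 'x'  # raises AttributeError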
|
lilida/teletraan
|
refs/heads/master
|
deploy-sentinel/mysite/mysite/settings.py
|
3
|
# Copyright 2016 Pinterest, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Django settings for mysite project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '5_y&4p=teg@cc8dn==sxvj$!ol&b9gmp=$k#(!i%zg%feu%95g'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
|
qt911025/pw_module_system
|
refs/heads/pw
|
header_item_modifiers.py
|
6
|
imod_plain = 0
imod_cracked = 1
imod_rusty = 2
imod_bent = 3
imod_chipped = 4
imod_battered = 5
imod_poor = 6
imod_crude = 7
imod_old = 8
imod_cheap = 9
imod_fine = 10
imod_well_made = 11
imod_sharp = 12
imod_balanced = 13
imod_tempered = 14
imod_deadly = 15
imod_exquisite = 16
imod_masterwork = 17
imod_heavy = 18
imod_strong = 19
imod_powerful = 20
imod_tattered = 21
imod_ragged = 22
imod_rough = 23
imod_sturdy = 24
imod_thick = 25
imod_hardened = 26
imod_reinforced = 27
imod_superb = 28
imod_lordly = 29
imod_lame = 30
imod_swaybacked = 31
imod_stubborn = 32
imod_timid = 33
imod_meek = 34
imod_spirited = 35
imod_champion = 36
imod_fresh = 37
imod_day_old = 38
imod_two_day_old = 39
imod_smelling = 40
imod_rotten = 41
imod_large_bag = 42
imodbit_plain = 1
imodbit_cracked = 2
imodbit_rusty = 4
imodbit_bent = 8
imodbit_chipped = 16
imodbit_battered = 32
imodbit_poor = 64
imodbit_crude = 128
imodbit_old = 256
imodbit_cheap = 512
imodbit_fine = 1024
imodbit_well_made = 2048
imodbit_sharp = 4096
imodbit_balanced = 8192
imodbit_tempered = 16384
imodbit_deadly = 32768
imodbit_exquisite = 65536
imodbit_masterwork = 131072
imodbit_heavy = 262144
imodbit_strong = 524288
imodbit_powerful = 1048576
imodbit_tattered = 2097152
imodbit_ragged = 4194304
imodbit_rough = 8388608
imodbit_sturdy = 16777216
imodbit_thick = 33554432
imodbit_hardened = 67108864
imodbit_reinforced = 134217728
imodbit_superb = 268435456
imodbit_lordly = 536870912
imodbit_lame = 1073741824
imodbit_swaybacked = 2147483648
imodbit_stubborn = 4294967296
imodbit_timid = 8589934592
imodbit_meek = 17179869184
imodbit_spirited = 34359738368
imodbit_champion = 68719476736
imodbit_fresh = 137438953472
imodbit_day_old = 274877906944
imodbit_two_day_old = 549755813888
imodbit_smelling = 1099511627776
imodbit_rotten = 2199023255552
imodbit_large_bag = 4398046511104
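# Editor's note: each imodbit_* flag is 1 shifted left by the matching
# imod_* index (imodbit_x == 1 << imod_x). A few spot checks:
assert imodbit_plain == 1 << imod_plain
assert imodbit_sharp == 1 << imod_sharp
assert imodbit_large_bag == 1 << imod_large_bag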
|
colinmcd94/kickdata
|
refs/heads/master
|
lib/requests/packages/urllib3/packages/ordered_dict.py
|
2039
|
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
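# Editor's demo (illustrative): insertion order is preserved, and popitem()
# pops in LIFO order by default, FIFO with last=False.
if __name__ == '__main__':
    od = OrderedDict([('a', 1), ('b', 2), ('c', 3)])
    assert od.keys() == ['a', 'b', 'c']
    assert od.popitem() == ('c', 3)
    assert od.popitem(last=False) == ('a', 1)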
|
kms123/PyKata
|
refs/heads/master
|
advent_2016/day_01_2_test.py
|
1
|
import unittest
from day_01_2 import calculate_positions
class CalculateLocationTests(unittest.TestCase):
def test_location_list_for_one_movement(self):
result = calculate_positions(['L1'])
self.assertEqual([(0, 0), (-1, 0)], result)
def test_location_list_for_long_movement(self):
result = calculate_positions(['L4'])
self.assertEqual([(0, 0), (-1, 0), (-2, 0), (-3, 0), (-4, 0)], result)
def test_location_list_for_many_movements(self):
result = calculate_positions(['L1', 'R3', 'R2', 'L1'])
self.assertEqual([(0, 0), (-1, 0), (-1, 1), (-1, 2), (-1, 3), (0, 3), (1, 3), (1, 4)], result)
def test_location_list_for_duplicate_locations(self):
result = calculate_positions(['R1', 'R1', 'R1', 'R3', 'L1', 'L4'])
print result
self.assertEqual([(0, 0), (1, 0), (1, -1), (0, -1), (0, 0)], result)
class RunWithInput(unittest.TestCase):
def test_run_with_input(self):
text = 'L2, L5, L5, R5, L2, L4, R1, R1, L4, R2, R1, L1, L4, R1, L4, L4, R5, R3, R1, L1, R1, L5, L1, R5, L4, R2, L5, L3, L3, R3, L3, R4, R4, L2, L5, R1, R2, L2, L1, R3, R4, L193, R3, L5, R45, L1, R4, R79, L5, L5, R5, R1, L4, R3, R3, L4, R185, L5, L3, L1, R5, L2, R1, R3, R2, L3, L4, L2, R2, L3, L2, L2, L3, L5, R3, R4, L5, R1, R2, L2, R4, R3, L4, L3, L1, R3, R2, R1, R1, L3, R4, L5, R2, R1, R3, L3, L2, L2, R2, R1, R2, R3, L3, L3, R4, L4, R4, R4, R4, L3, L1, L2, R5, R2, R2, R2, L4, L3, L4, R4, L5, L4, R2, L4, L4, R4, R1, R5, L2, L4, L5, L3, L2, L4, L4, R3, L3, L4, R1, L2, R3, L2, R1, R2, R5, L4, L2, L1, L3, R2, R3, L2, L1, L5, L2, L1, R4'
raw_movements = text.split()
movements = []
for item in raw_movements:
movements.append(item.rstrip(','))
positions = calculate_positions(movements)
duplicate_point = positions[-1]
print abs(duplicate_point[0]) + abs(duplicate_point[1])
|
rmfitzpatrick/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/openstack/os_keystone_domain_facts.py
|
19
|
#!/usr/bin/python
# Copyright (c) 2016 Hewlett-Packard Enterprise Corporation
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: os_keystone_domain_facts
short_description: Retrieve facts about one or more OpenStack domains
extends_documentation_fragment: openstack
version_added: "2.1"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
description:
- Retrieve facts about one or more OpenStack domains
requirements:
- "python >= 2.6"
- "shade"
options:
name:
description:
- Name or ID of the domain
required: true
filters:
description:
- A dictionary of meta data to use for further filtering. Elements of
this dictionary may be additional dictionaries.
required: false
default: None
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
'''
EXAMPLES = '''
# Gather facts about previously created domain
- os_keystone_domain_facts:
cloud: awesomecloud
- debug:
var: openstack_domains
# Gather facts about a previously created domain by name
- os_keystone_domain_facts:
cloud: awesomecloud
name: demodomain
- debug:
var: openstack_domains
# Gather facts about a previously created domain with filter
- os_keystone_domain_facts:
cloud: awesomecloud
name: demodomain
filters:
enabled: False
- debug:
var: openstack_domains
'''
RETURN = '''
openstack_domains:
description: has all the OpenStack facts about domains
returned: always, but can be null
type: complex
contains:
id:
description: Unique UUID.
returned: success
type: string
name:
description: Name given to the domain.
returned: success
type: string
description:
description: Description of the domain.
returned: success
type: string
enabled:
description: Flag to indicate if the domain is enabled.
returned: success
type: bool
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import openstack_full_argument_spec, openstack_module_kwargs
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=False, default=None),
filters=dict(required=False, type='dict', default=None),
)
module_kwargs = openstack_module_kwargs(
mutually_exclusive=[
['name', 'filters'],
]
)
module = AnsibleModule(argument_spec, **module_kwargs)
if not HAS_SHADE:
module.fail_json(msg='shade is required for this module')
try:
name = module.params['name']
filters = module.params['filters']
opcloud = shade.operator_cloud(**module.params)
if name:
# Let's suppose user is passing domain ID
try:
domains = opcloud.get_domain(name)
except:
domains = opcloud.search_domains(filters={'name': name})
else:
domains = opcloud.search_domains(filters)
module.exit_json(changed=False, ansible_facts=dict(
openstack_domains=domains))
except shade.OpenStackCloudException as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
|
turbokongen/home-assistant
|
refs/heads/dev
|
homeassistant/components/pencom/__init__.py
|
36
|
"""The pencom component."""
|
aperson/TweetPoster
|
refs/heads/master
|
TweetPoster/rehost.py
|
4
|
import re
import json
import requests
from bs4 import BeautifulSoup
import TweetPoster
class ImageHost(object):
url_re = None
def extract(self, url):
"""
Takes a URL, rehosts an image and returns a new URL.
"""
raise NotImplementedError
@classmethod
def rehost(cls, image_url):
try:
r = requests.post(
'http://api.imgur.com/2/upload.json',
params={
'key': TweetPoster.config['imgur']['key'],
'image': image_url
}
)
if not r.status_code == 200:
print r.json()['error']['message']
return None
return r.json()['upload']['links']['original']
except (ValueError, requests.exceptions.RequestException):
return None
class PicTwitterCom(object):
@classmethod
def extract(cls, url):
if not url.endswith(':large'):
url = url + ':large'
return ImageHost.rehost(url)
class Instagram(ImageHost):
url_re = 'https?://instagram.com/p/[\w_-]+/'
def extract(self, url):
try:
r = requests.get(url)
except requests.exceptions.RequestException:
return None
j = re.search('("display_src":".*?")', r.content)
if j:
j = json.loads('{' + j.group(1) + '}')
return self.rehost(j['display_src'])
class YFrog(ImageHost):
url_re = 'https?://yfrog.com/\w+'
def extract(self, url):
url = url.replace('://', '://twitter.')
try:
r = requests.get(url, params={'sa': 0})
except requests.exceptions.RequestException:
return None
soup = BeautifulSoup(r.content)
photo = soup.find(id='input-direct')['value']
return self.rehost(photo)
class Twitpic(ImageHost):
url_re = 'https?://twitpic.com/\w+'
def extract(self, url):
url = url + '/full'
try:
r = requests.get(url)
soup = BeautifulSoup(r.content)
except:
return None
img = soup.find(id='media-full').find('img')
return self.rehost(img['src'])
class Puush(ImageHost):
url_re = 'https?://puu.sh/[\w0-9]+'
def extract(self, url):
return self.rehost(url)
class Facebook(ImageHost):
url_re = 'https?://facebook.com/photo.php\?fbid=[0-9]+$'
def extract(self, url):
try:
r = requests.get(url)
except requests.exceptions.RequestException:
return None
soup = BeautifulSoup(r.content)
img = soup.find(id='fbPhotoImage')
return self.rehost(img['src'])
|
dreamsxin/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/ctypes/test/test_simplesubclasses.py
|
170
|
import unittest
from ctypes import *
class MyInt(c_int):
def __eq__(self, other):
if type(other) != MyInt:
return NotImplemented
return self.value == other.value
class Test(unittest.TestCase):
def test_compare(self):
self.assertEqual(MyInt(3), MyInt(3))
self.assertNotEqual(MyInt(42), MyInt(43))
def test_ignore_retval(self):
# Test if the return value of a callback is ignored
# if restype is None
proto = CFUNCTYPE(None)
def func():
return (1, "abc", None)
cb = proto(func)
self.assertEqual(None, cb())
def test_int_callback(self):
args = []
def func(arg):
args.append(arg)
return arg
cb = CFUNCTYPE(None, MyInt)(func)
self.assertEqual(None, cb(42))
self.assertEqual(type(args[-1]), MyInt)
cb = CFUNCTYPE(c_int, c_int)(func)
self.assertEqual(42, cb(42))
self.assertEqual(type(args[-1]), int)
def test_int_struct(self):
class X(Structure):
_fields_ = [("x", MyInt)]
self.assertEqual(X().x, MyInt())
s = X()
s.x = MyInt(42)
self.assertEqual(s.x, MyInt(42))
if __name__ == "__main__":
unittest.main()
|
keiono/cxio_python
|
refs/heads/master
|
cxio/cx_writer.py
|
1
|
import json
import decimal
import collections
from cxio.cx_constants import CxConstants
class CxWriter(object):
def __init__(self, out):
if out is None:
raise AssertionError('output stream must not be none')
self.__out = out
self.__pre_meta_data = []
self.__post_meta_data = []
self.__aspect_element_counts = {}
self.__started = False
self.__ended = False
self.__fragment_started = False
self.__first = True
self.__in_fragment = False
def add_pre_meta_data(self, pre_meta_data):
if pre_meta_data is None:
raise AssertionError('pre meta data must not be none')
if self.__ended:
raise IOError('already ended')
if self.__started:
raise IOError('already started')
self.__add_meta_data(self.__pre_meta_data, pre_meta_data)
def add_post_meta_data(self, post_meta_data):
if post_meta_data is None:
raise AssertionError('post meta data must not be none')
if self.__ended:
raise IOError('already ended')
self.__add_meta_data(self.__post_meta_data, post_meta_data)
def start(self):
if self.__ended:
raise IOError('already ended')
if self.__started:
raise IOError('already started')
self.__started = True
self.__out.write('[')
if len(self.__pre_meta_data) > 0:
self.__write_meta_data(self.__pre_meta_data)
def end(self):
if self.__ended:
raise IOError('already ended')
if not self.__started:
raise IOError('not started')
if self.__fragment_started:
raise IOError('fragment not ended')
if len(self.__post_meta_data) > 0:
self.__write_meta_data(self.__post_meta_data)
self.__ended = True
self.__started = False
self.__out.write('\n')
self.__out.write(']')
def start_aspect_fragment(self, aspect_name):
if aspect_name is None:
raise AssertionError('aspect name data must not be none')
if self.__ended:
raise IOError('already ended')
if not self.__started:
raise IOError('not started')
if self.__fragment_started:
raise IOError('fragment already started')
self.__fragment_started = True
if self.__first:
self.__first = False
else:
self.__out.write(', ')
self.__out.write('\n')
self.__out.write(' { ')
self.__out.write('"')
self.__out.write(aspect_name)
self.__out.write('"')
self.__out.write(':')
self.__out.write(' ')
self.__out.write('[')
self.__out.write(' ')
self.__out.write('\n')
def end_aspect_fragment(self):
if self.__ended:
raise IOError('already ended')
if not self.__fragment_started:
raise IOError('fragment not started')
self.__fragment_started = False
self.__out.write(' ')
self.__out.write(']')
self.__out.write('\n')
self.__out.write(' }')
self.__in_fragment = False
def write_aspect_element(self, element):
if self.__ended:
raise IOError('already ended')
if not self.__fragment_started:
raise IOError('fragment not started')
if self.__in_fragment is True:
self.__out.write(', ')
self.__out.write('\n')
self.__out.write(' ')
self.__out.write(self.__aspect_element_to_json(element))
self.__in_fragment = True
my_name = element.get_name()
if my_name not in self.__aspect_element_counts:
self.__aspect_element_counts[my_name] = 1
else:
self.__aspect_element_counts[my_name] += 1
def __write_meta_data(self, meta_data):
self.start_aspect_fragment(CxConstants.META_DATA)
for e in meta_data:
self.write_aspect_element(e)
self.end_aspect_fragment()
def get_aspect_element_counts(self):
return self.__aspect_element_counts
@staticmethod
def __aspect_element_to_json(aspect_element):
return json.dumps(aspect_element.get_data(), cls=DecimalEncoder)
@staticmethod
def __add_meta_data(meta_data, add_me):
if isinstance(add_me, collections.Iterable):
meta_data.extend(add_me)
else:
meta_data.append(add_me)
class DecimalEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return float(o)
return super(DecimalEncoder, self).default(o)
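# Editor's sketch of the write-order contract enforced by the IOError
# checks above: start() -> start_aspect_fragment() -> write_aspect_element()
# -> end_aspect_fragment() -> end(). The stub element is illustrative; any
# object exposing get_name() and get_data() works.
if __name__ == '__main__':
    import io

    class _StubElement(object):
        def get_name(self):
            return 'nodes'

        def get_data(self):
            return [{'@id': 1}]

    writer = CxWriter(io.StringIO())
    writer.start()
    writer.start_aspect_fragment('nodes')
    writer.write_aspect_element(_StubElement())
    writer.end_aspect_fragment()
    writer.end()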
|
sergey-shandar/autorest
|
refs/heads/master
|
src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyFile/auto_rest_swagger_bat_file_service/models/error.py
|
432
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
"""Error.
:param status:
:type status: int
:param message:
:type message: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, status=None, message=None):
self.status = status
self.message = message
class ErrorException(HttpOperationError):
"""Server responsed with exception of type: 'Error'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
|
antoinecarme/pyaf
|
refs/heads/master
|
tests/periodicities/Month/Cycle_Month_50_M_120.py
|
1
|
import tests.periodicities.period_test as per
per.buildModel((120 , 'M' , 50));
|
uranusjr/django
|
refs/heads/master
|
tests/utils_tests/test_inspect.py
|
62
|
import unittest
from django.utils import inspect
class Person:
def no_arguments(self):
return None
def one_argument(self, something):
return something
def just_args(self, *args):
return args
def all_kinds(self, name, address='home', age=25, *args, **kwargs):
return kwargs
class TestInspectMethods(unittest.TestCase):
def test_get_func_full_args_no_arguments(self):
self.assertEqual(inspect.get_func_full_args(Person.no_arguments), [])
def test_get_func_full_args_one_argument(self):
self.assertEqual(inspect.get_func_full_args(Person.one_argument), [('something',)])
def test_get_func_full_args_all_arguments(self):
arguments = [('name',), ('address', 'home'), ('age', 25), ('*args',), ('**kwargs',)]
self.assertEqual(inspect.get_func_full_args(Person.all_kinds), arguments)
def test_func_accepts_var_args_has_var_args(self):
self.assertIs(inspect.func_accepts_var_args(Person.just_args), True)
def test_func_accepts_var_args_no_var_args(self):
self.assertIs(inspect.func_accepts_var_args(Person.one_argument), False)
|
luvit/gyp
|
refs/heads/luvit-dev
|
test/mac/gyptest-framework-headers.py
|
344
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that mac_framework_headers works properly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# TODO(thakis): Make this work with ninja, make. http://crbug.com/129013
test = TestGyp.TestGyp(formats=['xcode'])
CHDIR = 'framework-headers'
test.run_gyp('test.gyp', chdir=CHDIR)
# Test that headers are installed for frameworks
test.build('test.gyp', 'test_framework_headers_framework', chdir=CHDIR)
test.built_file_must_exist(
'TestFramework.framework/Versions/A/TestFramework', chdir=CHDIR)
test.built_file_must_exist(
'TestFramework.framework/Versions/A/Headers/myframework.h', chdir=CHDIR)
# Test that headers are installed for static libraries.
test.build('test.gyp', 'test_framework_headers_static', chdir=CHDIR)
test.built_file_must_exist('libTestLibrary.a', chdir=CHDIR)
test.built_file_must_exist('include/myframework.h', chdir=CHDIR)
test.pass_test()
|
sublime1809/django
|
refs/heads/master
|
django/contrib/gis/maps/google/gmap.py
|
25
|
from __future__ import unicode_literals
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.six.moves import xrange
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker
class GoogleMapException(Exception):
pass
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
GOOGLE_MAPS_URL = 'http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
"A class for generating Google Maps JavaScript."
# String constants
onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).
def __init__(self, key=None, api_url=None, version=None,
center=None, zoom=None, dom_id='map',
kml_urls=[], polylines=None, polygons=None, markers=None,
template='gis/google/google-map.js',
js_module='geodjango',
extra_context={}):
# The Google Maps API Key defined in the settings will be used
# if not passed in as a parameter. The use of an API key is
# _required_.
if not key:
try:
self.key = settings.GOOGLE_MAPS_API_KEY
except AttributeError:
raise GoogleMapException(
'Google Maps API Key not found (try adding '
'GOOGLE_MAPS_API_KEY to your settings).'
)
else:
self.key = key
# Getting the Google Maps API version, defaults to using the latest ("2.x"),
# this is not necessarily the most stable.
if not version:
self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
else:
self.version = version
# Can specify the API URL in the `api_url` keyword.
if not api_url:
self.api_url = getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version
else:
self.api_url = api_url
# Setting the DOM id of the map, the load function, the JavaScript
# template, and the KML URLs array.
self.dom_id = dom_id
self.extra_context = extra_context
self.js_module = js_module
self.template = template
self.kml_urls = kml_urls
# Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
overlay_info = [[GMarker, markers, 'markers'],
[GPolygon, polygons, 'polygons'],
[GPolyline, polylines, 'polylines']]
for overlay_class, overlay_list, varname in overlay_info:
setattr(self, varname, [])
if overlay_list:
for overlay in overlay_list:
if isinstance(overlay, overlay_class):
getattr(self, varname).append(overlay)
else:
getattr(self, varname).append(overlay_class(overlay))
# If GMarker, GPolygons, and/or GPolylines are used the zoom will be
# automatically calculated via the Google Maps API. If both a zoom
# level and a center coordinate are provided with polygons/polylines,
# no automatic determination will occur.
self.calc_zoom = False
if self.polygons or self.polylines or self.markers:
if center is None or zoom is None:
self.calc_zoom = True
# Defaults for the zoom level and center coordinates if the zoom
# is not automatically calculated.
if zoom is None:
zoom = 4
self.zoom = zoom
if center is None:
center = (0, 0)
self.center = center
def render(self):
"""
Generates the JavaScript necessary for displaying this Google Map.
"""
params = {'calc_zoom': self.calc_zoom,
'center': self.center,
'dom_id': self.dom_id,
'js_module': self.js_module,
'kml_urls': self.kml_urls,
'zoom': self.zoom,
'polygons': self.polygons,
'polylines': self.polylines,
'icons': self.icons,
'markers': self.markers,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def body(self):
"Returns HTML body tag for loading and unloading Google Maps javascript."
return format_html('<body {0} {1}>', self.onload, self.onunload)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
return format_html('onload="{0}.{1}_load()"', self.js_module, self.dom_id)
@property
def api_script(self):
"Returns the <script> tag for the Google Maps API javascript."
return format_html('<script src="{0}{1}" type="text/javascript"></script>',
self.api_url, self.key)
@property
def js(self):
"Returns only the generated Google Maps JavaScript (no <script> tags)."
return self.render()
@property
def scripts(self):
"Returns all <script></script> tags required with Google Maps JavaScript."
return format_html('{0}\n <script type="text/javascript">\n//<![CDATA[\n{1}//]]>\n </script>',
self.api_script, mark_safe(self.js))
@property
def style(self):
"Returns additional CSS styling needed for Google Maps on IE."
return format_html('<style type="text/css">{0}</style>', self.vml_css)
@property
def xhtml(self):
"Returns XHTML information needed for IE VML overlays."
return format_html('<html xmlns="http://www.w3.org/1999/xhtml" {0}>', self.xmlns)
@property
def icons(self):
"Returns a sequence of GIcon objects in this map."
return set(marker.icon for marker in self.markers if marker.icon)
class GoogleMapSet(GoogleMap):
def __init__(self, *args, **kwargs):
"""
A class for generating sets of Google Maps that will be shown on the
same page together.
Example:
gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
gmapset = GoogleMapSet( [ gmap1, gmap2] )
"""
# The `google-multi.js` template is used instead of `google-single.js`
# by default.
template = kwargs.pop('template', 'gis/google/google-multi.js')
# This is the template used to generate the GMap load JavaScript for
# each map in the set.
self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')
# Running GoogleMap.__init__(), and resetting the template
# value with default obtained above.
super(GoogleMapSet, self).__init__(**kwargs)
self.template = template
# If a tuple/list is passed in as the first element of args, assume it
# holds the maps; otherwise treat args itself as the sequence of maps.
if isinstance(args[0], (tuple, list)):
self.maps = args[0]
else:
self.maps = args
# Generating DOM ids for each of the maps in the set.
self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]
def load_map_js(self):
"""
Returns JavaScript containing all of the loading routines for each
map in this set.
"""
result = []
for dom_id, gmap in zip(self.dom_ids, self.maps):
# Backup copies the GoogleMap DOM id and template attributes.
# They are overridden on each GoogleMap instance in the set so
# that only the loading JavaScript (and not the header variables)
# is used with the generated DOM ids.
tmp = (gmap.template, gmap.dom_id)
gmap.template = self.map_template
gmap.dom_id = dom_id
result.append(gmap.js)
# Restoring the backup values.
gmap.template, gmap.dom_id = tmp
return mark_safe(''.join(result))
def render(self):
"""
Generates the JavaScript for the collection of Google Maps in
this set.
"""
params = {'js_module': self.js_module,
'dom_ids': self.dom_ids,
'load_map_js': self.load_map_js(),
'icons': self.icons,
}
params.update(self.extra_context)
return render_to_string(self.template, params)
@property
def onload(self):
"Returns the `onload` HTML <body> attribute."
# Overloaded to use the `load` function defined in the
# `google-multi.js`, which calls the load routines for
# each one of the individual maps in the set.
return mark_safe('onload="%s.load()"' % self.js_module)
@property
def icons(self):
"Returns a sequence of all icons in each map of the set."
icons = set()
for map in self.maps:
icons |= map.icons
return icons
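# Editor's sketch (hypothetical key and coordinates) of minimal single-map
# usage with the properties defined above, as consumed from a template:
#
#     gmap = GoogleMap(key='abc123', center=(46.5, 6.6), zoom=8)
#     # <head>: {{ gmap.style }} and {{ gmap.scripts }}
#     # <body {{ gmap.onload }} {{ gmap.onunload }}>: <div id="map"></div>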
|
andrewyoung1991/abjad
|
refs/heads/master
|
abjad/tools/tonalanalysistools/test/test_tonalanalysistools_Mode___eq__.py
|
2
|
# -*- encoding: utf-8 -*-
from abjad import *
from abjad.tools import tonalanalysistools
def test_tonalanalysistools_Mode___eq___01():
mode_1 = tonalanalysistools.Mode('dorian')
mode_2 = tonalanalysistools.Mode('dorian')
mode_3 = tonalanalysistools.Mode('phrygian')
assert mode_1 == mode_1
assert mode_1 == mode_2
assert not mode_1 == mode_3
assert mode_2 == mode_1
assert mode_2 == mode_2
assert not mode_2 == mode_3
assert not mode_3 == mode_1
assert not mode_3 == mode_2
assert mode_3 == mode_3
def test_tonalanalysistools_Mode___eq___02():
r'''Synonym modes do not compare equal, by definition.
'''
major = tonalanalysistools.Mode('major')
ionian = tonalanalysistools.Mode('ionian')
assert not major == ionian
assert not ionian == major
|
halvertoluke/edx-platform
|
refs/heads/default_branch
|
lms/djangoapps/courseware/features/word_cloud.py
|
94
|
# pylint: disable=missing-docstring
from lettuce import world, step
from common import i_am_registered_for_the_course, section_location, visit_scenario_item
@step('I view the word cloud and it has rendered')
def word_cloud_is_rendered(_step):
assert world.is_css_present('.word_cloud')
@step('the course has a Word Cloud component')
def view_word_cloud(_step):
coursenum = 'test_course'
i_am_registered_for_the_course(_step, coursenum)
add_word_cloud_to_course(coursenum)
visit_scenario_item('SECTION')
@step('I press the Save button')
def press_the_save_button(_step):
button_css = '.input_cloud_section input.save'
world.css_click(button_css)
@step('I see the empty result')
def see_empty_result(_step):
assert world.css_text('.your_words', 0) == ''
@step('I fill inputs')
def fill_inputs(_step):
input_css = '.input_cloud_section .input-cloud'
world.css_fill(input_css, 'text1', 0)
for index in range(1, 4):
world.css_fill('.input_cloud_section .input-cloud', 'text2', index)
@step('I see the result with words count')
def see_result(_step):
strong_css = '.your_words strong'
target_text = set([world.css_text(strong_css, i) for i in range(2)])
assert set(['text1', 'text2']) == target_text
def add_word_cloud_to_course(course):
category = 'word_cloud'
world.ItemFactory.create(parent_location=section_location(course),
category=category,
display_name='Word Cloud')
|
DaTrollMon/pyNES
|
refs/heads/0.1.x
|
pynes/tests/ror_test.py
|
28
|
# -*- coding: utf-8 -*-
'''
ROR, Rotate Right Test
This is an Bit Manipulation of the 6502.
'''
import unittest
from pynes.compiler import lexical, syntax, semantic
class RorTest(unittest.TestCase):
def test_ror_imm(self):
tokens = list(lexical('ROR #$10'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_HEX_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x6a, 0x10])
def test_ror_imm_with_decimal(self):
tokens = list(lexical('ROR #10'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_DECIMAL_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x6a, 0x0a])
def test_ror_imm_with_binary(self):
tokens = list(lexical('ROR #%00000100'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_BINARY_NUMBER', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_IMMEDIATE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x6a, 0x04])
def test_ror_zp(self):
tokens = list(lexical('ROR $00'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ZEROPAGE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x66, 0x00])
def test_ror_zpx(self):
tokens = list(lexical('ROR $10,X'))
self.assertEquals(4, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('T_SEPARATOR', tokens[2]['type'])
self.assertEquals('T_REGISTER', tokens[3]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ZEROPAGE_X', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x76, 0x10])
def test_ror_abs(self):
tokens = list(lexical('ROR $1234'))
self.assertEquals(2, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('$1234', tokens[1]['value'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ABSOLUTE', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x6e, 0x34, 0x12])
def test_ror_absx(self):
tokens = list(lexical('ROR $1234,X'))
self.assertEquals(4, len(tokens))
self.assertEquals('T_INSTRUCTION', tokens[0]['type'])
self.assertEquals('T_ADDRESS', tokens[1]['type'])
self.assertEquals('$1234', tokens[1]['value'])
self.assertEquals('T_SEPARATOR', tokens[2]['type'])
self.assertEquals('T_REGISTER', tokens[3]['type'])
ast = syntax(tokens)
self.assertEquals(1, len(ast))
self.assertEquals('S_ABSOLUTE_X', ast[0]['type'])
code = semantic(ast)
self.assertEquals(code, [0x7e, 0x34, 0x12])
|
TheLazyBastard/SerialConsole
|
refs/heads/master
|
Main.py
|
1
|
from PySide.QtCore import *
from PySide.QtGui import *
import sys
import UI
from Services import SerialService
import time
class MainWindow(QDialog,UI.Ui_Form):
def __init__(self, parent = None):
super(MainWindow,self).__init__(parent)
self.setupUi(self)
#----------------- Start the Service --------------------------------------
self.Servicio = SerialService()
#----------------- Populate the COM Port List -----------------------------
for item in self.Servicio.GetListaCOMS():
self.CboCOMPorts.addItem(item)
#----------------- Populate the Baud Rate List ----------------------------
for item in self.Servicio.ListaBaudRate:
self.CboBaudRate.addItem(str(item))
#----------------- Set Up the Connection ----------------------------------
self.connect(self.BtnConectar,SIGNAL("clicked()"),self.Conectar)
#----------------- Close the Connection -----------------------------------
self.connect(self.BtnDesconectar,SIGNAL("clicked()"),self.Desconectar)
#----------------- Data Sending -------------------------------------------
self.connect(self.BtnEnviar,SIGNAL("clicked()"),self.EnviarDatos)
#----------------- Instantiate the Read and Write Threads -----------------
self.HiloLectura = ThreadLectura()
self.HiloEscritura = ThreadEscritura()
#----------------- Signal to Receive What HiloLectura Sends ---------------
self.connect(self.HiloLectura,SIGNAL("LEER_DATOS(QString)"),self.ActualizarTxtRx,Qt.QueuedConnection)
def Conectar(self):
COM = str(self.CboCOMPorts.currentText())
BR = int(self.CboBaudRate.currentText())
self.Servicio.Conectar(COM,BR)
if self.Servicio.conexion.isOpen():
self.HiloLectura.conexion = self.Servicio.GetConexion()
self.HiloLectura.start()
print(self.HiloLectura)
def Desconectar(self):
if self.HiloLectura.isRunning():
self.Servicio.Desconectar()
self.HiloLectura.quit()
def EnviarDatos(self):
if self.ChkNuevaLinea.isChecked():
self.HiloEscritura.nl = True
if self.ChkRetornoCarro.isChecked():
self.HiloEscritura.rc = True
self.HiloEscritura.conexion = self.Servicio.GetConexion()
self.HiloEscritura.texto = self.TxtEnviar.text()
self.HiloEscritura.start()
print(self.HiloEscritura)
self.TxtEnviar.setText("")
def ActualizarTxtRx(self,BytesRecibidos):
self.TxtDatosRecibidos.insertPlainText(str(BytesRecibidos))
#----------------- Read Thread Definition ---------------------------------
class ThreadLectura(QThread):
conexion = None
def __init__(self,parent = None):
super(ThreadLectura,self).__init__(parent)
def run(self):
if self.conexion is not None:
while self.conexion.isOpen():
BytesRecibidos = self.conexion.read()
self.emit(SIGNAL("LEER_DATOS(QString)"),BytesRecibidos)
#----------------- Write Thread Definition --------------------------------
class ThreadEscritura(QThread):
conexion = None
texto = None
nl = False
rc = False
def __init__(self,parent = None):
super(ThreadEscritura,self).__init__(parent)
def run(self):
print()
Enviar = []
for char in self.texto:
Enviar.append(str(char))
if self.nl:
Enviar.append('\n')
if self.rc:
Enviar.append('\r')
if self.conexion.isOpen():
print("Conexion Abierta")
TXDATA = ''.join(Enviar)
self.conexion.write(TXDATA.encode())
app = QApplication(sys.argv)
form = MainWindow()
form.show()
app.exec_()
|
morsdatum/ArangoDB
|
refs/heads/master
|
3rdParty/V8/build/gyp/test/actions/src/confirm-dep-files.py
|
349
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Confirms presence of files generated by our targets we depend on.
If they exist, create a new file.
Note target's input files are explicitly NOT defined in the gyp file
so they can't easily be passed to this script as args.
"""
import os
import sys
outfile = sys.argv[1] # Example value we expect: deps_all_done_first_123.txt
if (os.path.exists("dep_1.txt") and
os.path.exists("dep_2.txt") and
os.path.exists("dep_3.txt")):
open(outfile, "w")
|
mattbasta/amo-validator
|
refs/heads/master
|
tests/test_content_overlays.py
|
1
|
from nose.tools import eq_
from helper import MockXPI
from validator.chromemanifest import ChromeManifest
import validator.testcases.content as content
from validator.errorbundler import ErrorBundle
def test_marking_overlays():
"""
Mark an overlay, then test that it marks the scripts within the overlay.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
overlay chrome://foo chrome://ns1/content/main.xul
""", "chrome.manifest")
err.save_resource("chrome.manifest", c)
err.save_resource("chrome.manifest_nopush", c)
xpi = MockXPI({"foo/main.xul": "tests/resources/content/script_list.xul"})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource("marked_scripts")
print marked_scripts
assert marked_scripts
eq_(marked_scripts, set(["chrome://ns1/foo.js",
"chrome://ns1/bar.js",
"chrome://asdf/foo.js"]))
def test_marking_overlays_no_overlay():
"""
Test that unmarked overlays don't mark scripts as being potentially
pollutable.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
#overlay chrome://foo chrome://ns1/main.xul
""", "chrome.manifest")
err.save_resource("chrome.manifest", c)
err.save_resource("chrome.manifest_nopush", c)
xpi = MockXPI({"foo/main.xul": "tests/resources/content/script_list.xul"})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource("marked_scripts")
print marked_scripts
assert not marked_scripts
def test_marking_overlays_subdir():
"""
Mark an overlay in a subdirectory, then test that it marks the scripts
within the overlay. Make sure it properly figures out relative URLs.
"""
err = ErrorBundle()
err.supported_versions = {}
c = ChromeManifest("""
content ns1 foo/
overlay chrome://foo chrome://ns1/content/subdir/main.xul
""", "chrome.manifest")
err.save_resource("chrome.manifest", c)
err.save_resource("chrome.manifest_nopush", c)
xpi = MockXPI({"foo/subdir/main.xul":
"tests/resources/content/script_list.xul"})
content.test_packed_packages(err, xpi)
assert not err.failed()
marked_scripts = err.get_resource("marked_scripts")
print marked_scripts
assert marked_scripts
eq_(marked_scripts, set(["chrome://ns1/subdir/foo.js", "chrome://ns1/bar.js",
"chrome://asdf/foo.js"]))
def test_script_scraping():
"""Test that scripts are gathered up during the validation process."""
err = ErrorBundle()
err.supported_versions = {}
xpi = MockXPI({"foo.js": "tests/resources/junk.xpi",
"dir/bar.jsm": "tests/resources/junk.xpi"})
content.test_packed_packages(err, xpi)
assert not err.failed()
scripts = err.get_resource("scripts")
print scripts
assert scripts
for bundle in scripts:
assert "foo.js" in bundle["scripts"]
assert "dir/bar.jsm" in bundle["scripts"]
eq_(bundle["package"], xpi)
eq_(bundle["state"], [])
|
chunfengh/seq2seq
|
refs/heads/master
|
seq2seq/encoders/conv_encoder.py
|
6
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
An encoder that pools over embeddings, as described in
https://arxiv.org/abs/1611.02344.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from pydoc import locate
import tensorflow as tf
from seq2seq.encoders.encoder import Encoder, EncoderOutput
from seq2seq.encoders.pooling_encoder import _create_position_embedding
class ConvEncoder(Encoder):
"""A deep convolutional encoder, as described in
  https://arxiv.org/abs/1611.02344. The encoder supports optional position
  embeddings.
Params:
attention_cnn.units: Number of units in `cnn_a`. Same in each layer.
attention_cnn.kernel_size: Kernel size for `cnn_a`.
attention_cnn.layers: Number of layers in `cnn_a`.
embedding_dropout_keep_prob: Dropout keep probability
applied to the embeddings.
output_cnn.units: Number of units in `cnn_c`. Same in each layer.
output_cnn.kernel_size: Kernel size for `cnn_c`.
output_cnn.layers: Number of layers in `cnn_c`.
position_embeddings.enable: If true, add position embeddings to the
inputs before pooling.
position_embeddings.combiner_fn: Function used to combine the
position embeddings with the inputs. For example, `tensorflow.add`.
position_embeddings.num_positions: Size of the position embedding matrix.
This should be set to the maximum sequence length of the inputs.
"""
def __init__(self, params, mode, name="conv_encoder"):
super(ConvEncoder, self).__init__(params, mode, name)
self._combiner_fn = locate(self.params["position_embeddings.combiner_fn"])
@staticmethod
def default_params():
return {
"attention_cnn.units": 512,
"attention_cnn.kernel_size": 3,
"attention_cnn.layers": 15,
"embedding_dropout_keep_prob": 0.8,
"output_cnn.units": 256,
"output_cnn.kernel_size": 3,
"output_cnn.layers": 5,
"position_embeddings.enable": True,
"position_embeddings.combiner_fn": "tensorflow.multiply",
"position_embeddings.num_positions": 100,
}
def encode(self, inputs, sequence_length):
if self.params["position_embeddings.enable"]:
positions_embed = _create_position_embedding(
embedding_dim=inputs.get_shape().as_list()[-1],
num_positions=self.params["position_embeddings.num_positions"],
lengths=sequence_length,
maxlen=tf.shape(inputs)[1])
inputs = self._combiner_fn(inputs, positions_embed)
# Apply dropout to embeddings
inputs = tf.contrib.layers.dropout(
inputs=inputs,
keep_prob=self.params["embedding_dropout_keep_prob"],
is_training=self.mode == tf.contrib.learn.ModeKeys.TRAIN)
with tf.variable_scope("cnn_a"):
cnn_a_output = inputs
for layer_idx in range(self.params["attention_cnn.layers"]):
next_layer = tf.contrib.layers.conv2d(
inputs=cnn_a_output,
num_outputs=self.params["attention_cnn.units"],
kernel_size=self.params["attention_cnn.kernel_size"],
padding="SAME",
activation_fn=None)
# Add a residual connection, except for the first layer
if layer_idx > 0:
next_layer += cnn_a_output
cnn_a_output = tf.tanh(next_layer)
with tf.variable_scope("cnn_c"):
cnn_c_output = inputs
for layer_idx in range(self.params["output_cnn.layers"]):
next_layer = tf.contrib.layers.conv2d(
inputs=cnn_c_output,
num_outputs=self.params["output_cnn.units"],
kernel_size=self.params["output_cnn.kernel_size"],
padding="SAME",
activation_fn=None)
# Add a residual connection, except for the first layer
if layer_idx > 0:
next_layer += cnn_c_output
cnn_c_output = tf.tanh(next_layer)
final_state = tf.reduce_mean(cnn_c_output, 1)
return EncoderOutput(
outputs=cnn_a_output,
final_state=final_state,
attention_values=cnn_c_output,
attention_values_length=sequence_length)
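# A minimal instantiation sketch (illustrative, not part of the original
# file). Calling the encoder instance directly matches how
# seq2seq/test/conv_encoder_test.py exercises it; the parameter keys come
# from default_params() above.
#
#   encoder = ConvEncoder(
#       params={"attention_cnn.layers": 4, "output_cnn.layers": 2},
#       mode=tf.contrib.learn.ModeKeys.TRAIN)
#   # inputs: [batch, time, depth] embeddings; sequence_length: [batch] int32
#   encoder_output = encoder(inputs, sequence_length)  # an EncoderOutput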
|
erkanay/django
|
refs/heads/master
|
django/utils/tzinfo.py
|
97
|
"Implementation of tzinfo classes for use with datetime.datetime."
from __future__ import unicode_literals
from datetime import timedelta, tzinfo
import time
import warnings
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text, DEFAULT_LOCALE_ENCODING
warnings.warn(
"django.utils.tzinfo will be removed in Django 1.9. "
"Use django.utils.timezone instead.",
RemovedInDjango19Warning, stacklevel=2)
# Python's docs say: "A tzinfo subclass must have an __init__() method that can
# be called with no arguments". FixedOffset and LocalTimezone don't honor this
# requirement. Defining __getinitargs__ is sufficient to fix copy/deepcopy as
# well as pickling/unpickling.
class FixedOffset(tzinfo):
"Fixed offset in minutes east from UTC."
def __init__(self, offset):
warnings.warn(
"django.utils.tzinfo.FixedOffset will be removed in Django 1.9. "
"Use django.utils.timezone.get_fixed_timezone instead.",
RemovedInDjango19Warning)
if isinstance(offset, timedelta):
self.__offset = offset
offset = self.__offset.seconds // 60
else:
self.__offset = timedelta(minutes=offset)
sign = '-' if offset < 0 else '+'
self.__name = "%s%02d%02d" % (sign, abs(offset) / 60., abs(offset) % 60)
def __repr__(self):
return self.__name
def __getinitargs__(self):
return self.__offset,
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return timedelta(0)
# This implementation is used for display purposes. It uses an approximation
# for DST computations on dates >= 2038.
# A similar implementation exists in django.utils.timezone. It's used for
# timezone support (when USE_TZ = True) and focuses on correctness.
class LocalTimezone(tzinfo):
"Proxy timezone information from time module."
def __init__(self, dt):
warnings.warn(
"django.utils.tzinfo.LocalTimezone will be removed in Django 1.9. "
"Use django.utils.timezone.get_default_timezone instead.",
RemovedInDjango19Warning)
tzinfo.__init__(self)
self.__dt = dt
self._tzname = self.tzname(dt)
def __repr__(self):
return force_str(self._tzname)
def __getinitargs__(self):
return self.__dt,
def utcoffset(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone)
else:
return timedelta(seconds=-time.timezone)
def dst(self, dt):
if self._isdst(dt):
return timedelta(seconds=-time.altzone) - timedelta(seconds=-time.timezone)
else:
return timedelta(0)
def tzname(self, dt):
is_dst = False if dt is None else self._isdst(dt)
try:
return force_text(time.tzname[is_dst], DEFAULT_LOCALE_ENCODING)
except UnicodeDecodeError:
return None
def _isdst(self, dt):
tt = (dt.year, dt.month, dt.day,
dt.hour, dt.minute, dt.second,
dt.weekday(), 0, 0)
try:
stamp = time.mktime(tt)
except (OverflowError, ValueError):
# 32 bit systems can't handle dates after Jan 2038, and certain
# systems can't handle dates before ~1901-12-01:
#
# >>> time.mktime((1900, 1, 13, 0, 0, 0, 0, 0, 0))
# OverflowError: mktime argument out of range
# >>> time.mktime((1850, 1, 13, 0, 0, 0, 0, 0, 0))
# ValueError: year out of range
#
# In this case, we fake the date, because we only care about the
# DST flag.
tt = (2037,) + tt[1:]
stamp = time.mktime(tt)
tt = time.localtime(stamp)
return tt.tm_isdst > 0
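# Illustrative only (not part of the original module): FixedOffset names
# itself in +-HHMM form from minutes east of UTC, e.g.
#   FixedOffset(60).tzname(None)      -> '+0100'
#   FixedOffset(-150).utcoffset(None) -> timedelta(minutes=-150)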
|
accomac/namebench
|
refs/heads/master
|
nb_third_party/dns/e164.py
|
248
|
# Copyright (C) 2006, 2007, 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS E.164 helpers
@var public_enum_domain: The DNS public ENUM domain, e164.arpa.
@type public_enum_domain: dns.name.Name object
"""
import dns.exception
import dns.name
import dns.resolver
public_enum_domain = dns.name.from_text('e164.arpa.')
def from_e164(text, origin=public_enum_domain):
"""Convert an E.164 number in textual form into a Name object whose
value is the ENUM domain name for that number.
@param text: an E.164 number in textual form.
@type text: str
@param origin: The domain in which the number should be constructed.
The default is e164.arpa.
@type: dns.name.Name object or None
@rtype: dns.name.Name object
"""
parts = [d for d in text if d.isdigit()]
parts.reverse()
return dns.name.from_text('.'.join(parts), origin=origin)
def to_e164(name, origin=public_enum_domain, want_plus_prefix=True):
"""Convert an ENUM domain name into an E.164 number.
@param name: the ENUM domain name.
@type name: dns.name.Name object.
@param origin: A domain containing the ENUM domain name. The
name is relativized to this domain before being converted to text.
@type: dns.name.Name object or None
@param want_plus_prefix: if True, add a '+' to the beginning of the
returned number.
@rtype: str
"""
    if origin is not None:
name = name.relativize(origin)
dlabels = [d for d in name.labels if (d.isdigit() and len(d) == 1)]
if len(dlabels) != len(name.labels):
raise dns.exception.SyntaxError('non-digit labels in ENUM domain name')
dlabels.reverse()
text = ''.join(dlabels)
if want_plus_prefix:
text = '+' + text
return text
def query(number, domains, resolver=None):
"""Look for NAPTR RRs for the specified number in the specified domains.
    e.g. query('16505551212', ['e164.dnspython.org.', 'e164.arpa.'])
"""
if resolver is None:
resolver = dns.resolver.get_default_resolver()
for domain in domains:
if isinstance(domain, (str, unicode)):
domain = dns.name.from_text(domain)
qname = dns.e164.from_e164(number, domain)
try:
return resolver.query(qname, 'NAPTR')
except dns.resolver.NXDOMAIN:
pass
raise dns.resolver.NXDOMAIN
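# A short usage sketch (illustrative, not from the original file):
#   >>> import dns.e164
#   >>> n = dns.e164.from_e164('+1 650 555 1212')
#   >>> str(n)
#   '2.1.2.1.5.5.5.0.5.6.1.e164.arpa.'
#   >>> dns.e164.to_e164(n)
#   '+16505551212'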
|
CospanDesign/nysa-tx1-pcie-platform
|
refs/heads/master
|
tx1_pcie/demo/tx1_pcie/site_scons/ngd_utils.py
|
4
|
#Distributed under the MIT license.
#Copyright (c) 2013 Cospan Design (dave.mccoy@cospandesign.com)
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
#of the Software, and to permit persons to whom the Software is furnished to do
#so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import glob
import json
import utils
import coregen_utils
NGD_DEFAULT_FLAG_FILE = "ngd_default_flags.json"
NGD_PROJECT_UCF = "project.ucf"
NGD_DIR = "ngd"
def get_ngd_flags(config):
"""
Given a configuration dictionary return flags for the NGD build
if user flags are not specified take the default flags from
site_scons/ngd_default_flags.json
Args:
config (dictionary): configuration dictionary
Return:
(dictionary): flag dictionary
Raises:
Nothing
"""
#print "Apply slave tags"
flags = {}
user_flags = {}
if "ngd" in config.keys():
if "flags" in config["ngd"].keys():
user_flags = config["ngd"]["flags"]
fn = os.path.join(os.path.dirname(__file__), NGD_DEFAULT_FLAG_FILE)
default_flags = json.load(open(fn, "r"))
default_flags["-dd"]["value"] = get_ngd_dir(config)
default_flags["-p"]["value"] = config["device"]
default_flags["-uc"]["value"] = create_ucf_filename(config)
coregen_files = coregen_utils.get_target_files(config)
if len(coregen_files) > 0:
default_flags["-sd"]["value"] = coregen_utils.get_coregen_dir(config, absolute = True)
for key in default_flags:
flags[key] = default_flags[key]
if key in user_flags.keys():
flags[key]["value"] = user_flags[key]
return flags
def create_ngd_dir(config):
"""
Create an ngd directory in the build folder
Args:
config (dictionary): configuration dictionary
Return:
(string): ngd output directory (relative)
Raises:
Nothing
"""
#Create a output directory if it does not exist
build_dir = utils.create_build_directory(config)
#Now I have an output directory to put stuff in
#Create an XST directory to put stuff related to XST
ngd_dir = os.path.join(build_dir, NGD_DIR)
if not os.path.exists(ngd_dir):
os.makedirs(ngd_dir)
return ngd_dir
def get_ngd_dir(config, absolute = False):
"""Returns the ngd output directory location
Args:
config (dictionary): configuration dictionary
absolute (boolean):
False (default): Relative to project base
True: Absolute
Returns:
(string): string representation of the path to the output
Raises:
Nothing
"""
build_dir = utils.get_build_directory(config, absolute)
ngd_dir = os.path.join(build_dir, NGD_DIR)
return ngd_dir
def get_ngd_filename(config, absolute = False):
"""get the output filename"""
ngd_dir = get_ngd_dir(config, absolute)
top_module = config["top_module"]
ngd_file = os.path.join(ngd_dir, "%s.ngd" % top_module)
#print "ngd filename: %s" % ngd_file
return ngd_file
def create_ucf_filename(config):
"""
    Find all UCF files associated with this project.
    This function searches the constraints directory for UCF files.
    Unfortunately ngdbuild will only read in one UCF file, so in order to
    include them all, this function aggregates every UCF file into one file:
    build/ngd/project.ucf
Args:
config (dictionary): configuration dictionary
Returns:
        (string): file name (absolute) of combined UCF files
Raises:
Nothing
"""
project_dir = utils.get_project_base()
    #XXX: I should look at the constraints associated with coregened files
ucf_search_path = os.path.join(project_dir, "constraints", "*.ucf")
ucf_files = glob.glob(ucf_search_path)
#print "ucf files: %s" % str(ucf_files)
#Get all ucf files within the cores directory
#XXX: Need to make an output cores directory
ngd_dir = get_ngd_dir(config, absolute = True)
p_ucf_fn = os.path.join(ngd_dir, NGD_PROJECT_UCF)
fp = open(p_ucf_fn, "w")
    for f in ucf_files:
        ufp = open(f, "r")
        ucf = ufp.read()
        ufp.close()
        #print "ucf: %s" % ucf
        fp.write(ucf)
        fp.write(os.linesep)
fp.close()
return p_ucf_fn
def get_ucf_filename(config):
"""Return the name of the project ucf file"""
ngd_dir = get_ngd_dir(config, absolute = True)
p_ucf_fn = os.path.join(ngd_dir, NGD_PROJECT_UCF)
return p_ucf_fn
def _get_ucf_files(path):
"""recursively search for ucf files"""
ucf_files = []
for base, dirs, _ in os.walk(path):
for d in dirs:
p = os.path.join(base, d)
ucf_files.extend(_get_ucf_files(p))
search_path = os.path.join(path, '*.ucf')
ucfs = glob.glob(search_path)
ucf_files.extend(ucfs)
return ucf_files
def get_build_flags_string(config):
"""Returns the flags for the build
Args:
config (dictionary): configuration dictionary
Returns:
(string): string of flags to be used on the command
Raises:
Nothing
"""
flag_string = " "
flags = get_ngd_flags(config)
for flag in flags:
if len(flags[flag]["value"]) == 0:
continue
if flags[flag]["value"] == "_true":
#Special case where we don't specify any variables
flag_string += "%s " % flag
continue
#Normal flag
flag_string += "%s %s " % (flag, flags[flag]["value"])
return flag_string
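# Hedged usage sketch (illustrative; the config values below are assumptions
# inferred from the lookups above, not a documented schema):
#
#   config = {"device": "xc6slx9-tqg144-3",
#             "top_module": "top",
#             "ngd": {"flags": {"-nt": "timestamp"}}}
#   create_ngd_dir(config)
#   flags = get_build_flags_string(config)  # ready to splice into ngdbuild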
|
mfherbst/spack
|
refs/heads/develop
|
lib/spack/spack/test/cmd/module.py
|
2
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os.path
import pytest
import spack.main
import spack.modules
module = spack.main.SpackCommand('module')
def _module_files(module_type, *specs):
specs = [spack.spec.Spec(x).concretized() for x in specs]
writer_cls = spack.modules.module_types[module_type]
return [writer_cls(spec).layout.filename for spec in specs]
@pytest.fixture(
params=[
        ['rm', 'doesnotexist'],  # Try to remove a non-existing module
['find', 'mpileaks'], # Try to find a module with multiple matches
['find', 'doesnotexist'], # Try to find a module with no matches
['find', '--unkown_args'], # Try to give an unknown argument
]
)
def failure_args(request):
"""A list of arguments that will cause a failure"""
return request.param
@pytest.fixture(
params=['dotkit', 'tcl', 'lmod']
)
def module_type(request):
return request.param
# TODO : test the --delete-tree option
# TODO : this requires having a separate directory for test modules
# TODO : add tests for loads and find to check the prompt format
@pytest.mark.db
def test_exit_with_failure(database, module_type, failure_args):
with pytest.raises(spack.main.SpackCommandError):
module(module_type, *failure_args)
@pytest.mark.db
@pytest.mark.parametrize('deprecated_command', [
('refresh', '-m', 'tcl', 'mpileaks'),
('rm', '-m', 'tcl', '-m', 'lmod', 'mpileaks'),
('find', 'mpileaks'),
])
def test_deprecated_command(database, deprecated_command):
with pytest.raises(spack.main.SpackCommandError):
module(*deprecated_command)
@pytest.mark.db
def test_remove_and_add(database, module_type):
"""Tests adding and removing a tcl module file."""
if module_type == 'lmod':
# TODO: Testing this with lmod requires mocking
# TODO: the core compilers
return
rm_cli_args = ['rm', '-y', 'mpileaks']
module_files = _module_files(module_type, 'mpileaks')
for item in module_files:
assert os.path.exists(item)
module(module_type, *rm_cli_args)
for item in module_files:
assert not os.path.exists(item)
module(module_type, 'refresh', '-y', 'mpileaks')
for item in module_files:
assert os.path.exists(item)
@pytest.mark.db
@pytest.mark.parametrize('cli_args', [
['libelf'],
['--full-path', 'libelf']
])
def test_find(database, cli_args, module_type):
if module_type == 'lmod':
# TODO: Testing this with lmod requires mocking
# TODO: the core compilers
return
module(module_type, *(['find'] + cli_args))
@pytest.mark.db
@pytest.mark.usefixtures('database')
@pytest.mark.regression('2215')
def test_find_fails_on_multiple_matches():
# As we installed multiple versions of mpileaks, the command will
# fail because of multiple matches
out = module('tcl', 'find', 'mpileaks', fail_on_error=False)
assert module.returncode == 1
assert 'matches multiple packages' in out
# Passing multiple packages from the command line also results in the
# same failure
out = module(
'tcl', 'find', 'mpileaks ^mpich', 'libelf', fail_on_error=False
)
assert module.returncode == 1
assert 'matches multiple packages' in out
@pytest.mark.db
@pytest.mark.usefixtures('database')
@pytest.mark.regression('2570')
def test_find_fails_on_non_existing_packages():
# Another way the command might fail is if the package does not exist
out = module('tcl', 'find', 'doesnotexist', fail_on_error=False)
assert module.returncode == 1
assert 'matches no package' in out
@pytest.mark.db
@pytest.mark.usefixtures('database')
def test_find_recursive():
# If we call find without options it should return only one module
out = module('tcl', 'find', 'mpileaks ^zmpi')
assert len(out.split()) == 1
# If instead we call it with the recursive option the length should
# be greater
out = module('tcl', 'find', '-r', 'mpileaks ^zmpi')
assert len(out.split()) > 1
# Needed to make the 'module_configuration' fixture below work
writer_cls = spack.modules.lmod.LmodModulefileWriter
@pytest.mark.db
def test_setdefault_command(
mutable_database, module_configuration
):
module_configuration('autoload_direct')
# Install two different versions of a package
other_spec, preferred = 'a@1.0', 'a@2.0'
spack.spec.Spec(other_spec).concretized().package.do_install(fake=True)
spack.spec.Spec(preferred).concretized().package.do_install(fake=True)
writers = {
preferred: writer_cls(spack.spec.Spec(preferred).concretized()),
other_spec: writer_cls(spack.spec.Spec(other_spec).concretized())
}
# Create two module files for the same software
module('lmod', 'refresh', '-y', '--delete-tree', preferred, other_spec)
# Assert initial directory state: no link and all module files present
link_name = os.path.join(
os.path.dirname(writers[preferred].layout.filename),
'default'
)
for k in preferred, other_spec:
assert os.path.exists(writers[k].layout.filename)
assert not os.path.exists(link_name)
# Set the default to be the other spec
module('lmod', 'setdefault', other_spec)
# Check that a link named 'default' exists, and points to the right file
for k in preferred, other_spec:
assert os.path.exists(writers[k].layout.filename)
assert os.path.exists(link_name) and os.path.islink(link_name)
assert os.path.realpath(link_name) == writers[other_spec].layout.filename
# Reset the default to be the preferred spec
module('lmod', 'setdefault', preferred)
# Check that a link named 'default' exists, and points to the right file
for k in preferred, other_spec:
assert os.path.exists(writers[k].layout.filename)
assert os.path.exists(link_name) and os.path.islink(link_name)
assert os.path.realpath(link_name) == writers[preferred].layout.filename
|
pwns4cash/vivisect
|
refs/heads/master
|
visgraph/__init__.py
|
11
|
'''
The new visgraph package...
Sigh... If you want something done right...
'''
|
p0cisk/Quantum-GIS
|
refs/heads/master
|
tests/src/python/test_qgsoptional.py
|
74
|
# -*- coding: utf-8 -*-
'''
test_qgsoptional.py
--------------------------------------
Date : September 2016
Copyright : (C) 2016 Matthias Kuhn
email : matthias@opengis.ch
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis # NOQA
from qgis.testing import unittest
from qgis.core import QgsOptionalExpression, QgsExpression
class TestQgsOptional(unittest.TestCase):
def setUp(self):
"""Run before each test."""
pass
def tearDown(self):
"""Run after each test."""
pass
def testQgsOptionalExpression(self):
opt = QgsOptionalExpression()
self.assertFalse(opt.enabled())
opt = QgsOptionalExpression(QgsExpression('true'))
self.assertTrue(opt.enabled())
self.assertEqual(opt.data().expression(), 'true')
opt.setEnabled(False)
self.assertFalse(opt.enabled())
# boolean operator not yet working in python
# self.assertFalse(opt)
self.assertEqual(opt.data().expression(), 'true')
opt.setEnabled(True)
self.assertTrue(opt.enabled())
# self.assertTrue(opt)
self.assertEqual(opt.data().expression(), 'true')
opt.setData(QgsExpression('xyz'))
self.assertTrue(opt.enabled())
self.assertEqual(opt.data().expression(), 'xyz')
opt = QgsOptionalExpression(QgsExpression('true'), False)
self.assertFalse(opt.enabled())
if __name__ == '__main__':
unittest.main()
|
bfrgoncalves/Online-PhyloViZ
|
refs/heads/master
|
node_modules/l/node_modules/hook.io/node_modules/npm/node_modules/node-gyp/gyp/test/actions-subdir/src/make-file.py
|
489
|
#!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
contents = 'Hello from make-file.py\n'
open(sys.argv[1], 'wb').write(contents)
|
gregbillock/Spectrum-Access-System
|
refs/heads/master
|
src/prop/ehata/test/ehata_isolated_ridge_test.py
|
2
|
import os,sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import ehata
import csv
import math
deltas = []
with open('isolated_ridge_test.csv') as ridge_file:
rows = csv.reader(ridge_file)
for row in rows:
deltas.append(row)
with open('elevations.csv') as profiles_file:
rows = csv.reader(profiles_file)
target = 0
for row in rows:
print '\nOn row %d' % target
profile = []
for r in row:
profile.append(float(r))
profile[0] = int(profile[0])
profile = profile[0:int(profile[0])+3]
Kir = ehata.IsolatedRidgeCorrection(profile)
if math.fabs(float(deltas[0][target]) - Kir) > .1:
print('fail Kir on profile %d: %f vs %f' % (target, float(deltas[0][target]), Kir))
exit()
target = target + 1
print 'PASS'
|
bradsk88/WinkHouse
|
refs/heads/master
|
lib/werkzeug/contrib/wrappers.py
|
181
|
# -*- coding: utf-8 -*-
"""
werkzeug.contrib.wrappers
~~~~~~~~~~~~~~~~~~~~~~~~~
Extra wrappers or mixins contributed by the community. These wrappers can
    be mixed into request objects to add extra functionality.
Example::
from werkzeug.wrappers import Request as RequestBase
from werkzeug.contrib.wrappers import JSONRequestMixin
class Request(RequestBase, JSONRequestMixin):
pass
Afterwards this request object provides the extra functionality of the
:class:`JSONRequestMixin`.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import codecs
try:
from simplejson import loads
except ImportError:
from json import loads
from werkzeug.exceptions import BadRequest
from werkzeug.utils import cached_property
from werkzeug.http import dump_options_header, parse_options_header
from werkzeug._compat import wsgi_decoding_dance
def is_known_charset(charset):
"""Checks if the given charset is known to Python."""
try:
codecs.lookup(charset)
except LookupError:
return False
return True
class JSONRequestMixin(object):
"""Add json method to a request object. This will parse the input data
through simplejson if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
is not json or if the data itself cannot be parsed as json.
"""
@cached_property
def json(self):
"""Get the result of simplejson.loads if possible."""
if 'json' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a JSON request')
try:
return loads(self.data)
except Exception:
raise BadRequest('Unable to read JSON request')
class ProtobufRequestMixin(object):
"""Add protobuf parsing method to a request object. This will parse the
input data through `protobuf`_ if possible.
:exc:`~werkzeug.exceptions.BadRequest` will be raised if the content-type
    is not protobuf or if the data itself cannot be parsed properly.
.. _protobuf: http://code.google.com/p/protobuf/
"""
#: by default the :class:`ProtobufRequestMixin` will raise a
#: :exc:`~werkzeug.exceptions.BadRequest` if the object is not
#: initialized. You can bypass that check by setting this
#: attribute to `False`.
protobuf_check_initialization = True
def parse_protobuf(self, proto_type):
"""Parse the data into an instance of proto_type."""
if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''):
raise BadRequest('Not a Protobuf request')
obj = proto_type()
try:
obj.ParseFromString(self.data)
except Exception:
raise BadRequest("Unable to parse Protobuf request")
# Fail if not all required fields are set
if self.protobuf_check_initialization and not obj.IsInitialized():
raise BadRequest("Partial Protobuf request")
return obj
class RoutingArgsRequestMixin(object):
"""This request mixin adds support for the wsgiorg routing args
`specification`_.
.. _specification: http://www.wsgi.org/wsgi/Specifications/routing_args
"""
def _get_routing_args(self):
        return self.environ.get('wsgiorg.routing_args', ((), {}))[0]
def _set_routing_args(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (value, self.routing_vars)
routing_args = property(_get_routing_args, _set_routing_args, doc='''
The positional URL arguments as `tuple`.''')
del _get_routing_args, _set_routing_args
def _get_routing_vars(self):
rv = self.environ.get('wsgiorg.routing_args')
if rv is not None:
return rv[1]
rv = {}
if not self.shallow:
self.routing_vars = rv
return rv
def _set_routing_vars(self, value):
if self.shallow:
raise RuntimeError('A shallow request tried to modify the WSGI '
'environment. If you really want to do that, '
'set `shallow` to False.')
self.environ['wsgiorg.routing_args'] = (self.routing_args, value)
routing_vars = property(_get_routing_vars, _set_routing_vars, doc='''
The keyword URL arguments as `dict`.''')
del _get_routing_vars, _set_routing_vars
class ReverseSlashBehaviorRequestMixin(object):
"""This mixin reverses the trailing slash behavior of :attr:`script_root`
and :attr:`path`. This makes it possible to use :func:`~urlparse.urljoin`
directly on the paths.
    Because it changes the behavior of :class:`Request` this class has to be
mixed in *before* the actual request class::
class MyRequest(ReverseSlashBehaviorRequestMixin, Request):
pass
This example shows the differences (for an application mounted on
`/application` and the request going to `/application/foo/bar`):
+---------------+-------------------+---------------------+
| | normal behavior | reverse behavior |
+===============+===================+=====================+
| `script_root` | ``/application`` | ``/application/`` |
+---------------+-------------------+---------------------+
| `path` | ``/foo/bar`` | ``foo/bar`` |
+---------------+-------------------+---------------------+
"""
@cached_property
def path(self):
"""Requested path as unicode. This works a bit like the regular path
info in the WSGI environment but will not include a leading slash.
"""
path = wsgi_decoding_dance(self.environ.get('PATH_INFO') or '',
self.charset, self.encoding_errors)
return path.lstrip('/')
@cached_property
def script_root(self):
"""The root path of the script includling a trailing slash."""
path = wsgi_decoding_dance(self.environ.get('SCRIPT_NAME') or '',
self.charset, self.encoding_errors)
return path.rstrip('/') + '/'
class DynamicCharsetRequestMixin(object):
""""If this mixin is mixed into a request class it will provide
a dynamic `charset` attribute. This means that if the charset is
transmitted in the content type headers it's used from there.
    Because it changes the behavior of :class:`Request` this class has
to be mixed in *before* the actual request class::
class MyRequest(DynamicCharsetRequestMixin, Request):
pass
By default the request object assumes that the URL charset is the
same as the data charset. If the charset varies on each request
based on the transmitted data it's not a good idea to let the URLs
change based on that. Most browsers assume either utf-8 or latin1
    for the URLs if they have trouble figuring it out. It's strongly
recommended to set the URL charset to utf-8::
class MyRequest(DynamicCharsetRequestMixin, Request):
url_charset = 'utf-8'
.. versionadded:: 0.6
"""
#: the default charset that is assumed if the content type header
#: is missing or does not contain a charset parameter. The default
#: is latin1 which is what HTTP specifies as default charset.
#: You may however want to set this to utf-8 to better support
#: browsers that do not transmit a charset for incoming data.
default_charset = 'latin1'
def unknown_charset(self, charset):
"""Called if a charset was provided but is not supported by
the Python codecs module. By default latin1 is assumed then
to not lose any information, you may override this method to
change the behavior.
:param charset: the charset that was not found.
:return: the replacement charset.
"""
return 'latin1'
@cached_property
def charset(self):
"""The charset from the content type."""
header = self.environ.get('CONTENT_TYPE')
if header:
ct, options = parse_options_header(header)
charset = options.get('charset')
if charset:
if is_known_charset(charset):
return charset
return self.unknown_charset(charset)
return self.default_charset
class DynamicCharsetResponseMixin(object):
"""If this mixin is mixed into a response class it will provide
    a dynamic `charset` attribute. This means that the charset is
    looked up in, and stored to, the `Content-Type` header and updates
    itself automatically. This also means a small performance hit but
can be useful if you're working with different charsets on
responses.
    Because the charset attribute is not a property at class-level, the
default value is stored in `default_charset`.
    Because it changes the behavior of :class:`Response` this class has
to be mixed in *before* the actual response class::
class MyResponse(DynamicCharsetResponseMixin, Response):
pass
.. versionadded:: 0.6
"""
#: the default charset.
default_charset = 'utf-8'
def _get_charset(self):
header = self.headers.get('content-type')
if header:
charset = parse_options_header(header)[1].get('charset')
if charset:
return charset
return self.default_charset
def _set_charset(self, charset):
header = self.headers.get('content-type')
ct, options = parse_options_header(header)
if not ct:
raise TypeError('Cannot set charset if Content-Type '
'header is missing.')
options['charset'] = charset
self.headers['Content-Type'] = dump_options_header(ct, options)
charset = property(_get_charset, _set_charset, doc="""
The charset for the response. It's stored inside the
Content-Type header as a parameter.""")
del _get_charset, _set_charset
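# A minimal composition sketch (illustrative; it restates the pattern the
# docstrings above describe -- mixins must come *before* the base class):
#
#   from werkzeug.wrappers import Request, Response
#
#   class MyRequest(DynamicCharsetRequestMixin, JSONRequestMixin, Request):
#       url_charset = 'utf-8'
#
#   class MyResponse(DynamicCharsetResponseMixin, Response):
#       pass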
|
google/seq2seq
|
refs/heads/master
|
seq2seq/test/conv_encoder_test.py
|
6
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for ConvEncoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.encoders import ConvEncoder
class ConvEncoderTest(tf.test.TestCase):
"""
Tests the ConvEncoder class.
"""
def setUp(self):
super(ConvEncoderTest, self).setUp()
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.mode = tf.contrib.learn.ModeKeys.TRAIN
def _test_with_params(self, params):
"""Tests the encoder with a given parameter configuration"""
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
example_length = tf.ones(
self.batch_size, dtype=tf.int32) * self.sequence_length
encode_fn = ConvEncoder(params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
att_value_units = encode_fn.params["attention_cnn.units"]
output_units = encode_fn.params["output_cnn.units"]
np.testing.assert_array_equal(
encoder_output_.outputs.shape,
[self.batch_size, self.sequence_length, att_value_units])
np.testing.assert_array_equal(
encoder_output_.attention_values.shape,
[self.batch_size, self.sequence_length, output_units])
np.testing.assert_array_equal(
encoder_output_.final_state.shape,
[self.batch_size, output_units])
def test_encode_with_pos(self):
self._test_with_params({
"position_embeddings.enable": True,
"position_embeddings.num_positions": self.sequence_length,
"attention_cnn.units": 5,
"output_cnn.units": 6
})
if __name__ == "__main__":
tf.test.main()
|
Lynx187/script.module.urlresolver
|
refs/heads/master
|
lib/urlresolver/plugins/megavids.py
|
3
|
'''
Mega-vids urlresolver plugin
Copyright (C) 2013 Vinnydude
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from t0mm0.common.net import Net
from urlresolver.plugnplay.interfaces import UrlResolver
from urlresolver.plugnplay.interfaces import PluginSettings
from urlresolver.plugnplay import Plugin
from urlresolver import common
class MegaVidsResolver(Plugin, UrlResolver, PluginSettings):
implements = [UrlResolver, PluginSettings]
name = "mega-vids"
domains = [ "mega-vids.com" ]
def __init__(self):
p = self.get_setting('priority') or 100
self.priority = int(p)
self.net = Net()
def get_media_url(self, host, media_id):
url = self.get_url(host, media_id)
html = self.net.http_GET(url).content
data = {}
r = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', html)
for name, value in r:
data[name] = value
html = self.net.http_POST(url, data).content
r = re.search("file\s*:\s*'(.+?)'", html)
if r:
return r.group(1)
else:
raise UrlResolver.ResolverError('could not find video')
def get_url(self, host, media_id):
return 'http://mega-vids.com/%s' % media_id
def get_host_and_id(self, url):
r = re.search('//(.+?)/(?:embed-)?([0-9a-zA-Z]+)',url)
if r:
return r.groups()
        else:
            return False
def valid_url(self, url, host):
if self.get_setting('enabled') == 'false': return False
return (re.match('http://(www.)?mega-vids.com/[0-9A-Za-z]+', url) or re.match('http://(www.)?mega-vids.com/embed-[0-9A-Za-z]+[\-]*\d*[x]*\d*.*[html]*', url) or 'mega-vids' in host)
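# Illustrative resolver flow (not part of the original plugin):
#
#   r = MegaVidsResolver()
#   host, media_id = r.get_host_and_id('http://mega-vids.com/abc123')
#   if r.valid_url('http://mega-vids.com/abc123', host):
#       stream_url = r.get_media_url(host, media_id)
#   # get_media_url scrapes the hidden form fields, POSTs them back, and
#   # returns the "file: '...'" URL from the resulting page.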
|
ftrader-bitcoinabc/bitcoin-abc
|
refs/heads/master
|
test/functional/rpc_txoutproof.py
|
1
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test gettxoutproof and verifytxoutproof RPCs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
connect_nodes,
)
from test_framework.messages import CMerkleBlock, FromHex, ToHex
class MerkleBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 4
self.setup_clean_chain = True
# Nodes 0/1 are "wallet" nodes, Nodes 2/3 are used for testing
self.extra_args = [[], [], [], ["-txindex"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], self.nodes[1])
connect_nodes(self.nodes[0], self.nodes[2])
connect_nodes(self.nodes[0], self.nodes[3])
self.sync_all()
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction(
[node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid1 = self.nodes[0].sendrawtransaction(
self.nodes[0].signrawtransactionwithwallet(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction(
[node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid2 = self.nodes[0].sendrawtransaction(
self.nodes[0].signrawtransactionwithwallet(tx2)["hex"])
# This will raise an exception because the transaction is not yet in a
# block
assert_raises_rpc_error(-5, "Transaction not yet in block",
self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction(
[txin_spent], {self.nodes[0].getnewaddress(): 49.98})
txid3 = self.nodes[0].sendrawtransaction(
self.nodes[1].signrawtransactionwithwallet(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
assert_raises_rpc_error(-5, "Transaction not yet in block",
self.nodes[2].gettxoutproof, [txid_spent])
# We can get the proof if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# We can't get the proof if we specify a non-existent block
assert_raises_rpc_error(-5, "Block not found", self.nodes[2].gettxoutproof, [
txid_spent], "00000000000000000000000000000000")
# We can get the proof if the transaction is unspent
assert_equal(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
# We can get the proof if we provide a list of transactions and one of
# them is unspent. The ordering of the list should not matter.
assert_equal(sorted(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid1, txid2]))), sorted(txlist))
assert_equal(sorted(self.nodes[2].verifytxoutproof(
self.nodes[2].gettxoutproof([txid2, txid1]))), sorted(txlist))
# We can always get a proof if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(
self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
# We can't get a proof if we specify transactions from different blocks
assert_raises_rpc_error(-5, "Not all transactions found in specified or retrieved block",
self.nodes[2].gettxoutproof, [txid1, txid3])
# Now we'll try tweaking a proof.
proof = self.nodes[3].gettxoutproof([txid1, txid2])
assert txid1 in self.nodes[0].verifytxoutproof(proof)
assert txid2 in self.nodes[1].verifytxoutproof(proof)
tweaked_proof = FromHex(CMerkleBlock(), proof)
# Make sure that our serialization/deserialization is working
assert txid1 in self.nodes[2].verifytxoutproof(ToHex(tweaked_proof))
# Check to see if we can go up the merkle tree and pass this off as a
# single-transaction block
tweaked_proof.txn.nTransactions = 1
tweaked_proof.txn.vHash = [tweaked_proof.header.hashMerkleRoot]
tweaked_proof.txn.vBits = [True] + [False] * 7
for n in self.nodes:
assert not n.verifytxoutproof(ToHex(tweaked_proof))
# TODO: try more variants, eg transactions at different depths, and
# verify that the proofs are invalid
if __name__ == '__main__':
MerkleBlockTest().main()
|
havard024/prego
|
refs/heads/master
|
core/rss.py
|
3
|
# encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Global RSS Framework
"""
from django.contrib.syndication.views import Feed
from django.contrib.sites.models import RequestSite
from treeio.core.models import Object, UpdateRecord, User
import hashlib
import random
class ObjectFeed(Feed):
"Generic RSS class"
def __init__(self, title, link, description, objects, *args, **kwargs):
self.title = title
self.link = link
self.description = description
self.key = ''
self.objects = objects
super(ObjectFeed, self).__init__(*args, **kwargs)
def __call__(self, request, *args, **kwargs):
"Generates response"
self.site_url = 'http://' + RequestSite(request).domain
self.link = self.site_url + self.link
response = super(ObjectFeed, self).__call__(request, *args, **kwargs)
# Dirty hack for "example.com" - I hate it too but it works (contrast to all other solutions)
# TODO: proper workaround for "example.com" in URLs
# P.S. worship Ctulhu before you attempt this
response.content = response.content.replace(
'http://example.com', self.site_url)
return response
def get_object(self, request, *args, **kwargs):
"Returns feed objects"
return self.objects[:50]
def items(self, obj):
"Returns a single object"
return obj
def item_title(self, obj):
"Returns object title"
if isinstance(obj, Object):
return obj.creator
elif isinstance(obj, UpdateRecord):
return obj.author
def item_pubdate(self, obj):
"Returns object's date_created"
return obj.date_created
def item_description(self, obj):
"Returns object's body, details or full message"
if isinstance(obj, Object):
if obj.body:
return obj.body
else:
return obj.details
elif isinstance(obj, UpdateRecord):
body = ''
for object in obj.about.all():
body += '<a href="' + self.site_url + \
object.get_absolute_url(
) + '">' + unicode(object) + ' (' + object.get_human_type() + ')</a><br />'
body += obj.get_full_message()
return body
def item_link(self, obj):
"Returns object's full url"
if isinstance(obj, Object):
return self.site_url + obj.get_absolute_url()
elif isinstance(obj, UpdateRecord):
# link must be unique
return self.link + '?' + str(random.random())
def verify_secret_key(request):
"Verifies secret key for a request"
if request.user.username:
# always allow authenticated users
return True
else:
        key = request.GET.get('secret', '')
        try:
            user_id, secret = key.split('.', 1)
            profile = User.objects.get(pk=user_id)
        except (ValueError, User.DoesNotExist):
            return False
if key == get_secret_key(request, profile):
request.user = profile.user
return True
return False
def get_secret_key(request, profile=None):
"Generates secret key for a request in RSS format"
if not profile:
if request.user.username:
profile = request.user.get_profile()
if profile:
params = request.GET.copy()
if 'secret' in params:
del params['secret']
hash = hashlib.sha224()
hash.update(unicode(params))
hash.update(unicode(profile.id))
hash.update(unicode(profile.user.date_joined))
key = unicode(profile.id) + '.' + hash.hexdigest()
return key
return ''
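# Key format sketch (illustrative): the value passed as ?secret=... is
# "<profile_id>.<sha224 hexdigest of (query params + profile id +
# date_joined)>", so a feed URL looks roughly like
#   /rss/updates?secret=42.9f2c...e1
# verify_secret_key() recomputes the digest for that profile and compares.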
|
open-synergy/opnsynid-hr
|
refs/heads/8.0
|
hr_assignment_transition/__init__.py
|
7
|
# -*- coding: utf-8 -*-
# Copyright 2018 OpenSynergy Indonesia
# Copyright 2020 PT. Simetri Sinergi Indonesia
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import (
models,
)
|
drpaneas/linuxed.gr
|
refs/heads/master
|
lib/python2.7/site-packages/unidecode/x000.py
|
113
|
data = (
# Code points u+007f and below are equivalent to ASCII and are handled by a
# special case in the code. Hence they are not present in this table.
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
' ', # 0xa0
'!', # 0xa1
'C/', # 0xa2
# Not "GBP" - Pound Sign is used for more than just British Pounds.
'PS', # 0xa3
'$?', # 0xa4
'Y=', # 0xa5
'|', # 0xa6
'SS', # 0xa7
'"', # 0xa8
'(c)', # 0xa9
'a', # 0xaa
'<<', # 0xab
'!', # 0xac
'', # 0xad
'(r)', # 0xae
'-', # 0xaf
'deg', # 0xb0
'+-', # 0xb1
# These might be combined with other superscript digits (u+2070 - u+2079)
'2', # 0xb2
'3', # 0xb3
'\'', # 0xb4
'u', # 0xb5
'P', # 0xb6
'*', # 0xb7
',', # 0xb8
'1', # 0xb9
'o', # 0xba
'>>', # 0xbb
'1/4', # 0xbc
'1/2', # 0xbd
'3/4', # 0xbe
'?', # 0xbf
'A', # 0xc0
'A', # 0xc1
'A', # 0xc2
'A', # 0xc3
# Not "AE" - used in languages other than German
'A', # 0xc4
'A', # 0xc5
'AE', # 0xc6
'C', # 0xc7
'E', # 0xc8
'E', # 0xc9
'E', # 0xca
'E', # 0xcb
'I', # 0xcc
'I', # 0xcd
'I', # 0xce
'I', # 0xcf
'D', # 0xd0
'N', # 0xd1
'O', # 0xd2
'O', # 0xd3
'O', # 0xd4
'O', # 0xd5
# Not "OE" - used in languages other than German
'O', # 0xd6
'x', # 0xd7
'O', # 0xd8
'U', # 0xd9
'U', # 0xda
'U', # 0xdb
# Not "UE" - used in languages other than German
'U', # 0xdc
'Y', # 0xdd
'Th', # 0xde
'ss', # 0xdf
'a', # 0xe0
'a', # 0xe1
'a', # 0xe2
'a', # 0xe3
# Not "ae" - used in languages other than German
'a', # 0xe4
'a', # 0xe5
'ae', # 0xe6
'c', # 0xe7
'e', # 0xe8
'e', # 0xe9
'e', # 0xea
'e', # 0xeb
'i', # 0xec
'i', # 0xed
'i', # 0xee
'i', # 0xef
'd', # 0xf0
'n', # 0xf1
'o', # 0xf2
'o', # 0xf3
'o', # 0xf4
'o', # 0xf5
# Not "oe" - used in languages other than German
'o', # 0xf6
'/', # 0xf7
'o', # 0xf8
'u', # 0xf9
'u', # 0xfa
'u', # 0xfb
# Not "ue" - used in languages other than German
'u', # 0xfc
'y', # 0xfd
'th', # 0xfe
'y', # 0xff
)
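# Illustrative lookup (not part of the original table): unidecode indexes
# this tuple by code point within the page, so for U+00A3 (pound sign):
#   data[0xa3]  ->  'PS'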
|
bluemini/kuma
|
refs/heads/master
|
vendor/packages/pygments/lexers/perl.py
|
72
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.perl
~~~~~~~~~~~~~~~~~~~~
Lexers for Perl and related languages.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
using, this, default, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
from pygments.util import shebang_matches
__all__ = ['PerlLexer', 'Perl6Lexer']
class PerlLexer(RegexLexer):
"""
For `Perl <http://www.perl.org>`_ source code.
"""
name = 'Perl'
aliases = ['perl', 'pl']
filenames = ['*.pl', '*.pm', '*.t']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
# TODO: give this to a perl guy who knows how to parse perl...
tokens = {
'balanced-regex': [
(r'/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\[^\\]|[^\\!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'\{(\\\\|\\[^\\]|[^\\}])*\}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\[^\\]|[^\\>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\[^\\]|[^\\\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\[^\\]|[^\\)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\[^\\]|[^\\$])*\$[egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\#.*?$', Comment.Single),
(r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
(words((
'case', 'continue', 'do', 'else', 'elsif', 'for', 'foreach',
'if', 'last', 'my', 'next', 'our', 'redo', 'reset', 'then',
'unless', 'until', 'while', 'use', 'print', 'new', 'BEGIN',
'CHECK', 'INIT', 'END', 'return'), suffix=r'\b'),
Keyword),
(r'(format)(\s+)(\w+)(\s*)(=)(\s*\n)',
bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'),
(r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
# common delimiters
(r's/(\\\\|\\[^\\]|[^\\/])*/(\\\\|\\[^\\]|[^\\/])*/[egimosx]*',
String.Regex),
(r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
(r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
(r's@(\\\\|\\[^\\]|[^\\@])*@(\\\\|\\[^\\]|[^\\@])*@[egimosx]*',
String.Regex),
(r's%(\\\\|\\[^\\]|[^\\%])*%(\\\\|\\[^\\]|[^\\%])*%[egimosx]*',
String.Regex),
# balanced delimiters
(r's\{(\\\\|\\[^\\]|[^\\}])*\}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\[^\\]|[^\\>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\[^\\]|[^\\\]])*\]\s*', String.Regex,
'balanced-regex'),
(r's\((\\\\|\\[^\\]|[^\\)])*\)\s*', String.Regex,
'balanced-regex'),
(r'm?/(\\\\|\\[^\\]|[^\\/\n])*/[gcimosx]*', String.Regex),
(r'm(?=[/!\\{<\[(@%$])', String.Regex, 'balanced-regex'),
(r'((?<==~)|(?<=\())\s*/(\\\\|\\[^\\]|[^\\/])*/[gcimosx]*',
String.Regex),
(r'\s+', Text),
(words((
'abs', 'accept', 'alarm', 'atan2', 'bind', 'binmode', 'bless', 'caller', 'chdir',
'chmod', 'chomp', 'chop', 'chown', 'chr', 'chroot', 'close', 'closedir', 'connect',
'continue', 'cos', 'crypt', 'dbmclose', 'dbmopen', 'defined', 'delete', 'die',
'dump', 'each', 'endgrent', 'endhostent', 'endnetent', 'endprotoent',
'endpwent', 'endservent', 'eof', 'eval', 'exec', 'exists', 'exit', 'exp', 'fcntl',
'fileno', 'flock', 'fork', 'format', 'formline', 'getc', 'getgrent', 'getgrgid',
'getgrnam', 'gethostbyaddr', 'gethostbyname', 'gethostent', 'getlogin',
'getnetbyaddr', 'getnetbyname', 'getnetent', 'getpeername', 'getpgrp',
'getppid', 'getpriority', 'getprotobyname', 'getprotobynumber',
'getprotoent', 'getpwent', 'getpwnam', 'getpwuid', 'getservbyname',
'getservbyport', 'getservent', 'getsockname', 'getsockopt', 'glob', 'gmtime',
'goto', 'grep', 'hex', 'import', 'index', 'int', 'ioctl', 'join', 'keys', 'kill', 'last',
'lc', 'lcfirst', 'length', 'link', 'listen', 'local', 'localtime', 'log', 'lstat',
'map', 'mkdir', 'msgctl', 'msgget', 'msgrcv', 'msgsnd', 'my', 'next', 'no', 'oct', 'open',
'opendir', 'ord', 'our', 'pack', 'package', 'pipe', 'pop', 'pos', 'printf',
'prototype', 'push', 'quotemeta', 'rand', 'read', 'readdir',
'readline', 'readlink', 'readpipe', 'recv', 'redo', 'ref', 'rename', 'require',
'reverse', 'rewinddir', 'rindex', 'rmdir', 'scalar', 'seek', 'seekdir',
'select', 'semctl', 'semget', 'semop', 'send', 'setgrent', 'sethostent', 'setnetent',
'setpgrp', 'setpriority', 'setprotoent', 'setpwent', 'setservent',
'setsockopt', 'shift', 'shmctl', 'shmget', 'shmread', 'shmwrite', 'shutdown',
'sin', 'sleep', 'socket', 'socketpair', 'sort', 'splice', 'split', 'sprintf', 'sqrt',
'srand', 'stat', 'study', 'substr', 'symlink', 'syscall', 'sysopen', 'sysread',
'sysseek', 'system', 'syswrite', 'tell', 'telldir', 'tie', 'tied', 'time', 'times', 'tr',
'truncate', 'uc', 'ucfirst', 'umask', 'undef', 'unlink', 'unpack', 'unshift', 'untie',
'utime', 'values', 'vec', 'wait', 'waitpid', 'wantarray', 'warn', 'write'), suffix=r'\b'),
Name.Builtin),
(r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
(r'<<([\'"]?)([a-zA-Z_]\w*)\1;?\n.*?\n\2\n', String),
(r'__END__', Comment.Preproc, 'end-part'),
(r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
(r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
(r'[$@%#]+', Name.Variable, 'varname'),
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
Number.Float),
(r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
(r'\d+(_\d+)*', Number.Integer),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
(r'`(\\\\|\\[^\\]|[^`\\])*`', String.Backtick),
(r'<([^\s>]+)>', String.Regex),
(r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
(r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
(r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
(r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
(r'(q|qq|qw|qr|qx)([\W_])(.|\n)*?\2', String.Other),
(r'package\s+', Keyword, 'modulename'),
(r'sub\s+', Keyword, 'funcname'),
(r'(\[\]|\*\*|::|<<|>>|>=|<=>|<=|={3}|!=|=~|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&^|!\\~]=?', Operator),
(r'[()\[\]:;,<>/?{}]', Punctuation), # yes, there's no shortage
# of punctuation in Perl!
(r'(?=\w)', Name, 'name'),
],
'format': [
(r'\.\n', String.Interpol, '#pop'),
(r'[^\n]*\n', String.Interpol),
],
'varname': [
(r'\s+', Text),
(r'\{', Punctuation, '#pop'), # hash syntax?
(r'\)|,', Punctuation, '#pop'), # argument specifier
(r'\w+::', Name.Namespace),
(r'[\w:]+', Name.Variable, '#pop'),
],
'name': [
(r'\w+::', Name.Namespace),
(r'[\w:]+', Name, '#pop'),
(r'[A-Z_]+(?=\W)', Name.Constant, '#pop'),
(r'(?=\W)', Text, '#pop'),
],
'modulename': [
(r'[a-zA-Z_]\w*', Name.Namespace, '#pop')
],
'funcname': [
(r'[a-zA-Z_]\w*[!?]?', Name.Function),
(r'\s+', Text),
# argument declaration
(r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)),
(r';', Punctuation, '#pop'),
(r'.*?\{', Punctuation, '#pop'),
],
'cb-string': [
(r'\\[{}\\]', String.Other),
(r'\\', String.Other),
(r'\{', String.Other, 'cb-string'),
(r'\}', String.Other, '#pop'),
(r'[^{}\\]+', String.Other)
],
'rb-string': [
(r'\\[()\\]', String.Other),
(r'\\', String.Other),
(r'\(', String.Other, 'rb-string'),
(r'\)', String.Other, '#pop'),
(r'[^()]+', String.Other)
],
'sb-string': [
(r'\\[\[\]\\]', String.Other),
(r'\\', String.Other),
(r'\[', String.Other, 'sb-string'),
(r'\]', String.Other, '#pop'),
(r'[^\[\]]+', String.Other)
],
'lt-string': [
(r'\\[<>\\]', String.Other),
(r'\\', String.Other),
(r'\<', String.Other, 'lt-string'),
(r'\>', String.Other, '#pop'),
(r'[^<>]+', String.Other)
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
def analyse_text(text):
if shebang_matches(text, r'perl'):
return True
        if re.search(r'(?:my|our)\s+[$@%(]', text):
return 0.9
class Perl6Lexer(ExtendedRegexLexer):
"""
For `Perl 6 <http://www.perl6.org>`_ source code.
.. versionadded:: 2.0
"""
name = 'Perl6'
aliases = ['perl6', 'pl6']
filenames = ['*.pl', '*.pm', '*.nqp', '*.p6', '*.6pl', '*.p6l', '*.pl6',
'*.6pm', '*.p6m', '*.pm6', '*.t']
mimetypes = ['text/x-perl6', 'application/x-perl6']
flags = re.MULTILINE | re.DOTALL | re.UNICODE
PERL6_IDENTIFIER_RANGE = "['\w:-]"
PERL6_KEYWORDS = (
'BEGIN', 'CATCH', 'CHECK', 'CONTROL', 'END', 'ENTER', 'FIRST', 'INIT',
'KEEP', 'LAST', 'LEAVE', 'NEXT', 'POST', 'PRE', 'START', 'TEMP',
'UNDO', 'as', 'assoc', 'async', 'augment', 'binary', 'break', 'but',
'cached', 'category', 'class', 'constant', 'contend', 'continue',
'copy', 'deep', 'default', 'defequiv', 'defer', 'die', 'do', 'else',
'elsif', 'enum', 'equiv', 'exit', 'export', 'fail', 'fatal', 'for',
'gather', 'given', 'goto', 'grammar', 'handles', 'has', 'if', 'inline',
'irs', 'is', 'last', 'leave', 'let', 'lift', 'loop', 'looser', 'macro',
'make', 'maybe', 'method', 'module', 'multi', 'my', 'next', 'of',
'ofs', 'only', 'oo', 'ors', 'our', 'package', 'parsed', 'prec',
'proto', 'readonly', 'redo', 'ref', 'regex', 'reparsed', 'repeat',
'require', 'required', 'return', 'returns', 'role', 'rule', 'rw',
'self', 'slang', 'state', 'sub', 'submethod', 'subset', 'supersede',
'take', 'temp', 'tighter', 'token', 'trusts', 'try', 'unary',
'unless', 'until', 'use', 'warn', 'when', 'where', 'while', 'will',
)
PERL6_BUILTINS = (
'ACCEPTS', 'HOW', 'REJECTS', 'VAR', 'WHAT', 'WHENCE', 'WHERE', 'WHICH',
'WHO', 'abs', 'acos', 'acosec', 'acosech', 'acosh', 'acotan', 'acotanh',
'all', 'any', 'approx', 'arity', 'asec', 'asech', 'asin', 'asinh',
'assuming', 'atan', 'atan2', 'atanh', 'attr', 'bless', 'body', 'by',
'bytes', 'caller', 'callsame', 'callwith', 'can', 'capitalize', 'cat',
'ceiling', 'chars', 'chmod', 'chomp', 'chop', 'chr', 'chroot',
'circumfix', 'cis', 'classify', 'clone', 'close', 'cmp_ok', 'codes',
'comb', 'connect', 'contains', 'context', 'cos', 'cosec', 'cosech',
'cosh', 'cotan', 'cotanh', 'count', 'defined', 'delete', 'diag',
'dies_ok', 'does', 'e', 'each', 'eager', 'elems', 'end', 'eof', 'eval',
'eval_dies_ok', 'eval_elsewhere', 'eval_lives_ok', 'evalfile', 'exists',
'exp', 'first', 'flip', 'floor', 'flunk', 'flush', 'fmt', 'force_todo',
'fork', 'from', 'getc', 'gethost', 'getlogin', 'getpeername', 'getpw',
'gmtime', 'graphs', 'grep', 'hints', 'hyper', 'im', 'index', 'infix',
'invert', 'is_approx', 'is_deeply', 'isa', 'isa_ok', 'isnt', 'iterator',
'join', 'key', 'keys', 'kill', 'kv', 'lastcall', 'lazy', 'lc', 'lcfirst',
'like', 'lines', 'link', 'lives_ok', 'localtime', 'log', 'log10', 'map',
'max', 'min', 'minmax', 'name', 'new', 'nextsame', 'nextwith', 'nfc',
'nfd', 'nfkc', 'nfkd', 'nok_error', 'nonce', 'none', 'normalize', 'not',
'nothing', 'ok', 'once', 'one', 'open', 'opendir', 'operator', 'ord',
'p5chomp', 'p5chop', 'pack', 'pair', 'pairs', 'pass', 'perl', 'pi',
'pick', 'plan', 'plan_ok', 'polar', 'pop', 'pos', 'postcircumfix',
'postfix', 'pred', 'prefix', 'print', 'printf', 'push', 'quasi',
'quotemeta', 'rand', 're', 'read', 'readdir', 'readline', 'reduce',
'reverse', 'rewind', 'rewinddir', 'rindex', 'roots', 'round',
'roundrobin', 'run', 'runinstead', 'sameaccent', 'samecase', 'say',
        'sec', 'sech', 'seek', 'shape', 'shift', 'sign', 'signature',
'sin', 'sinh', 'skip', 'skip_rest', 'sleep', 'slurp', 'sort', 'splice',
'split', 'sprintf', 'sqrt', 'srand', 'strand', 'subst', 'substr', 'succ',
'sum', 'symlink', 'tan', 'tanh', 'throws_ok', 'time', 'times', 'to',
'todo', 'trim', 'trim_end', 'trim_start', 'true', 'truncate', 'uc',
'ucfirst', 'undef', 'undefine', 'uniq', 'unlike', 'unlink', 'unpack',
'unpolar', 'unshift', 'unwrap', 'use_ok', 'value', 'values', 'vec',
'version_lt', 'void', 'wait', 'want', 'wrap', 'write', 'zip',
)
PERL6_BUILTIN_CLASSES = (
'Abstraction', 'Any', 'AnyChar', 'Array', 'Associative', 'Bag', 'Bit',
'Blob', 'Block', 'Bool', 'Buf', 'Byte', 'Callable', 'Capture', 'Char', 'Class',
'Code', 'Codepoint', 'Comparator', 'Complex', 'Decreasing', 'Exception',
'Failure', 'False', 'Grammar', 'Grapheme', 'Hash', 'IO', 'Increasing',
'Int', 'Junction', 'KeyBag', 'KeyExtractor', 'KeyHash', 'KeySet',
'KitchenSink', 'List', 'Macro', 'Mapping', 'Match', 'Matcher', 'Method',
'Module', 'Num', 'Object', 'Ordered', 'Ordering', 'OrderingPair',
'Package', 'Pair', 'Positional', 'Proxy', 'Range', 'Rat', 'Regex',
'Role', 'Routine', 'Scalar', 'Seq', 'Set', 'Signature', 'Str', 'StrLen',
'StrPos', 'Sub', 'Submethod', 'True', 'UInt', 'Undef', 'Version', 'Void',
'Whatever', 'bit', 'bool', 'buf', 'buf1', 'buf16', 'buf2', 'buf32',
'buf4', 'buf64', 'buf8', 'complex', 'int', 'int1', 'int16', 'int2',
'int32', 'int4', 'int64', 'int8', 'num', 'rat', 'rat1', 'rat16', 'rat2',
'rat32', 'rat4', 'rat64', 'rat8', 'uint', 'uint1', 'uint16', 'uint2',
'uint32', 'uint4', 'uint64', 'uint8', 'utf16', 'utf32', 'utf8',
)
PERL6_OPERATORS = (
'X', 'Z', 'after', 'also', 'and', 'andthen', 'before', 'cmp', 'div',
'eq', 'eqv', 'extra', 'ff', 'fff', 'ge', 'gt', 'le', 'leg', 'lt', 'm',
'mm', 'mod', 'ne', 'or', 'orelse', 'rx', 's', 'tr', 'x', 'xor', 'xx',
'++', '--', '**', '!', '+', '-', '~', '?', '|', '||', '+^', '~^', '?^',
'^', '*', '/', '%', '%%', '+&', '+<', '+>', '~&', '~<', '~>', '?&',
'gcd', 'lcm', '+', '-', '+|', '+^', '~|', '~^', '?|', '?^',
'~', '&', '^', 'but', 'does', '<=>', '..', '..^', '^..', '^..^',
'!=', '==', '<', '<=', '>', '>=', '~~', '===', '!eqv',
'&&', '||', '^^', '//', 'min', 'max', '??', '!!', 'ff', 'fff', 'so',
'not', '<==', '==>', '<<==', '==>>',
)
# Perl 6 has a *lot* of possible bracketing characters
# this list was lifted from STD.pm6 (https://github.com/perl6/std)
PERL6_BRACKETS = {
u'\u0028': u'\u0029', u'\u003c': u'\u003e', u'\u005b': u'\u005d',
u'\u007b': u'\u007d', u'\u00ab': u'\u00bb', u'\u0f3a': u'\u0f3b',
u'\u0f3c': u'\u0f3d', u'\u169b': u'\u169c', u'\u2018': u'\u2019',
u'\u201a': u'\u2019', u'\u201b': u'\u2019', u'\u201c': u'\u201d',
u'\u201e': u'\u201d', u'\u201f': u'\u201d', u'\u2039': u'\u203a',
u'\u2045': u'\u2046', u'\u207d': u'\u207e', u'\u208d': u'\u208e',
u'\u2208': u'\u220b', u'\u2209': u'\u220c', u'\u220a': u'\u220d',
u'\u2215': u'\u29f5', u'\u223c': u'\u223d', u'\u2243': u'\u22cd',
u'\u2252': u'\u2253', u'\u2254': u'\u2255', u'\u2264': u'\u2265',
u'\u2266': u'\u2267', u'\u2268': u'\u2269', u'\u226a': u'\u226b',
u'\u226e': u'\u226f', u'\u2270': u'\u2271', u'\u2272': u'\u2273',
u'\u2274': u'\u2275', u'\u2276': u'\u2277', u'\u2278': u'\u2279',
u'\u227a': u'\u227b', u'\u227c': u'\u227d', u'\u227e': u'\u227f',
u'\u2280': u'\u2281', u'\u2282': u'\u2283', u'\u2284': u'\u2285',
u'\u2286': u'\u2287', u'\u2288': u'\u2289', u'\u228a': u'\u228b',
u'\u228f': u'\u2290', u'\u2291': u'\u2292', u'\u2298': u'\u29b8',
u'\u22a2': u'\u22a3', u'\u22a6': u'\u2ade', u'\u22a8': u'\u2ae4',
u'\u22a9': u'\u2ae3', u'\u22ab': u'\u2ae5', u'\u22b0': u'\u22b1',
u'\u22b2': u'\u22b3', u'\u22b4': u'\u22b5', u'\u22b6': u'\u22b7',
u'\u22c9': u'\u22ca', u'\u22cb': u'\u22cc', u'\u22d0': u'\u22d1',
u'\u22d6': u'\u22d7', u'\u22d8': u'\u22d9', u'\u22da': u'\u22db',
u'\u22dc': u'\u22dd', u'\u22de': u'\u22df', u'\u22e0': u'\u22e1',
u'\u22e2': u'\u22e3', u'\u22e4': u'\u22e5', u'\u22e6': u'\u22e7',
u'\u22e8': u'\u22e9', u'\u22ea': u'\u22eb', u'\u22ec': u'\u22ed',
u'\u22f0': u'\u22f1', u'\u22f2': u'\u22fa', u'\u22f3': u'\u22fb',
u'\u22f4': u'\u22fc', u'\u22f6': u'\u22fd', u'\u22f7': u'\u22fe',
u'\u2308': u'\u2309', u'\u230a': u'\u230b', u'\u2329': u'\u232a',
u'\u23b4': u'\u23b5', u'\u2768': u'\u2769', u'\u276a': u'\u276b',
u'\u276c': u'\u276d', u'\u276e': u'\u276f', u'\u2770': u'\u2771',
u'\u2772': u'\u2773', u'\u2774': u'\u2775', u'\u27c3': u'\u27c4',
u'\u27c5': u'\u27c6', u'\u27d5': u'\u27d6', u'\u27dd': u'\u27de',
u'\u27e2': u'\u27e3', u'\u27e4': u'\u27e5', u'\u27e6': u'\u27e7',
u'\u27e8': u'\u27e9', u'\u27ea': u'\u27eb', u'\u2983': u'\u2984',
u'\u2985': u'\u2986', u'\u2987': u'\u2988', u'\u2989': u'\u298a',
u'\u298b': u'\u298c', u'\u298d': u'\u298e', u'\u298f': u'\u2990',
u'\u2991': u'\u2992', u'\u2993': u'\u2994', u'\u2995': u'\u2996',
u'\u2997': u'\u2998', u'\u29c0': u'\u29c1', u'\u29c4': u'\u29c5',
u'\u29cf': u'\u29d0', u'\u29d1': u'\u29d2', u'\u29d4': u'\u29d5',
u'\u29d8': u'\u29d9', u'\u29da': u'\u29db', u'\u29f8': u'\u29f9',
u'\u29fc': u'\u29fd', u'\u2a2b': u'\u2a2c', u'\u2a2d': u'\u2a2e',
u'\u2a34': u'\u2a35', u'\u2a3c': u'\u2a3d', u'\u2a64': u'\u2a65',
u'\u2a79': u'\u2a7a', u'\u2a7d': u'\u2a7e', u'\u2a7f': u'\u2a80',
u'\u2a81': u'\u2a82', u'\u2a83': u'\u2a84', u'\u2a8b': u'\u2a8c',
u'\u2a91': u'\u2a92', u'\u2a93': u'\u2a94', u'\u2a95': u'\u2a96',
u'\u2a97': u'\u2a98', u'\u2a99': u'\u2a9a', u'\u2a9b': u'\u2a9c',
u'\u2aa1': u'\u2aa2', u'\u2aa6': u'\u2aa7', u'\u2aa8': u'\u2aa9',
u'\u2aaa': u'\u2aab', u'\u2aac': u'\u2aad', u'\u2aaf': u'\u2ab0',
u'\u2ab3': u'\u2ab4', u'\u2abb': u'\u2abc', u'\u2abd': u'\u2abe',
u'\u2abf': u'\u2ac0', u'\u2ac1': u'\u2ac2', u'\u2ac3': u'\u2ac4',
u'\u2ac5': u'\u2ac6', u'\u2acd': u'\u2ace', u'\u2acf': u'\u2ad0',
u'\u2ad1': u'\u2ad2', u'\u2ad3': u'\u2ad4', u'\u2ad5': u'\u2ad6',
u'\u2aec': u'\u2aed', u'\u2af7': u'\u2af8', u'\u2af9': u'\u2afa',
u'\u2e02': u'\u2e03', u'\u2e04': u'\u2e05', u'\u2e09': u'\u2e0a',
u'\u2e0c': u'\u2e0d', u'\u2e1c': u'\u2e1d', u'\u2e20': u'\u2e21',
u'\u3008': u'\u3009', u'\u300a': u'\u300b', u'\u300c': u'\u300d',
u'\u300e': u'\u300f', u'\u3010': u'\u3011', u'\u3014': u'\u3015',
u'\u3016': u'\u3017', u'\u3018': u'\u3019', u'\u301a': u'\u301b',
u'\u301d': u'\u301e', u'\ufd3e': u'\ufd3f', u'\ufe17': u'\ufe18',
u'\ufe35': u'\ufe36', u'\ufe37': u'\ufe38', u'\ufe39': u'\ufe3a',
u'\ufe3b': u'\ufe3c', u'\ufe3d': u'\ufe3e', u'\ufe3f': u'\ufe40',
u'\ufe41': u'\ufe42', u'\ufe43': u'\ufe44', u'\ufe47': u'\ufe48',
u'\ufe59': u'\ufe5a', u'\ufe5b': u'\ufe5c', u'\ufe5d': u'\ufe5e',
u'\uff08': u'\uff09', u'\uff1c': u'\uff1e', u'\uff3b': u'\uff3d',
u'\uff5b': u'\uff5d', u'\uff5f': u'\uff60', u'\uff62': u'\uff63',
}
def _build_word_match(words, boundary_regex_fragment=None, prefix='', suffix=''):
if boundary_regex_fragment is None:
return r'\b(' + prefix + r'|'.join(re.escape(x) for x in words) + \
suffix + r')\b'
else:
return r'(?<!' + boundary_regex_fragment + r')' + prefix + r'(' + \
r'|'.join(re.escape(x) for x in words) + r')' + suffix + r'(?!' + \
boundary_regex_fragment + r')'
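    # Illustrative note (not in the original source): with no boundary
    # fragment, _build_word_match(('if', 'for')) yields r'\b(if|for)\b',
    # while passing PERL6_IDENTIFIER_RANGE guards the alternation with
    # lookarounds instead, so keywords embedded in hyphenated Perl 6
    # identifiers (e.g. "if-branch") do not match.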
def brackets_callback(token_class):
def callback(lexer, match, context):
groups = match.groupdict()
opening_chars = groups['delimiter']
n_chars = len(opening_chars)
adverbs = groups.get('adverbs')
closer = Perl6Lexer.PERL6_BRACKETS.get(opening_chars[0])
text = context.text
if closer is None: # it's not a mirrored character, which means we
# just need to look for the next occurrence
end_pos = text.find(opening_chars, match.start('delimiter') + n_chars)
else: # we need to look for the corresponding closing character,
# keep nesting in mind
closing_chars = closer * n_chars
nesting_level = 1
search_pos = match.start('delimiter')
while nesting_level > 0:
next_open_pos = text.find(opening_chars, search_pos + n_chars)
next_close_pos = text.find(closing_chars, search_pos + n_chars)
if next_close_pos == -1:
next_close_pos = len(text)
nesting_level = 0
elif next_open_pos != -1 and next_open_pos < next_close_pos:
nesting_level += 1
search_pos = next_open_pos
else: # next_close_pos < next_open_pos
nesting_level -= 1
search_pos = next_close_pos
end_pos = next_close_pos
if end_pos < 0: # if we didn't find a closer, just highlight the
# rest of the text in this class
end_pos = len(text)
if adverbs is not None and re.search(r':to\b', adverbs):
heredoc_terminator = text[match.start('delimiter') + n_chars:end_pos]
end_heredoc = re.search(r'^\s*' + re.escape(heredoc_terminator) +
r'\s*$', text[end_pos:], re.MULTILINE)
if end_heredoc:
end_pos += end_heredoc.end()
else:
end_pos = len(text)
yield match.start(), token_class, text[match.start():end_pos + n_chars]
context.pos = end_pos + n_chars
return callback
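    # Worked example (illustrative comment, not original code): when lexing
    # q{ a { b } c }, nesting_level starts at 1, rises to 2 at the inner
    # '{', drops back to 1 at the matching '}', and the quote construct
    # only terminates at the final '}' where the level would reach 0.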
def opening_brace_callback(lexer, match, context):
stack = context.stack
yield match.start(), Text, context.text[match.start():match.end()]
context.pos = match.end()
# if we encounter an opening brace and we're one level
# below a token state, it means we need to increment
# the nesting level for braces so we know later when
# we should return to the token rules.
if len(stack) > 2 and stack[-2] == 'token':
context.perl6_token_nesting_level += 1
def closing_brace_callback(lexer, match, context):
stack = context.stack
yield match.start(), Text, context.text[match.start():match.end()]
context.pos = match.end()
# if we encounter a free closing brace and we're one level
# below a token state, it means we need to check the nesting
# level to see if we need to return to the token state.
if len(stack) > 2 and stack[-2] == 'token':
context.perl6_token_nesting_level -= 1
if context.perl6_token_nesting_level == 0:
stack.pop()
def embedded_perl6_callback(lexer, match, context):
context.perl6_token_nesting_level = 1
yield match.start(), Text, context.text[match.start():match.end()]
context.pos = match.end()
context.stack.append('root')
# If you're modifying these rules, be careful if you need to process '{' or '}'
# characters. We have special logic for processing these characters (due to the fact
# that you can nest Perl 6 code in regex blocks), so if you need to process one of
# them, make sure you also process the corresponding one!
tokens = {
'common': [
(r'#[`|=](?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + r'])(?P=first_char)*)',
brackets_callback(Comment.Multiline)),
            (r'#[^\n]*$', Comment.Single),
(r'^(\s*)=begin\s+(\w+)\b.*?^\1=end\s+\2', Comment.Multiline),
(r'^(\s*)=for.*?\n\s*?\n', Comment.Multiline),
(r'^=.*?\n\s*?\n', Comment.Multiline),
(r'(regex|token|rule)(\s*' + PERL6_IDENTIFIER_RANGE + '+:sym)',
bygroups(Keyword, Name), 'token-sym-brackets'),
(r'(regex|token|rule)(?!' + PERL6_IDENTIFIER_RANGE + ')(\s*' + PERL6_IDENTIFIER_RANGE + '+)?',
bygroups(Keyword, Name), 'pre-token'),
# deal with a special case in the Perl 6 grammar (role q { ... })
(r'(role)(\s+)(q)(\s*)', bygroups(Keyword, Text, Name, Text)),
(_build_word_match(PERL6_KEYWORDS, PERL6_IDENTIFIER_RANGE), Keyword),
(_build_word_match(PERL6_BUILTIN_CLASSES, PERL6_IDENTIFIER_RANGE, suffix='(?::[UD])?'),
Name.Builtin),
(_build_word_match(PERL6_BUILTINS, PERL6_IDENTIFIER_RANGE), Name.Builtin),
# copied from PerlLexer
(r'[$@%&][.^:?=!~]?' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*',
Name.Variable),
(r'\$[!/](?:<<.*?>>|<.*?>|«.*?»)*', Name.Variable.Global),
(r'::\?\w+', Name.Variable.Global),
(r'[$@%&]\*' + PERL6_IDENTIFIER_RANGE + u'+(?:<<.*?>>|<.*?>|«.*?»)*',
Name.Variable.Global),
(r'\$(?:<.*?>)+', Name.Variable),
(r'(?:q|qq|Q)[a-zA-Z]?\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^0-9a-zA-Z:\s])'
r'(?P=first_char)*)', brackets_callback(String)),
# copied from PerlLexer
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'(?i)(\d*(_\d*)*\.\d+(_\d*)*|\d+(_\d*)*\.\d+(_\d*)*)(e[+-]?\d+)?',
Number.Float),
(r'(?i)\d+(_\d*)*e[+-]?\d+(_\d*)*', Number.Float),
(r'\d+(_\d+)*', Number.Integer),
(r'(?<=~~)\s*/(?:\\\\|\\/|.)*?/', String.Regex),
(r'(?<=[=(,])\s*/(?:\\\\|\\/|.)*?/', String.Regex),
(r'm\w+(?=\()', Name),
(r'(?:m|ms|rx)\s*(?P<adverbs>:[\w\s:]+)?\s*(?P<delimiter>(?P<first_char>[^\w:\s])'
r'(?P=first_char)*)', brackets_callback(String.Regex)),
(r'(?:s|ss|tr)\s*(?::[\w\s:]+)?\s*/(?:\\\\|\\/|.)*?/(?:\\\\|\\/|.)*?/',
String.Regex),
(r'<[^\s=].*?\S>', String),
(_build_word_match(PERL6_OPERATORS), Operator),
(r'\w' + PERL6_IDENTIFIER_RANGE + '*', Name),
(r"'(\\\\|\\[^\\]|[^'\\])*'", String),
(r'"(\\\\|\\[^\\]|[^"\\])*"', String),
],
'root': [
include('common'),
(r'\{', opening_brace_callback),
(r'\}', closing_brace_callback),
(r'.+?', Text),
],
'pre-token': [
include('common'),
(r'\{', Text, ('#pop', 'token')),
(r'.+?', Text),
],
'token-sym-brackets': [
(r'(?P<delimiter>(?P<first_char>[' + ''.join(PERL6_BRACKETS) + '])(?P=first_char)*)',
brackets_callback(Name), ('#pop', 'pre-token')),
default(('#pop', 'pre-token')),
],
'token': [
(r'\}', Text, '#pop'),
(r'(?<=:)(?:my|our|state|constant|temp|let).*?;', using(this)),
# make sure that quotes in character classes aren't treated as strings
(r'<(?:[-!?+.]\s*)?\[.*?\]>', String.Regex),
# make sure that '#' characters in quotes aren't treated as comments
(r"(?<!\\)'(\\\\|\\[^\\]|[^'\\])*'", String.Regex),
(r'(?<!\\)"(\\\\|\\[^\\]|[^"\\])*"', String.Regex),
            (r'#.*?$', Comment.Single),
(r'\{', embedded_perl6_callback),
('.+?', String.Regex),
],
}
def analyse_text(text):
def strip_pod(lines):
in_pod = False
stripped_lines = []
for line in lines:
if re.match(r'^=(?:end|cut)', line):
in_pod = False
elif re.match(r'^=\w+', line):
in_pod = True
elif not in_pod:
stripped_lines.append(line)
return stripped_lines
# XXX handle block comments
lines = text.splitlines()
lines = strip_pod(lines)
text = '\n'.join(lines)
if shebang_matches(text, r'perl6|rakudo|niecza|pugs'):
return True
saw_perl_decl = False
rating = False
# check for my/our/has declarations
if re.search("(?:my|our|has)\s+(?:" + Perl6Lexer.PERL6_IDENTIFIER_RANGE +
"+\s+)?[$@%&(]", text):
rating = 0.8
saw_perl_decl = True
        for line in lines:
            line = re.sub(r'#.*', '', line)
            if re.match(r'^\s*$', line):
                continue
            # match v6; use v6; use v6.0; use v6.0.0;
            if re.match(r'^\s*(?:use\s+)?v6(?:\.\d(?:\.\d)?)?;', line):
                return True
            # match class, module, role, enum, grammar declarations
            class_decl = re.match(r'^\s*(?:(?P<scope>my|our)\s+)?(?:module|class|role|enum|grammar)', line)
if class_decl:
if saw_perl_decl or class_decl.group('scope') is not None:
return True
rating = 0.05
continue
break
return rating
def __init__(self, **options):
super(Perl6Lexer, self).__init__(**options)
self.encoding = options.get('encoding', 'utf-8')
|
rmcgibbo/msmbuilder
|
refs/heads/master
|
msmbuilder/utils/validation.py
|
3
|
from __future__ import print_function, division, absolute_import
import numpy as np
import mdtraj as md
__all__ = ['list_of_1d', 'check_iter_of_sequences', 'array2d']
def list_of_1d(y):
if not hasattr(y, '__iter__') or len(y) == 0:
raise ValueError('Bad input shape')
if not hasattr(y[0], '__iter__'):
return [np.array(y)]
result = []
for i, x in enumerate(y):
value = np.array(x)
if value.ndim != 1:
raise ValueError(
"Bad input shape. Element %d has shape %s, but "
"should be 1D" % (i, str(value.shape)))
result.append(value)
return result
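# Illustrative usage (not part of the original module):
# >>> out = list_of_1d([[1, 2], [3, 4, 5]])
# >>> [a.shape for a in out]
# [(2,), (3,)]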
def check_iter_of_sequences(sequences, allow_trajectory=False, ndim=2,
max_iter=None):
"""Check that ``sequences`` is a iterable of trajectory-like sequences,
suitable as input to ``fit()`` for estimators following the Mixtape
API.
Parameters
----------
sequences : object
The object to check
allow_trajectory : bool
Are ``md.Trajectory``s allowed?
ndim : int
The expected dimensionality of the sequences
max_iter : int, optional
Only check at maximum the first ``max_iter`` entries in ``sequences``.
"""
value = True
for i, X in enumerate(sequences):
if not isinstance(X, np.ndarray):
if (not allow_trajectory) and isinstance(X, md.Trajectory):
value = False
break
        # use getattr so non-array inputs without an .ndim attribute fail
        # validation instead of raising AttributeError
        if not isinstance(X, md.Trajectory) and getattr(X, 'ndim', None) != ndim:
value = False
break
if max_iter is not None and i >= max_iter:
break
if not value:
raise ValueError('sequences must be a list of sequences')
def array2d(X, dtype=None, order=None, copy=False, force_all_finite=True):
"""Returns at least 2-d array with data from X"""
X_2d = np.asarray(np.atleast_2d(X), dtype=dtype, order=order)
if force_all_finite:
_assert_all_finite(X_2d)
if X is X_2d and copy:
X_2d = _safe_copy(X_2d)
return X_2d
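# Illustrative usage (not part of the original module): a 1-D input is
# promoted to a single-row 2-D array.
# >>> array2d([1.0, 2.0, 3.0]).shape
# (1, 3)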
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def _safe_copy(X):
# Copy, but keep the order
return np.copy(X, order='K')
|
xuru/bowling
|
refs/heads/master
|
app/resources/__init__.py
|
1
|
from .roll import *
from .game import *
|
gangadhar-kadam/nassimapp
|
refs/heads/master
|
selling/doctype/customer/customer.py
|
7
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.model.doc import Document, make_autoname
from webnotes import msgprint, _
import webnotes.defaults
from utilities.transaction_base import TransactionBase
class DocType(TransactionBase):
def __init__(self, doc, doclist=[]):
self.doc = doc
self.doclist = doclist
def autoname(self):
cust_master_name = webnotes.defaults.get_global_default('cust_master_name')
if cust_master_name == 'Customer Name':
if webnotes.conn.exists("Supplier", self.doc.customer_name):
msgprint(_("A Supplier exists with same name"), raise_exception=1)
self.doc.name = self.doc.customer_name
else:
self.doc.name = make_autoname(self.doc.naming_series+'.#####')
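	# Illustrative note (assumption, not original code): with e.g.
	# naming_series "CUST-", make_autoname("CUST-" + ".#####") yields
	# zero-padded serial names such as CUST-00001, CUST-00002, ...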
def get_company_abbr(self):
return webnotes.conn.get_value('Company', self.doc.company, 'abbr')
def get_receivables_group(self):
g = webnotes.conn.sql("select receivables_group from tabCompany where name=%s", self.doc.company)
g = g and g[0][0] or ''
if not g:
msgprint("Update Company master, assign a default group for Receivables")
raise Exception
return g
def validate_values(self):
if webnotes.defaults.get_global_default('cust_master_name') == 'Naming Series' and not self.doc.naming_series:
webnotes.throw("Series is Mandatory.", webnotes.MandatoryError)
def validate(self):
self.validate_values()
def update_lead_status(self):
if self.doc.lead_name:
webnotes.conn.sql("update `tabLead` set status='Converted' where name = %s", self.doc.lead_name)
def update_address(self):
webnotes.conn.sql("""update `tabAddress` set customer_name=%s, modified=NOW()
where customer=%s""", (self.doc.customer_name, self.doc.name))
def update_contact(self):
webnotes.conn.sql("""update `tabContact` set customer_name=%s, modified=NOW()
where customer=%s""", (self.doc.customer_name, self.doc.name))
def create_account_head(self):
if self.doc.company :
abbr = self.get_company_abbr()
if not webnotes.conn.exists("Account", (self.doc.name + " - " + abbr)):
parent_account = self.get_receivables_group()
# create
ac_bean = webnotes.bean({
"doctype": "Account",
'account_name': self.doc.name,
'parent_account': parent_account,
'group_or_ledger':'Ledger',
'company':self.doc.company,
'master_type':'Customer',
'master_name':self.doc.name,
"freeze_account": "No"
})
ac_bean.ignore_permissions = True
ac_bean.insert()
msgprint(_("Account Head") + ": " + ac_bean.doc.name + _(" created"))
else :
msgprint(_("Please Select Company under which you want to create account head"))
def update_credit_days_limit(self):
webnotes.conn.sql("""update tabAccount set credit_days = %s, credit_limit = %s
where master_type='Customer' and master_name = %s""",
(self.doc.credit_days or 0, self.doc.credit_limit or 0, self.doc.name))
def create_lead_address_contact(self):
if self.doc.lead_name:
if not webnotes.conn.get_value("Address", {"lead": self.doc.lead_name, "customer": self.doc.customer}):
webnotes.conn.sql("""update `tabAddress` set customer=%s, customer_name=%s where lead=%s""",
(self.doc.name, self.doc.customer_name, self.doc.lead_name))
lead = webnotes.conn.get_value("Lead", self.doc.lead_name, ["lead_name", "email_id", "phone", "mobile_no"], as_dict=True)
c = Document('Contact')
c.first_name = lead.lead_name
c.email_id = lead.email_id
c.phone = lead.phone
c.mobile_no = lead.mobile_no
c.customer = self.doc.name
c.customer_name = self.doc.customer_name
c.is_primary_contact = 1
try:
c.save(1)
except NameError, e:
pass
def on_update(self):
self.validate_name_with_customer_group()
self.update_lead_status()
self.update_address()
self.update_contact()
# create account head
self.create_account_head()
# update credit days and limit in account
self.update_credit_days_limit()
#create address and contact from lead
self.create_lead_address_contact()
def validate_name_with_customer_group(self):
if webnotes.conn.exists("Customer Group", self.doc.name):
webnotes.msgprint("An Customer Group exists with same name (%s), \
please change the Customer name or rename the Customer Group" %
self.doc.name, raise_exception=1)
def delete_customer_address(self):
addresses = webnotes.conn.sql("""select name, lead from `tabAddress`
where customer=%s""", (self.doc.name,))
for name, lead in addresses:
if lead:
webnotes.conn.sql("""update `tabAddress` set customer=null, customer_name=null
where name=%s""", name)
else:
webnotes.conn.sql("""delete from `tabAddress` where name=%s""", name)
def delete_customer_contact(self):
for contact in webnotes.conn.sql_list("""select name from `tabContact`
where customer=%s""", self.doc.name):
webnotes.delete_doc("Contact", contact)
def delete_customer_account(self):
"""delete customer's ledger if exist and check balance before deletion"""
acc = webnotes.conn.sql("select name from `tabAccount` where master_type = 'Customer' \
and master_name = %s and docstatus < 2", self.doc.name)
if acc:
from webnotes.model import delete_doc
delete_doc('Account', acc[0][0])
def on_trash(self):
self.delete_customer_address()
self.delete_customer_contact()
self.delete_customer_account()
if self.doc.lead_name:
webnotes.conn.sql("update `tabLead` set status='Interested' where name=%s",self.doc.lead_name)
def before_rename(self, olddn, newdn, merge=False):
from accounts.utils import rename_account_for
rename_account_for("Customer", olddn, newdn, merge, self.doc.company)
def after_rename(self, olddn, newdn, merge=False):
set_field = ''
if webnotes.defaults.get_global_default('cust_master_name') == 'Customer Name':
webnotes.conn.set(self.doc, "customer_name", newdn)
self.update_contact()
set_field = ", customer_name=%(newdn)s"
self.update_customer_address(newdn, set_field)
def update_customer_address(self, newdn, set_field):
webnotes.conn.sql("""update `tabAddress` set address_title=%(newdn)s
{set_field} where customer=%(newdn)s"""\
.format(set_field=set_field), ({"newdn": newdn}))
@webnotes.whitelist()
def get_dashboard_info(customer):
if not webnotes.has_permission("Customer", "read", customer):
webnotes.msgprint("No Permission", raise_exception=True)
out = {}
for doctype in ["Opportunity", "Quotation", "Sales Order", "Delivery Note", "Sales Invoice"]:
out[doctype] = webnotes.conn.get_value(doctype,
{"customer": customer, "docstatus": ["!=", 2] }, "count(*)")
billing = webnotes.conn.sql("""select sum(grand_total), sum(outstanding_amount)
from `tabSales Invoice`
where customer=%s
and docstatus = 1
and fiscal_year = %s""", (customer, webnotes.conn.get_default("fiscal_year")))
out["total_billing"] = billing[0][0]
out["total_unpaid"] = billing[0][1]
return out
|
windyuuy/opera
|
refs/heads/master
|
chromium/src/tools/gyp/test/win/vs-macros/as.py
|
332
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-a', dest='platform')
parser.add_option('-o', dest='output')
parser.add_option('-p', dest='path')
(options, args) = parser.parse_args()
f = open(options.output, 'w')
print >>f, 'options', options
print >>f, 'args', args
f.close()
|
jnnk/pyethereum
|
refs/heads/master
|
pyethereum/trie.py
|
1
|
#!/usr/bin/env python
import os
import rlp
import utils
import db
DB = db.DB
def bin_to_nibbles(s):
"""convert string s to nibbles (half-bytes)
>>> bin_to_nibbles("")
[]
>>> bin_to_nibbles("h")
[6, 8]
>>> bin_to_nibbles("he")
[6, 8, 6, 5]
>>> bin_to_nibbles("hello")
[6, 8, 6, 5, 6, 12, 6, 12, 6, 15]
"""
res = []
for x in s:
res += divmod(ord(x), 16)
return res
def nibbles_to_bin(nibbles):
if any(x > 15 or x < 0 for x in nibbles):
raise Exception("nibbles can only be [0,..15]")
if len(nibbles) % 2:
raise Exception("nibbles must be of even numbers")
res = ''
for i in range(0, len(nibbles), 2):
res += chr(16 * nibbles[i] + nibbles[i + 1])
return res
NIBBLE_TERMINATOR = 16
RECORDING = 1
NONE = 0
VERIFYING = -1
class InvalidSPVProof(Exception):
pass
def with_terminator(nibbles):
nibbles = nibbles[:]
if not nibbles or nibbles[-1] != NIBBLE_TERMINATOR:
nibbles.append(NIBBLE_TERMINATOR)
return nibbles
def without_terminator(nibbles):
nibbles = nibbles[:]
if nibbles and nibbles[-1] == NIBBLE_TERMINATOR:
del nibbles[-1]
return nibbles
def adapt_terminator(nibbles, has_terminator):
if has_terminator:
return with_terminator(nibbles)
else:
return without_terminator(nibbles)
def pack_nibbles(nibbles):
"""pack nibbles to binary
:param nibbles: a nibbles sequence. may have a terminator
"""
if nibbles[-1:] == [NIBBLE_TERMINATOR]:
flags = 2
nibbles = nibbles[:-1]
else:
flags = 0
oddlen = len(nibbles) % 2
flags |= oddlen # set lowest bit if odd number of nibbles
if oddlen:
nibbles = [flags] + nibbles
else:
nibbles = [flags, 0] + nibbles
o = ''
for i in range(0, len(nibbles), 2):
o += chr(16 * nibbles[i] + nibbles[i + 1])
return o
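# Illustrative round-trip for the hex-prefix encoding above (not part of
# the original module): an even nibble count with no terminator gives
# flag byte 0x00.
# >>> pack_nibbles([6, 8])
# '\x00h'
# >>> unpack_to_nibbles('\x00h')
# [6, 8]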
def unpack_to_nibbles(bindata):
"""unpack packed binary data to nibbles
:param bindata: binary packed from nibbles
:return: nibbles sequence, may have a terminator
"""
o = bin_to_nibbles(bindata)
flags = o[0]
if flags & 2:
o.append(NIBBLE_TERMINATOR)
if flags & 1 == 1:
o = o[1:]
else:
o = o[2:]
return o
def starts_with(full, part):
    ''' test whether the items in part are
    the leading items of full
'''
if len(full) < len(part):
return False
return full[:len(part)] == part
(
NODE_TYPE_BLANK,
NODE_TYPE_LEAF,
NODE_TYPE_EXTENSION,
NODE_TYPE_BRANCH
) = tuple(range(4))
def is_key_value_type(node_type):
return node_type in [NODE_TYPE_LEAF,
NODE_TYPE_EXTENSION]
BLANK_NODE = ''
BLANK_ROOT = ''
class Trie(object):
proof_mode = 0
def __init__(self, dbfile, root_hash=BLANK_ROOT):
        '''it also presents a dictionary-like interface
        :param dbfile: key value database
        :param root_hash: blank or trie node in form of [key, value] or [v0,v1..v15,v]
'''
if isinstance(dbfile, str):
dbfile = os.path.abspath(dbfile)
self.db = DB(dbfile)
else:
self.db = dbfile # Pass in a database object directly
self.set_root_hash(root_hash)
self.proof_mode = 0
self.proof_nodes = []
# For SPV proof production/verification purposes
def spv_check(self, node):
if not self.proof_mode:
pass
elif self.proof_mode == RECORDING:
self.proof_nodes.append(node)
elif self.proof_mode == VERIFYING:
if node not in self.proof_nodes:
raise InvalidSPVProof("Proof invalid!")
@property
def root_hash(self):
'''always empty or a 32 bytes string
'''
return self.get_root_hash()
def get_root_hash(self):
if self.root_node == BLANK_NODE:
return BLANK_ROOT
assert isinstance(self.root_node, list)
val = rlp.encode(self.root_node)
key = utils.sha3(val)
self.db.put(key, val)
self.spv_check(self.root_node)
return key
@root_hash.setter
def root_hash(self, value):
self.set_root_hash(value)
def set_root_hash(self, root_hash):
if root_hash == BLANK_ROOT:
self.root_node = BLANK_NODE
return
assert isinstance(root_hash, (str, unicode))
assert len(root_hash) in [0, 32]
self.root_node = self._decode_to_node(root_hash)
def clear(self):
''' clear all tree data
'''
self._delete_child_storage(self.root_node)
self._delete_node_storage(self.root_node)
self.root_node = BLANK_NODE
def _delete_child_storage(self, node):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BRANCH:
for item in node[:16]:
self._delete_child_storage(self._decode_to_node(item))
elif is_key_value_type(node_type):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_EXTENSION:
self._delete_child_storage(self._decode_to_node(node[1]))
def _encode_node(self, node):
if node == BLANK_NODE:
return BLANK_NODE
assert isinstance(node, list)
rlpnode = rlp.encode(node)
if len(rlpnode) < 32:
return node
hashkey = utils.sha3(rlpnode)
self.db.put(hashkey, rlpnode)
self.spv_check(node)
return hashkey
def _decode_to_node(self, encoded):
if encoded == BLANK_NODE:
return BLANK_NODE
if isinstance(encoded, list):
return encoded
o = rlp.decode(self.db.get(encoded))
self.spv_check(o)
return o
def _get_node_type(self, node):
''' get node type and content
:param node: node in form of list, or BLANK_NODE
:return: node type
'''
if node == BLANK_NODE:
return NODE_TYPE_BLANK
if len(node) == 2:
nibbles = unpack_to_nibbles(node[0])
has_terminator = (nibbles and nibbles[-1] == NIBBLE_TERMINATOR)
return NODE_TYPE_LEAF if has_terminator\
else NODE_TYPE_EXTENSION
if len(node) == 17:
return NODE_TYPE_BRANCH
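    # Node shapes, for reference (illustrative comment, not original code):
    #   leaf:      [packed_path_with_terminator, value]
    #   extension: [packed_path, child_node_or_hash]
    #   branch:    [v0, v1, ..., v15, value]   (17 slots)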
def _get(self, node, key):
""" get value inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
:return:
BLANK_NODE if does not exist, otherwise value or hash
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return BLANK_NODE
if node_type == NODE_TYPE_BRANCH:
# already reach the expected node
if not key:
return node[-1]
sub_node = self._decode_to_node(node[key[0]])
return self._get(sub_node, key[1:])
# key value node
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
return node[1] if key == curr_key else BLANK_NODE
if node_type == NODE_TYPE_EXTENSION:
# traverse child nodes
if starts_with(key, curr_key):
sub_node = self._decode_to_node(node[1])
return self._get(sub_node, key[len(curr_key):])
else:
return BLANK_NODE
def _update(self, node, key, value):
""" update item inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
.. note:: key may be []
:param value: value string
:return: new node
        if this node is changed to a new node, its parent will take the
responsibility to *store* the new node storage, and delete the old
node storage
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return [pack_nibbles(with_terminator(key)), value]
elif node_type == NODE_TYPE_BRANCH:
if not key:
node[-1] = value
else:
new_node = self._update_and_delete_storage(
self._decode_to_node(node[key[0]]),
key[1:], value)
node[key[0]] = self._encode_node(new_node)
return node
elif is_key_value_type(node_type):
return self._update_kv_node(node, key, value)
def _update_and_delete_storage(self, node, key, value):
old_node = node[:]
new_node = self._update(node, key, value)
if old_node != new_node:
self._delete_node_storage(old_node)
return new_node
def _update_kv_node(self, node, key, value):
node_type = self._get_node_type(node)
curr_key = without_terminator(unpack_to_nibbles(node[0]))
is_inner = node_type == NODE_TYPE_EXTENSION
# find longest common prefix
prefix_length = 0
for i in range(min(len(curr_key), len(key))):
if key[i] != curr_key[i]:
break
prefix_length = i + 1
remain_key = key[prefix_length:]
remain_curr_key = curr_key[prefix_length:]
if remain_key == [] == remain_curr_key:
if not is_inner:
return [node[0], value]
new_node = self._update_and_delete_storage(
self._decode_to_node(node[1]), remain_key, value)
elif remain_curr_key == []:
if is_inner:
new_node = self._update_and_delete_storage(
self._decode_to_node(node[1]), remain_key, value)
else:
new_node = [BLANK_NODE] * 17
new_node[-1] = node[1]
new_node[remain_key[0]] = self._encode_node([
pack_nibbles(with_terminator(remain_key[1:])),
value
])
else:
new_node = [BLANK_NODE] * 17
if len(remain_curr_key) == 1 and is_inner:
new_node[remain_curr_key[0]] = node[1]
else:
new_node[remain_curr_key[0]] = self._encode_node([
pack_nibbles(
adapt_terminator(remain_curr_key[1:], not is_inner)
),
node[1]
])
if remain_key == []:
new_node[-1] = value
else:
new_node[remain_key[0]] = self._encode_node([
pack_nibbles(with_terminator(remain_key[1:])), value
])
if prefix_length:
# create node for key prefix
return [pack_nibbles(curr_key[:prefix_length]),
self._encode_node(new_node)]
else:
return new_node
def _getany(self, node, reverse=False, path=[]):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return None
if node_type == NODE_TYPE_BRANCH:
if node[16]:
return [16]
scan_range = range(16)
if reverse:
scan_range.reverse()
for i in scan_range:
o = self._getany(self._decode_to_node(node[i]), path=path+[i])
if o:
return [i] + o
return None
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
return curr_key
if node_type == NODE_TYPE_EXTENSION:
curr_key = without_terminator(unpack_to_nibbles(node[0]))
sub_node = self._decode_to_node(node[1])
return self._getany(sub_node, path=path+curr_key)
def _iter(self, node, key, reverse=False, path=[]):
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return None
elif node_type == NODE_TYPE_BRANCH:
if len(key):
sub_node = self._decode_to_node(node[key[0]])
o = self._iter(sub_node, key[1:], reverse, path+[key[0]])
if o:
return [key[0]] + o
if reverse:
scan_range = range(key[0] if len(key) else 0)
else:
scan_range = range(key[0]+1 if len(key) else 0, 16)
for i in scan_range:
sub_node = self._decode_to_node(node[i])
o = self._getany(sub_node, reverse, path+[i])
if o:
return [i] + o
if reverse and node[16]:
return [16]
return None
descend_key = without_terminator(unpack_to_nibbles(node[0]))
if node_type == NODE_TYPE_LEAF:
if reverse:
return descend_key if descend_key < key else None
else:
return descend_key if descend_key > key else None
if node_type == NODE_TYPE_EXTENSION:
# traverse child nodes
sub_node = self._decode_to_node(node[1])
sub_key = key[len(descend_key):]
if starts_with(key, descend_key):
o = self._iter(sub_node, sub_key, reverse, path + descend_key)
            # note: _getany takes (node, reverse, path); no sub-key is needed
            # because every key under this extension compares the same way
            elif descend_key > key[:len(descend_key)] and not reverse:
                o = self._getany(sub_node, False, path + descend_key)
            elif descend_key < key[:len(descend_key)] and reverse:
                o = self._getany(sub_node, True, path + descend_key)
else:
o = None
return descend_key + o if o else None
def next(self, key):
key = bin_to_nibbles(key)
o = self._iter(self.root_node, key)
return nibbles_to_bin(o) if o else None
def prev(self, key):
key = bin_to_nibbles(key)
o = self._iter(self.root_node, key, reverse=True)
return nibbles_to_bin(o) if o else None
def _delete_node_storage(self, node):
'''delete storage
:param node: node in form of list, or BLANK_NODE
'''
if node == BLANK_NODE:
return
assert isinstance(node, list)
encoded = self._encode_node(node)
if len(encoded) < 32:
return
"""
===== FIXME ====
in the current trie implementation two nodes can share identical subtrees
thus we can not safely delete nodes for now
"""
#self.db.delete(encoded) # FIXME
def _delete(self, node, key):
""" update item inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
.. note:: key may be []
:return: new node
        if this node is changed to a new node, its parent will take the
responsibility to *store* the new node storage, and delete the old
node storage
"""
node_type = self._get_node_type(node)
if node_type == NODE_TYPE_BLANK:
return BLANK_NODE
if node_type == NODE_TYPE_BRANCH:
return self._delete_branch_node(node, key)
if is_key_value_type(node_type):
return self._delete_kv_node(node, key)
def _normalize_branch_node(self, node):
'''node should have only one item changed
'''
not_blank_items_count = sum(1 for x in range(17) if node[x])
assert not_blank_items_count >= 1
if not_blank_items_count > 1:
return node
# now only one item is not blank
not_blank_index = [i for i, item in enumerate(node) if item][0]
# the value item is not blank
if not_blank_index == 16:
return [pack_nibbles(with_terminator([])), node[16]]
# normal item is not blank
sub_node = self._decode_to_node(node[not_blank_index])
sub_node_type = self._get_node_type(sub_node)
if is_key_value_type(sub_node_type):
            # collapse the subnode into this node; note this node will have the
            # same terminator as the new sub node, and the value does not change
new_key = [not_blank_index] + \
unpack_to_nibbles(sub_node[0])
return [pack_nibbles(new_key), sub_node[1]]
if sub_node_type == NODE_TYPE_BRANCH:
return [pack_nibbles([not_blank_index]),
self._encode_node(sub_node)]
assert False
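    # Worked example (illustrative, not original code): a branch whose only
    # live slot is index 3 holding leaf [pack_nibbles([5, 16]), 'v'] is
    # collapsed into the single kv node [pack_nibbles([3, 5, 16]), 'v'].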
def _delete_and_delete_storage(self, node, key):
old_node = node[:]
new_node = self._delete(node, key)
if old_node != new_node:
self._delete_node_storage(old_node)
return new_node
def _delete_branch_node(self, node, key):
# already reach the expected node
if not key:
node[-1] = BLANK_NODE
return self._normalize_branch_node(node)
encoded_new_sub_node = self._encode_node(
self._delete_and_delete_storage(
self._decode_to_node(node[key[0]]), key[1:])
)
if encoded_new_sub_node == node[key[0]]:
return node
node[key[0]] = encoded_new_sub_node
if encoded_new_sub_node == BLANK_NODE:
return self._normalize_branch_node(node)
return node
def _delete_kv_node(self, node, key):
node_type = self._get_node_type(node)
assert is_key_value_type(node_type)
curr_key = without_terminator(unpack_to_nibbles(node[0]))
if not starts_with(key, curr_key):
# key not found
return node
if node_type == NODE_TYPE_LEAF:
return BLANK_NODE if key == curr_key else node
# for inner key value type
new_sub_node = self._delete_and_delete_storage(
self._decode_to_node(node[1]), key[len(curr_key):])
if self._encode_node(new_sub_node) == node[1]:
return node
# new sub node is BLANK_NODE
if new_sub_node == BLANK_NODE:
return BLANK_NODE
assert isinstance(new_sub_node, list)
# new sub node not blank, not value and has changed
new_sub_node_type = self._get_node_type(new_sub_node)
if is_key_value_type(new_sub_node_type):
            # collapse the subnode into this node; note this node will have the
            # same terminator as the new sub node, and the value does not change
new_key = curr_key + unpack_to_nibbles(new_sub_node[0])
return [pack_nibbles(new_key), new_sub_node[1]]
if new_sub_node_type == NODE_TYPE_BRANCH:
return [pack_nibbles(curr_key), self._encode_node(new_sub_node)]
# should be no more cases
assert False
def delete(self, key):
'''
:param key: a string with length of [0, 32]
'''
if not isinstance(key, (str, unicode)):
raise Exception("Key must be string")
if len(key) > 32:
raise Exception("Max key length is 32")
self.root_node = self._delete_and_delete_storage(
self.root_node,
bin_to_nibbles(str(key)))
self.get_root_hash()
def _get_size(self, node):
'''Get counts of (key, value) stored in this and the descendant nodes
:param node: node in form of list, or BLANK_NODE
'''
if node == BLANK_NODE:
return 0
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
value_is_node = node_type == NODE_TYPE_EXTENSION
if value_is_node:
return self._get_size(self._decode_to_node(node[1]))
else:
return 1
elif node_type == NODE_TYPE_BRANCH:
sizes = [self._get_size(self._decode_to_node(node[x]))
for x in range(16)]
sizes = sizes + [1 if node[-1] else 0]
return sum(sizes)
def _to_dict(self, node):
'''convert (key, value) stored in this and the descendant nodes
to dict items.
:param node: node in form of list, or BLANK_NODE
.. note::
Here key is in full form, rather than key of the individual node
'''
if node == BLANK_NODE:
return {}
node_type = self._get_node_type(node)
if is_key_value_type(node_type):
nibbles = without_terminator(unpack_to_nibbles(node[0]))
key = '+'.join([str(x) for x in nibbles])
if node_type == NODE_TYPE_EXTENSION:
sub_dict = self._to_dict(self._decode_to_node(node[1]))
else:
sub_dict = {str(NIBBLE_TERMINATOR): node[1]}
# prepend key of this node to the keys of children
res = {}
for sub_key, sub_value in sub_dict.iteritems():
full_key = '{0}+{1}'.format(key, sub_key).strip('+')
res[full_key] = sub_value
return res
elif node_type == NODE_TYPE_BRANCH:
res = {}
for i in range(16):
sub_dict = self._to_dict(self._decode_to_node(node[i]))
for sub_key, sub_value in sub_dict.iteritems():
full_key = '{0}+{1}'.format(i, sub_key).strip('+')
res[full_key] = sub_value
if node[16]:
res[str(NIBBLE_TERMINATOR)] = node[-1]
return res
def to_dict(self):
d = self._to_dict(self.root_node)
res = {}
for key_str, value in d.iteritems():
if key_str:
nibbles = [int(x) for x in key_str.split('+')]
else:
nibbles = []
key = nibbles_to_bin(without_terminator(nibbles))
res[key] = value
return res
def get(self, key):
return self._get(self.root_node, bin_to_nibbles(str(key)))
def __len__(self):
return self._get_size(self.root_node)
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
return self.update(key, value)
def __delitem__(self, key):
return self.delete(key)
def __iter__(self):
return iter(self.to_dict())
def __contains__(self, key):
return self.get(key) != BLANK_NODE
def update(self, key, value):
'''
:param key: a string
:value: a string
'''
if not isinstance(key, (str, unicode)):
raise Exception("Key must be string")
# if len(key) > 32:
# raise Exception("Max key length is 32")
if not isinstance(value, (str, unicode)):
raise Exception("Value must be string")
# if value == '':
# return self.delete(key)
self.root_node = self._update_and_delete_storage(
self.root_node,
bin_to_nibbles(str(key)),
value)
self.get_root_hash()
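    # Minimal usage sketch (illustrative, not original code):
    #   t = Trie(db.EphemDB())
    #   t.update('dog', 'puppy')
    #   t.get('dog')  # -> 'puppy'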
def root_hash_valid(self):
if self.root_hash == BLANK_ROOT:
return True
return self.root_hash in self.db
def produce_spv_proof(self, key):
self.proof_mode = RECORDING
self.proof_nodes = [self.root_node]
self.get(key)
self.proof_mode = NONE
o = self.proof_nodes
self.proof_nodes = []
return o
def verify_spv_proof(root, key, proof):
t = Trie(db.EphemDB())
t.proof_mode = VERIFYING
t.proof_nodes = proof
for i, node in enumerate(proof):
R = rlp.encode(node)
H = utils.sha3(R)
t.db.put(H, R)
try:
t.root_hash = root
t.get(key)
return True
except Exception, e:
print e
return False
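# Minimal SPV sketch (illustrative, not part of the original module),
# assuming `t` is an existing populated Trie:
#   proof = t.produce_spv_proof('somekey')
#   assert verify_spv_proof(t.root_hash, 'somekey', proof)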
if __name__ == "__main__":
import sys
def encode_node(nd):
if isinstance(nd, str):
return nd.encode('hex')
else:
return rlp.encode(nd).encode('hex')
if len(sys.argv) >= 2:
if sys.argv[1] == 'insert':
t = Trie(sys.argv[2], sys.argv[3].decode('hex'))
t.update(sys.argv[4], sys.argv[5])
print encode_node(t.root_hash)
elif sys.argv[1] == 'get':
t = Trie(sys.argv[2], sys.argv[3].decode('hex'))
print t.get(sys.argv[4])
|
eharney/cinder
|
refs/heads/master
|
cinder/tests/unit/image/fake.py
|
1
|
# Copyright 2011 Justin Santa Barbara
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of a fake image service."""
import copy
import datetime
import mock
import uuid
from cinder import exception
import cinder.image.glance
from cinder.tests.unit import fake_constants
class _FakeImageService(object):
"""Mock (fake) image service for unit testing."""
def __init__(self):
self.images = {}
# NOTE(justinsb): The OpenStack API can't upload an image?
# So, make sure we've got one..
timestamp = datetime.datetime(2011, 1, 1, 1, 2, 3)
image1 = {'id': fake_constants.IMAGE_ID,
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'private',
'protected': False,
'container_format': 'raw',
'disk_format': 'raw',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64'},
'size': 12345678}
image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'},
'size': 1}
image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': None,
'disk_format': None,
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'},
'size': 1000000000000}
image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel'},
'size': 20000000}
image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77',
'name': 'fakeimage123456',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': True,
'container_format': 'ami',
'disk_format': 'ami',
'properties': {
'kernel_id': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
'ramdisk_id': None},
'size': 50000}
image6 = {'id': 'a440c04b-79fa-479c-bed1-0b816eaec379',
'name': 'fakeimage6',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'False'},
'size': 7777777}
image7 = {'id': '70a599e0-31e7-49b7-b260-868f441e862b',
'name': 'fakeimage7',
'created_at': timestamp,
'updated_at': timestamp,
'deleted_at': None,
'deleted': False,
'status': 'active',
'visibility': 'public',
'protected': False,
'container_format': 'ova',
'disk_format': 'vhd',
'properties': {'kernel_id': 'nokernel',
'ramdisk_id': 'nokernel',
'architecture': 'x86_64',
'auto_disk_config': 'True'},
'size': 1234000000}
self.create(None, image1)
self.create(None, image2)
self.create(None, image3)
self.create(None, image4)
self.create(None, image5)
self.create(None, image6)
self.create(None, image7)
self._imagedata = {}
self.temp_images = mock.MagicMock()
super(_FakeImageService, self).__init__()
# TODO(bcwaldon): implement optional kwargs such as limit, sort_dir
def detail(self, context, **kwargs):
"""Return list of detailed image information."""
return copy.deepcopy(self.images.values())
def download(self, context, image_id, data):
self.show(context, image_id)
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
"""Get data about specified image.
Returns a dict containing image data for the given opaque image id.
"""
image = self.images.get(str(image_id))
if image:
return copy.deepcopy(image)
raise exception.ImageNotFound(image_id=image_id)
def create(self, context, metadata, data=None):
"""Store the image data and return the new image id.
        :raises Duplicate: if the image already exists.
"""
image_id = str(metadata.get('id', uuid.uuid4()))
metadata['id'] = image_id
if image_id in self.images:
raise exception.Duplicate()
self.images[image_id] = copy.deepcopy(metadata)
if data:
self._imagedata[image_id] = data.read()
return self.images[image_id]
def update(self, context, image_id, metadata, data=None,
purge_props=False):
"""Replace the contents of the given image with the new data.
:raises ImageNotFound: if the image does not exist.
"""
if not self.images.get(image_id):
raise exception.ImageNotFound(image_id=image_id)
if purge_props:
self.images[image_id] = copy.deepcopy(metadata)
else:
image = self.images[image_id]
try:
image['properties'].update(metadata.pop('properties'))
except Exception:
pass
image.update(metadata)
return self.images[image_id]
def delete(self, context, image_id):
"""Delete the given image.
:raises ImageNotFound: if the image does not exist.
"""
removed = self.images.pop(image_id, None)
if not removed:
raise exception.ImageNotFound(image_id=image_id)
def get_location(self, context, image_id):
if image_id in self.images:
return 'fake_location'
return None
def add_location(self, context, image_id, url, metadata):
self.update(context, image_id, {'locations': [{'url': url,
'metadata': metadata}]})
return True
_fakeImageService = _FakeImageService()
def FakeImageService():
return _fakeImageService
def FakeImageService_reset():
global _fakeImageService
_fakeImageService = _FakeImageService()
def mock_image_service(testcase):
testcase.mock_object(cinder.image.glance, 'get_remote_image_service',
lambda x, y: (FakeImageService(), y))
testcase.mock_object(cinder.image.glance, 'get_default_image_service',
mock.Mock(side_effect=FakeImageService))
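# Usage sketch (an assumed test pattern, not part of this module): inside a
# cinder test case, swap the real glance service for this in-memory fake and
# reset it afterwards so images created in one test do not leak into the next.
#
#     def setUp(self):
#         super(MyVolumeTest, self).setUp()
#         mock_image_service(self)
#         self.addCleanup(FakeImageService_reset)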
|
ajose01/rethinkdb
|
refs/heads/next
|
external/v8_3.30.33.16/tools/process-heap-prof.py
|
146
|
#!/usr/bin/env python
#
# Copyright 2009 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This is a utility for converting V8 heap logs into .hp files that can
# be further processed using the 'hp2ps' tool (bundled with GHC and Valgrind)
# to produce heap usage histograms.
# Sample usage:
# $ ./shell --log-gc script.js
# $ tools/process-heap-prof.py v8.log | hp2ps -c > script-heap-graph.ps
# ('-c' enables color, see hp2ps manual page for more options)
# or
# $ tools/process-heap-prof.py --js-cons-profile v8.log | hp2ps -c > script-heap-graph.ps
# to get JS constructor profile
from __future__ import print_function
import csv, sys, time, optparse
def ProcessLogFile(filename, options):
if options.js_cons_profile:
itemname = 'heap-js-cons-item'
else:
itemname = 'heap-sample-item'
first_call_time = None
sample_time = 0.0
sampling = False
try:
logfile = open(filename, 'rb')
try:
logreader = csv.reader(logfile)
print('JOB "v8"')
print('DATE "%s"' % time.asctime(time.localtime()))
print('SAMPLE_UNIT "seconds"')
print('VALUE_UNIT "bytes"')
for row in logreader:
if row[0] == 'heap-sample-begin' and row[1] == 'Heap':
sample_time = float(row[3])/1000.0
          if first_call_time is None:
first_call_time = sample_time
sample_time -= first_call_time
print('BEGIN_SAMPLE %.2f' % sample_time)
sampling = True
elif row[0] == 'heap-sample-end' and row[1] == 'Heap':
print('END_SAMPLE %.2f' % sample_time)
sampling = False
        elif row[0] == itemname and sampling:
          print(row[1], end=' ')
          if options.count:
            print('%d' % int(row[2]), end=' ')
          if options.size:
            print('%d' % int(row[3]), end=' ')
          print()
finally:
logfile.close()
  except IOError:
    sys.exit("can't open %s" % filename)
def BuildOptions():
result = optparse.OptionParser()
result.add_option("--js_cons_profile", help="Constructor profile",
default=False, action="store_true")
result.add_option("--size", help="Report object size",
default=False, action="store_true")
result.add_option("--count", help="Report object count",
default=False, action="store_true")
return result
def ProcessOptions(options):
if not options.size and not options.count:
options.size = True
return True
def Main():
parser = BuildOptions()
(options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    sys.exit()
  if not args:
    print('Missing logfile')
    sys.exit()
  ProcessLogFile(args[0], options)
if __name__ == '__main__':
sys.exit(Main())
|
dimdung/boto
|
refs/heads/develop
|
tests/unit/kms/__init__.py
|
473
|
# Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
|
UXE/local-edx
|
refs/heads/master
|
common/lib/xmodule/xmodule/tests/test_conditional.py
|
37
|
import json
import unittest
from fs.memoryfs import MemoryFS
from mock import Mock, patch
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.error_module import NonStaffErrorDescriptor
from opaque_keys.edx.locations import SlashSeparatedCourseKey, Location
from xmodule.modulestore.xml import ImportSystem, XMLModuleStore, CourseLocationGenerator
from xmodule.conditional_module import ConditionalDescriptor
from xmodule.tests import DATA_DIR, get_test_system, get_test_descriptor_system
from xmodule.x_module import STUDENT_VIEW
ORG = 'test_org'
COURSE = 'conditional' # name of directory with course data
class DummySystem(ImportSystem):
@patch('xmodule.modulestore.xml.OSFS', lambda directory: MemoryFS())
def __init__(self, load_error_modules):
xmlstore = XMLModuleStore("data_dir", course_dirs=[], load_error_modules=load_error_modules)
super(DummySystem, self).__init__(
xmlstore=xmlstore,
course_id=SlashSeparatedCourseKey(ORG, COURSE, 'test_run'),
course_dir='test_dir',
error_tracker=Mock(),
parent_tracker=Mock(),
load_error_modules=load_error_modules,
)
def render_template(self, template, context):
raise Exception("Shouldn't be called")
class ConditionalFactory(object):
"""
A helper class to create a conditional module and associated source and child modules
to allow for testing.
"""
@staticmethod
def create(system, source_is_error_module=False):
"""
return a dict of modules: the conditional with a single source and a single child.
Keys are 'cond_module', 'source_module', and 'child_module'.
if the source_is_error_module flag is set, create a real ErrorModule for the source.
"""
descriptor_system = get_test_descriptor_system()
# construct source descriptor and module:
source_location = Location("edX", "conditional_test", "test_run", "problem", "SampleProblem", None)
if source_is_error_module:
# Make an error descriptor and module
source_descriptor = NonStaffErrorDescriptor.from_xml(
'some random xml data',
system,
id_generator=CourseLocationGenerator(SlashSeparatedCourseKey('edX', 'conditional_test', 'test_run')),
error_msg='random error message'
)
else:
source_descriptor = Mock()
source_descriptor.location = source_location
source_descriptor.runtime = descriptor_system
source_descriptor.render = lambda view, context=None: descriptor_system.render(source_descriptor, view, context)
# construct other descriptors:
child_descriptor = Mock()
child_descriptor._xmodule.student_view.return_value.content = u'<p>This is a secret</p>'
child_descriptor.student_view = child_descriptor._xmodule.student_view
child_descriptor.displayable_items.return_value = [child_descriptor]
child_descriptor.runtime = descriptor_system
child_descriptor.xmodule_runtime = get_test_system()
child_descriptor.render = lambda view, context=None: descriptor_system.render(child_descriptor, view, context)
child_descriptor.location = source_location.replace(category='html', name='child')
descriptor_system.load_item = {
child_descriptor.location: child_descriptor,
source_location: source_descriptor
}.get
# construct conditional module:
cond_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'attempted': 'true'},
'children': [child_descriptor.location],
})
cond_descriptor = ConditionalDescriptor(
descriptor_system,
field_data,
ScopeIds(None, None, cond_location, cond_location)
)
cond_descriptor.xmodule_runtime = system
system.get_module = lambda desc: desc
cond_descriptor.get_required_module_descriptors = Mock(return_value=[source_descriptor])
# return dict:
return {'cond_module': cond_descriptor,
'source_module': source_descriptor,
'child_module': child_descriptor}
class ConditionalModuleBasicTest(unittest.TestCase):
"""
Make sure that conditional module works, using mocks for
other modules.
"""
def setUp(self):
self.test_system = get_test_system()
def test_icon_class(self):
'''verify that get_icon_class works independent of condition satisfaction'''
modules = ConditionalFactory.create(self.test_system)
for attempted in ["false", "true"]:
for icon_class in ['other', 'problem', 'video']:
modules['source_module'].is_attempted = attempted
modules['child_module'].get_icon_class = lambda: icon_class
self.assertEqual(modules['cond_module'].get_icon_class(), icon_class)
def test_get_html(self):
modules = ConditionalFactory.create(self.test_system)
# because get_test_system returns the repr of the context dict passed to render_template,
# we reverse it here
html = modules['cond_module'].render(STUDENT_VIEW).content
expected = modules['cond_module'].xmodule_runtime.render_template('conditional_ajax.html', {
'ajax_url': modules['cond_module'].xmodule_runtime.ajax_url,
'element_id': u'i4x-edX-conditional_test-conditional-SampleConditional',
'depends': u'i4x-edX-conditional_test-problem-SampleProblem',
})
self.assertEquals(expected, html)
def test_handle_ajax(self):
modules = ConditionalFactory.create(self.test_system)
modules['source_module'].is_attempted = "false"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# now change state of the capa problem to make it completed
modules['source_module'].is_attempted = "true"
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_error_as_source(self):
'''
Check that handle_ajax works properly if the source is really an ErrorModule,
and that the condition is not satisfied.
'''
modules = ConditionalFactory.create(self.test_system, source_is_error_module=True)
ajax = json.loads(modules['cond_module'].handle_ajax('', ''))
modules['cond_module'].save()
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
class ConditionalModuleXmlTest(unittest.TestCase):
"""
Make sure ConditionalModule works, by loading data in from an XML-defined course.
"""
@staticmethod
def get_system(load_error_modules=True):
'''Get a dummy system'''
return DummySystem(load_error_modules)
def setUp(self):
self.test_system = get_test_system()
def get_course(self, name):
"""Get a test course by directory name. If there's more than one, error."""
print "Importing {0}".format(name)
modulestore = XMLModuleStore(DATA_DIR, course_dirs=[name])
courses = modulestore.get_courses()
self.modulestore = modulestore
self.assertEquals(len(courses), 1)
return courses[0]
def test_conditional_module(self):
"""Make sure that conditional module works"""
print "Starting import"
course = self.get_course('conditional_and_poll')
print "Course: ", course
print "id: ", course.id
def inner_get_module(descriptor):
if isinstance(descriptor, Location):
location = descriptor
descriptor = self.modulestore.get_item(location, depth=None)
descriptor.xmodule_runtime = get_test_system()
descriptor.xmodule_runtime.get_module = inner_get_module
return descriptor
# edx - HarvardX
# cond_test - ER22x
location = Location("HarvardX", "ER22x", "2013_Spring", "conditional", "condone")
def replace_urls(text, staticfiles_prefix=None, replace_prefix='/static/', course_namespace=None):
return text
self.test_system.replace_urls = replace_urls
self.test_system.get_module = inner_get_module
module = inner_get_module(location)
print "module: ", module
print "module children: ", module.get_children()
print "module display items (children): ", module.get_display_items()
html = module.render(STUDENT_VIEW).content
print "html type: ", type(html)
print "html: ", html
html_expect = module.xmodule_runtime.render_template(
'conditional_ajax.html',
{
# Test ajax url is just usage-id / handler_name
'ajax_url': '{}/xmodule_handler'.format(location.to_deprecated_string()),
'element_id': u'i4x-HarvardX-ER22x-conditional-condone',
'depends': u'i4x-HarvardX-ER22x-problem-choiceprob'
}
)
self.assertEqual(html, html_expect)
gdi = module.get_display_items()
print "gdi=", gdi
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "ajax: ", ajax
html = ajax['html']
self.assertFalse(any(['This is a secret' in item for item in html]))
# Now change state of the capa problem to make it completed
inner_module = inner_get_module(location.replace(category="problem", name='choiceprob'))
inner_module.attempts = 1
# Save our modifications to the underlying KeyValueStore so they can be persisted
inner_module.save()
ajax = json.loads(module.handle_ajax('', ''))
module.save()
print "post-attempt ajax: ", ajax
html = ajax['html']
self.assertTrue(any(['This is a secret' in item for item in html]))
def test_conditional_module_with_empty_sources_list(self):
"""
If a ConditionalDescriptor is initialized with an empty sources_list, we assert that the sources_list is set
via generating UsageKeys from the values in xml_attributes['sources']
"""
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.sources_list[0],
conditional.location.course_key.make_usage_key_from_deprecated_string(conditional.xml_attributes['sources'])
)
def test_conditional_module_parse_sources(self):
dummy_system = Mock()
dummy_location = Location("edX", "conditional_test", "test_run", "conditional", "SampleConditional", None)
dummy_scope_ids = ScopeIds(None, None, dummy_location, dummy_location)
dummy_field_data = DictFieldData({
'data': '<conditional/>',
'xml_attributes': {'sources': 'i4x://HarvardX/ER22x/poll_question/T15_poll;i4x://HarvardX/ER22x/poll_question/T16_poll'},
'children': None,
})
conditional = ConditionalDescriptor(
dummy_system,
dummy_field_data,
dummy_scope_ids,
)
self.assertEqual(
conditional.parse_sources(conditional.xml_attributes),
['i4x://HarvardX/ER22x/poll_question/T15_poll', 'i4x://HarvardX/ER22x/poll_question/T16_poll']
)
|
tracyjacks/PyMetWeather
|
refs/heads/master
|
pymetweather/get_args.py
|
1
|
import argparse
from configparser import RawConfigParser
from os import mkdir
import os.path
def get_command_line_args():
parser = argparse.ArgumentParser(description=(
        'Retrieve and display weather forecast from the met '
'office. Default behaviour is to check for updates if '
'the saved forecast is more than ninety minutes old'
))
parser.add_argument(
'-l',
'--location',
help='location of forecast'
)
parser.add_argument(
'-d',
'--dont-update',
dest='dont_update',
action='store_true',
help='do not check for updates'
)
parser.add_argument(
'-q',
'--quiet-update',
dest='quiet_update',
action='store_true',
help='check for updates and quit'
)
return vars(parser.parse_args())
def get_config_args():
cp = RawConfigParser({
'api_key': '',
'datadir': os.path.expanduser('~/.metweather')})
if os.path.isfile(os.path.expanduser('~/.metweatherrc')):
cp.read([os.path.expanduser('~/.metweatherrc')])
args = dict(cp.items('default'))
else:
args = cp.defaults()
if not args['api_key']:
raise Exception("No API key given")
args['datadir'] = os.path.expanduser(args['datadir'])
if not os.path.isdir(args['datadir']):
mkdir(args['datadir'])
return args
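# A short usage sketch (an assumed entry point, not part of the original
# module): config-file values provide the defaults, command-line flags win.
if __name__ == '__main__':
    cli_args = get_command_line_args()
    args = get_config_args()
    args.update({k: v for k, v in cli_args.items() if v})
    print(args)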
|
mccheung/kbengine
|
refs/heads/master
|
kbe/res/scripts/common/Lib/ctypes/test/test_simplesubclasses.py
|
170
|
import unittest
from ctypes import *
class MyInt(c_int):
def __eq__(self, other):
if type(other) != MyInt:
            return NotImplemented
return self.value == other.value
class Test(unittest.TestCase):
def test_compare(self):
self.assertEqual(MyInt(3), MyInt(3))
self.assertNotEqual(MyInt(42), MyInt(43))
def test_ignore_retval(self):
# Test if the return value of a callback is ignored
# if restype is None
proto = CFUNCTYPE(None)
def func():
return (1, "abc", None)
cb = proto(func)
self.assertEqual(None, cb())
def test_int_callback(self):
args = []
def func(arg):
args.append(arg)
return arg
cb = CFUNCTYPE(None, MyInt)(func)
self.assertEqual(None, cb(42))
self.assertEqual(type(args[-1]), MyInt)
cb = CFUNCTYPE(c_int, c_int)(func)
self.assertEqual(42, cb(42))
self.assertEqual(type(args[-1]), int)
def test_int_struct(self):
class X(Structure):
_fields_ = [("x", MyInt)]
self.assertEqual(X().x, MyInt())
s = X()
s.x = MyInt(42)
self.assertEqual(s.x, MyInt(42))
if __name__ == "__main__":
unittest.main()
|
russel1237/scikit-learn
|
refs/heads/master
|
sklearn/covariance/tests/test_graph_lasso.py
|
272
|
""" Test the graph_lasso module.
"""
import sys
import numpy as np
from scipy import linalg
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_less
from sklearn.covariance import (graph_lasso, GraphLasso, GraphLassoCV,
empirical_covariance)
from sklearn.datasets.samples_generator import make_sparse_spd_matrix
from sklearn.externals.six.moves import StringIO
from sklearn.utils import check_random_state
from sklearn import datasets
def test_graph_lasso(random_state=0):
# Sample data from a sparse multivariate normal
dim = 20
n_samples = 100
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.95,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
emp_cov = empirical_covariance(X)
for alpha in (0., .1, .25):
covs = dict()
icovs = dict()
for method in ('cd', 'lars'):
cov_, icov_, costs = graph_lasso(emp_cov, alpha=alpha, mode=method,
return_costs=True)
covs[method] = cov_
icovs[method] = icov_
costs, dual_gap = np.array(costs).T
# Check that the costs always decrease (doesn't hold if alpha == 0)
if not alpha == 0:
assert_array_less(np.diff(costs), 0)
# Check that the 2 approaches give similar results
assert_array_almost_equal(covs['cd'], covs['lars'], decimal=4)
assert_array_almost_equal(icovs['cd'], icovs['lars'], decimal=4)
# Smoke test the estimator
model = GraphLasso(alpha=.25).fit(X)
model.score(X)
assert_array_almost_equal(model.covariance_, covs['cd'], decimal=4)
assert_array_almost_equal(model.covariance_, covs['lars'], decimal=4)
# For a centered matrix, assume_centered could be chosen True or False
# Check that this returns indeed the same result for centered data
Z = X - X.mean(0)
precs = list()
for assume_centered in (False, True):
prec_ = GraphLasso(assume_centered=assume_centered).fit(Z).precision_
precs.append(prec_)
assert_array_almost_equal(precs[0], precs[1])
def test_graph_lasso_iris():
# Hard-coded solution from R glasso package for alpha=1.0
# The iris datasets in R and sklearn do not match in a few places, these
# values are for the sklearn version
cov_R = np.array([
[0.68112222, 0.0, 0.2651911, 0.02467558],
[0.00, 0.1867507, 0.0, 0.00],
[0.26519111, 0.0, 3.0924249, 0.28774489],
[0.02467558, 0.0, 0.2877449, 0.57853156]
])
icov_R = np.array([
[1.5188780, 0.0, -0.1302515, 0.0],
[0.0, 5.354733, 0.0, 0.0],
[-0.1302515, 0.0, 0.3502322, -0.1686399],
[0.0, 0.0, -0.1686399, 1.8123908]
])
X = datasets.load_iris().data
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=1.0, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R)
assert_array_almost_equal(icov, icov_R)
def test_graph_lasso_iris_singular():
# Small subset of rows to test the rank-deficient case
# Need to choose samples such that none of the variances are zero
indices = np.arange(10, 13)
# Hard-coded solution from R glasso package for alpha=0.01
cov_R = np.array([
[0.08, 0.056666662595, 0.00229729713223, 0.00153153142149],
[0.056666662595, 0.082222222222, 0.00333333333333, 0.00222222222222],
[0.002297297132, 0.003333333333, 0.00666666666667, 0.00009009009009],
[0.001531531421, 0.002222222222, 0.00009009009009, 0.00222222222222]
])
icov_R = np.array([
[24.42244057, -16.831679593, 0.0, 0.0],
[-16.83168201, 24.351841681, -6.206896552, -12.5],
[0.0, -6.206896171, 153.103448276, 0.0],
[0.0, -12.499999143, 0.0, 462.5]
])
X = datasets.load_iris().data[indices, :]
emp_cov = empirical_covariance(X)
for method in ('cd', 'lars'):
cov, icov = graph_lasso(emp_cov, alpha=0.01, return_costs=False,
mode=method)
assert_array_almost_equal(cov, cov_R, decimal=5)
assert_array_almost_equal(icov, icov_R, decimal=5)
def test_graph_lasso_cv(random_state=1):
# Sample data from a sparse multivariate normal
dim = 5
n_samples = 6
random_state = check_random_state(random_state)
prec = make_sparse_spd_matrix(dim, alpha=.96,
random_state=random_state)
cov = linalg.inv(prec)
X = random_state.multivariate_normal(np.zeros(dim), cov, size=n_samples)
# Capture stdout, to smoke test the verbose mode
orig_stdout = sys.stdout
try:
sys.stdout = StringIO()
# We need verbose very high so that Parallel prints on stdout
GraphLassoCV(verbose=100, alphas=5, tol=1e-1).fit(X)
finally:
sys.stdout = orig_stdout
# Smoke test with specified alphas
GraphLassoCV(alphas=[0.8, 0.5], tol=1e-1, n_jobs=1).fit(X)
|
HHS-IntroProgramming/Sine-Cosine
|
refs/heads/master
|
sinecosine.py
|
1
|
"""
sinecosine.py
Author: <your name here>
Credit: <list sources used, if any>
Assignment:
In this assignment you must use *list comprehensions* to generate sprites that show the behavior
of certain mathematical functions: sine and cosine.
The sine and cosine functions are provided in the Python math library. These functions are used
to relate *angles* to *rectangular* (x,y) coordinate systems and can be very useful in computer
game design.
Unlike the last assignment using ggame, this one will not provide any "skeleton" code to fill
in. You should use your submission for the Picture assignment
(https://github.com/HHS-IntroProgramming/Picture) as a reference for starting this assignment.
See:
https://github.com/HHS-IntroProgramming/Sine-Cosine/blob/master/README.md
for a detailed list of requirements for this assignment.
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Displaying-Graphics
for general information on how to use ggame.
https://github.com/HHS-IntroProgramming/Standards-and-Syllabus/wiki/Programmed-Graphics
for general information on using list comprehensions to generate graphics.
http://brythonserver.github.io/ggame/
for detailed information on ggame.
"""
|
Carreau/readthedocs.org
|
refs/heads/master
|
readthedocs/donate/admin.py
|
7
|
from django.contrib import admin
from .models import Supporter
class SupporterAdmin(admin.ModelAdmin):
model = Supporter
raw_id_fields = ('user',)
list_display = ('name', 'email', 'dollars', 'public')
list_filter = ('name', 'email', 'dollars', 'public')
admin.site.register(Supporter, SupporterAdmin)
|
linjoahow/w17g
|
refs/heads/master
|
static/Brython3.1.1-20150328-091302/Lib/antigravity.py
|
917
|
import webbrowser
import hashlib
webbrowser.open("http://xkcd.com/353/")
def geohash(latitude, longitude, datedow):
'''Compute geohash() using the Munroe algorithm.
>>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
37.857713 -122.544543
'''
# http://xkcd.com/426/
h = hashlib.md5(datedow).hexdigest()
p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])]
print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:]))
|
ageron/tensorflow
|
refs/heads/master
|
tensorflow/contrib/tpu/python/tpu/feature_column.py
|
8
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Stub file to maintain backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=wildcard-import,unused-import
from tensorflow.python.tpu.feature_column import *
# used by tests
from tensorflow.python.tpu.feature_column import _is_running_on_cpu
from tensorflow.python.tpu.feature_column import _record_variable_scope_and_name
from tensorflow.python.tpu.feature_column import _TPU_FC_TO_SCOPE
from tensorflow.python.tpu.feature_column import _TPUBaseEmbeddingColumn
from tensorflow.python.tpu.feature_column import _TPUEmbeddingColumn
from tensorflow.python.tpu.feature_column import _TPUSharedEmbeddingColumn
# pylint: enable=wildcard-import,unused-import
|
astrofrog/glue-vispy-viewers
|
refs/heads/master
|
glue_vispy_viewers/extern/vispy/util/profiler.py
|
21
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# Adapted from PyQtGraph
import sys
from . import ptime
from .. import config
class Profiler(object):
"""Simple profiler allowing directed, hierarchical measurement of time
intervals.
By default, profilers are disabled. To enable profiling, set the
environment variable `VISPYPROFILE` to a comma-separated list of
fully-qualified names of profiled functions.
Calling a profiler registers a message (defaulting to an increasing
counter) that contains the time elapsed since the last call. When the
profiler is about to be garbage-collected, the messages are passed to the
outer profiler if one is running, or printed to stdout otherwise.
If `delayed` is set to False, messages are immediately printed instead.
Example:
def function(...):
profiler = Profiler()
... do stuff ...
profiler('did stuff')
... do other stuff ...
profiler('did other stuff')
# profiler is garbage-collected and flushed at function end
If this function is a method of class C, setting `VISPYPROFILE` to
"C.function" (without the module name) will enable this profiler.
For regular functions, use the qualified name of the function, stripping
only the initial "vispy.." prefix from the module.
"""
_profilers = (config['profile'].split(",") if config['profile'] is not None
else [])
_depth = 0
_msgs = []
# set this flag to disable all or individual profilers at runtime
disable = False
class DisabledProfiler(object):
def __init__(self, *args, **kwds):
pass
def __call__(self, *args):
pass
def finish(self):
pass
def mark(self, msg=None):
pass
_disabled_profiler = DisabledProfiler()
def __new__(cls, msg=None, disabled='env', delayed=True):
"""Optionally create a new profiler based on caller's qualname.
"""
if (disabled is True or
(disabled == 'env' and len(cls._profilers) == 0)):
return cls._disabled_profiler
# determine the qualified name of the caller function
caller_frame = sys._getframe(1)
try:
caller_object_type = type(caller_frame.f_locals["self"])
except KeyError: # we are in a regular function
qualifier = caller_frame.f_globals["__name__"].split(".", 1)[1]
else: # we are in a method
qualifier = caller_object_type.__name__
func_qualname = qualifier + "." + caller_frame.f_code.co_name
if (disabled == 'env' and func_qualname not in cls._profilers and
'all' not in cls._profilers): # don't do anything
return cls._disabled_profiler
# create an actual profiling object
cls._depth += 1
obj = super(Profiler, cls).__new__(cls)
obj._name = msg or func_qualname
obj._delayed = delayed
obj._mark_count = 0
obj._finished = False
obj._firstTime = obj._last_time = ptime.time()
obj._new_msg("> Entering " + obj._name)
return obj
def __call__(self, msg=None, *args):
"""Register or print a new message with timing information.
"""
if self.disable:
return
if msg is None:
msg = str(self._mark_count)
self._mark_count += 1
new_time = ptime.time()
elapsed = (new_time - self._last_time) * 1000
self._new_msg(" " + msg + ": %0.4f ms", *(args + (elapsed,)))
self._last_time = new_time
def mark(self, msg=None):
self(msg)
def _new_msg(self, msg, *args):
msg = " " * (self._depth - 1) + msg
if self._delayed:
self._msgs.append((msg, args))
else:
self.flush()
print(msg % args)
def __del__(self):
self.finish()
def finish(self, msg=None):
"""Add a final message; flush the message list if no parent profiler.
"""
if self._finished or self.disable:
return
self._finished = True
if msg is not None:
self(msg)
self._new_msg("< Exiting %s, total time: %0.4f ms",
self._name, (ptime.time() - self._firstTime) * 1000)
type(self)._depth -= 1
if self._depth < 1:
self.flush()
def flush(self):
if self._msgs:
print("\n".join([m[0] % m[1] for m in self._msgs]))
type(self)._msgs = []
|
ojengwa/oh-mainline
|
refs/heads/master
|
vendor/packages/python-social-auth/social/apps/webpy_app/__init__.py
|
81
|
from social.strategies.utils import set_current_strategy_getter
from social.apps.webpy_app.utils import load_strategy
set_current_strategy_getter(load_strategy)
|
Azure/azure-sdk-for-python
|
refs/heads/sync-eng/common-js-nightly-docs-2-1768-ForTestPipeline
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_08_01/aio/operations/_nat_rules_operations.py
|
1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class NatRulesOperations:
"""NatRulesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
gateway_name: str,
nat_rule_name: str,
**kwargs
) -> "_models.VpnGatewayNatRule":
"""Retrieves the details of a nat ruleGet.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param nat_rule_name: The name of the nat rule.
:type nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VpnGatewayNatRule, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_08_01.models.VpnGatewayNatRule
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGatewayNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnGatewayNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/natRules/{natRuleName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
gateway_name: str,
nat_rule_name: str,
nat_rule_parameters: "_models.VpnGatewayNatRule",
**kwargs
) -> "_models.VpnGatewayNatRule":
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGatewayNatRule"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(nat_rule_parameters, 'VpnGatewayNatRule')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VpnGatewayNatRule', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VpnGatewayNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/natRules/{natRuleName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
gateway_name: str,
nat_rule_name: str,
nat_rule_parameters: "_models.VpnGatewayNatRule",
**kwargs
) -> AsyncLROPoller["_models.VpnGatewayNatRule"]:
"""Creates a nat rule to a scalable vpn gateway if it doesn't exist else updates the existing nat
rules.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param nat_rule_name: The name of the nat rule.
:type nat_rule_name: str
:param nat_rule_parameters: Parameters supplied to create or Update a Nat Rule.
:type nat_rule_parameters: ~azure.mgmt.network.v2020_08_01.models.VpnGatewayNatRule
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnGatewayNatRule or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_08_01.models.VpnGatewayNatRule]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VpnGatewayNatRule"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
nat_rule_name=nat_rule_name,
nat_rule_parameters=nat_rule_parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnGatewayNatRule', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/natRules/{natRuleName}'} # type: ignore
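    # Usage sketch (client construction and the rule model values are
    # assumptions, not part of the generated file): start the long-running
    # operation and await its result.
    #
    #     from azure.mgmt.network.models import VpnGatewayNatRule
    #     poller = await client.nat_rules.begin_create_or_update(
    #         "my-rg", "my-gateway", "my-rule",
    #         VpnGatewayNatRule(mode="EgressSnat"),
    #     )
    #     nat_rule = await poller.result()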
async def _delete_initial(
self,
resource_group_name: str,
gateway_name: str,
nat_rule_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/natRules/{natRuleName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
gateway_name: str,
nat_rule_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a nat rule.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:param nat_rule_name: The name of the nat rule.
:type nat_rule_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
gateway_name=gateway_name,
nat_rule_name=nat_rule_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
'natRuleName': self._serialize.url("nat_rule_name", nat_rule_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/natRules/{natRuleName}'} # type: ignore
def list_by_vpn_gateway(
self,
resource_group_name: str,
gateway_name: str,
**kwargs
) -> AsyncIterable["_models.ListVpnGatewayNatRulesResult"]:
"""Retrieves all nat rules for a particular virtual wan vpn gateway.
:param resource_group_name: The resource group name of the VpnGateway.
:type resource_group_name: str
:param gateway_name: The name of the gateway.
:type gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListVpnGatewayNatRulesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_08_01.models.ListVpnGatewayNatRulesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListVpnGatewayNatRulesResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_vpn_gateway.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'gatewayName': self._serialize.url("gateway_name", gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ListVpnGatewayNatRulesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_vpn_gateway.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/vpnGateways/{gatewayName}/natRules'} # type: ignore
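    # Usage sketch (client construction is an assumption, not part of the
    # generated file): iterate every NAT rule on a gateway via the async pager.
    #
    #     from azure.identity.aio import DefaultAzureCredential
    #     from azure.mgmt.network.aio import NetworkManagementClient
    #
    #     async def dump_nat_rules():
    #         async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #             async for rule in client.nat_rules.list_by_vpn_gateway("my-rg", "my-gateway"):
    #                 print(rule.name)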
|
CIFASIS/ManFuzzer
|
refs/heads/master
|
values/filegen.py
|
2
|
'''
Created on Dec 30, 2012
@author: Peter
'''
import tempfile
import os
import binascii
import logging
from values.datagen import DataValueGenerator
class FileValueGenerator(object):
'''
Generates random files.
'''
logger = logging.getLogger('filegen')
def __init__(self,meanlen,stdlen):
self.meanlen = meanlen
self.stdlen = stdlen
def generate(self):
dvg = DataValueGenerator(self.meanlen, self.stdlen)
data = dvg.generate()
fp = tempfile.NamedTemporaryFile(delete=False)
fp.write(binascii.a2b_hex(data))
fp.close()
self.logger.debug("Temp file name: %s " % fp.name)
return '"' + os.path.abspath(fp.name) + '"'
|
jlamarque/symfony
|
refs/heads/master
|
vendor/doctrine/orm/docs/en/conf.py
|
2448
|
# -*- coding: utf-8 -*-
#
# Doctrine 2 ORM documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 3 18:10:24 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_exts'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['configurationblock']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Doctrine 2 ORM'
copyright = u'2010-12, Doctrine Project Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2'
# The full version, including alpha/beta/rc tags.
release = '2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'doctrine'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Doctrine2ORMdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation',
u'Doctrine Project Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
primary_domain = "dcorm"
def linkcode_resolve(domain, info):
if domain == 'dcorm':
return 'http://'
return None
|
kamenim/samba
|
refs/heads/master
|
third_party/dnspython/dns/namedict.py
|
99
|
# Copyright (C) 2003-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""DNS name dictionary"""
import dns.name
class NameDict(dict):
"""A dictionary whose keys are dns.name.Name objects.
@ivar max_depth: the maximum depth of the keys that have ever been
added to the dictionary.
@type max_depth: int
"""
def __init__(self, *args, **kwargs):
super(NameDict, self).__init__(*args, **kwargs)
self.max_depth = 0
def __setitem__(self, key, value):
if not isinstance(key, dns.name.Name):
raise ValueError('NameDict key must be a name')
depth = len(key)
if depth > self.max_depth:
self.max_depth = depth
super(NameDict, self).__setitem__(key, value)
def get_deepest_match(self, name):
"""Find the deepest match to I{name} in the dictionary.
The deepest match is the longest name in the dictionary which is
a superdomain of I{name}.
@param name: the name
@type name: dns.name.Name object
@rtype: (key, value) tuple
"""
depth = len(name)
if depth > self.max_depth:
depth = self.max_depth
        for i in xrange(-depth, 0):
            n = dns.name.Name(name[i:])
            if n in self:
                return (n, self[n])
        # Fall back to the root entry; this raises KeyError if no
        # dns.name.empty key was ever added.
        v = self[dns.name.empty]
        return (dns.name.empty, v)
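
# A minimal usage sketch (not part of the library; assumes dnspython's
# dns.name module, imported above, is available):
if __name__ == '__main__':
    d = NameDict()
    d[dns.name.empty] = 'default'
    d[dns.name.from_text('example.com.')] = 'example zone'
    # www.example.com. is a subdomain of example.com., so the deepest
    # match is the example.com. entry.
    print d.get_deepest_match(dns.name.from_text('www.example.com.'))
    # An unrelated name falls back to the dns.name.empty entry.
    print d.get_deepest_match(dns.name.from_text('other.net.'))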
|
freedesktop-unofficial-mirror/telepathy__telepathy-qt4-yell
|
refs/heads/master
|
tools/qt4-client-gen.py
|
2
|
#!/usr/bin/python
#
# Copyright (C) 2008 Collabora Limited <http://www.collabora.co.uk>
# Copyright (C) 2008 Nokia Corporation
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from sys import argv
import xml.dom.minidom
import codecs
from getopt import gnu_getopt
from libtpcodegen import NS_TP, get_descendant_text, get_by_path
from libqt4codegen import binding_from_usage, extract_arg_or_member_info, format_docstring, gather_externals, gather_custom_lists, get_headerfile_cmd, get_qt4_name, qt4_identifier_escape
class Generator(object):
def __init__(self, opts):
try:
self.group = opts.get('--group', '')
self.headerfile = opts['--headerfile']
self.implfile = opts['--implfile']
self.namespace = opts['--namespace']
self.typesnamespace = opts['--typesnamespace']
self.realinclude = opts['--realinclude']
self.prettyinclude = opts.get('--prettyinclude')
self.extraincludes = opts.get('--extraincludes', None)
self.mainiface = opts.get('--mainiface', None)
self.must_define = opts.get('--must-define', None)
self.dbus_proxy = opts.get('--dbus-proxy',
'Tp::DBusProxy')
self.visibility = opts.get('--visibility', '')
ifacedom = xml.dom.minidom.parse(opts['--ifacexml'])
specdom = xml.dom.minidom.parse(opts['--specxml'])
except KeyError, k:
assert False, 'Missing required parameter %s' % k.args[0]
self.hs = []
self.bs = []
self.ifacenodes = ifacedom.getElementsByTagName('node')
self.spec, = get_by_path(specdom, "spec")
self.custom_lists = gather_custom_lists(self.spec, self.typesnamespace)
self.externals = gather_externals(self.spec)
def __call__(self):
# Output info header and includes
self.h("""\
/*
* This file contains D-Bus client proxy classes generated by qt4-client-gen.py.
*
* This file can be distributed under the same terms as the specification from
* which it was generated.
*/
""")
if self.must_define:
self.h('\n')
self.h('#ifndef %s\n' % self.must_define)
self.h('#error %s\n' % self.must_define)
self.h('#endif\n')
self.h('\n')
if self.extraincludes:
for include in self.extraincludes.split(','):
self.h('#include %s\n' % include)
self.h("""
#include <QtGlobal>
#include <QString>
#include <QObject>
#include <QVariant>
#include <QDBusPendingReply>
#include <TelepathyQt4/AbstractInterface>
#include <TelepathyQt4/DBusProxy>
#include <TelepathyQt4/Global>
namespace Tp
{
class PendingVariant;
class PendingOperation;
}
""")
if self.must_define:
self.b("""#define %s\n""" % (self.must_define))
self.b("""#include "%s"
""" % self.realinclude)
# Begin namespace
for ns in self.namespace.split('::'):
self.hb("""\
namespace %s
{
""" % ns)
# Output interface proxies
def ifacenodecmp(x, y):
xname, yname = [self.namespace + '::' + node.getAttribute('name').replace('/', '').replace('_', '') + 'Interface' for node in x, y]
if xname == self.mainiface:
return -1
elif yname == self.mainiface:
return 1
else:
return cmp(xname, yname)
self.ifacenodes.sort(cmp=ifacenodecmp)
for ifacenode in self.ifacenodes:
self.do_ifacenode(ifacenode)
# End namespace
self.hb(''.join(['}\n' for ns in self.namespace.split('::')]))
# Add metatype declaration - otherwise QTBUG #2151 might be triggered
for ifacenode in self.ifacenodes:
classname = ifacenode.getAttribute('name').replace('/', '').replace('_', '') + 'Interface'
self.h("Q_DECLARE_METATYPE(" + self.namespace + "::" + classname + "*)\n")
# Write output to files
(codecs.getwriter('utf-8')(open(self.headerfile, 'w'))).write(''.join(self.hs))
(codecs.getwriter('utf-8')(open(self.implfile, 'w'))).write(''.join(self.bs))
def do_ifacenode(self, ifacenode):
# Extract info
name = ifacenode.getAttribute('name').replace('/', '').replace('_', '') + 'Interface'
iface, = get_by_path(ifacenode, 'interface')
dbusname = iface.getAttribute('name')
# Begin class, constructors
self.h("""
/**
* \\class %(name)s
%(headercmd)s\
%(groupcmd)s\
*
* Proxy class providing a 1:1 mapping of the D-Bus interface "%(dbusname)s."
*/
class %(visibility)s %(name)s : public Tp::AbstractInterface
{
Q_OBJECT
public:
/**
* Returns the name of the interface "%(dbusname)s", which this class
* represents.
*
* \\return The D-Bus interface name.
*/
static inline QLatin1String staticInterfaceName()
{
return QLatin1String("%(dbusname)s");
}
/**
* Creates a %(name)s associated with the given object on the session bus.
*
* \\param busName Name of the service the object is on.
* \\param objectPath Path to the object on the service.
* \\param parent Passed to the parent class constructor.
*/
%(name)s(
const QString& busName,
const QString& objectPath,
QObject* parent = 0
);
/**
* Creates a %(name)s associated with the given object on the given bus.
*
* \\param connection The bus via which the object can be reached.
* \\param busName Name of the service the object is on.
* \\param objectPath Path to the object on the service.
* \\param parent Passed to the parent class constructor.
*/
%(name)s(
const QDBusConnection& connection,
const QString& busName,
const QString& objectPath,
QObject* parent = 0
);
""" % {'name' : name,
'headercmd' : get_headerfile_cmd(self.realinclude, self.prettyinclude),
'groupcmd' : self.group and (' * \\ingroup %s\n' % self.group),
'dbusname' : dbusname,
'visibility': self.visibility,
})
self.b("""
%(name)s::%(name)s(const QString& busName, const QString& objectPath, QObject *parent)
: Tp::AbstractInterface(busName, objectPath, staticInterfaceName(), QDBusConnection::sessionBus(), parent)
{
}
%(name)s::%(name)s(const QDBusConnection& connection, const QString& busName, const QString& objectPath, QObject *parent)
: Tp::AbstractInterface(busName, objectPath, staticInterfaceName(), connection, parent)
{
}
""" % {'name' : name})
# Construct from DBusProxy subclass
self.h("""
/**
* Creates a %(name)s associated with the same object as the given proxy.
*
* \\param proxy The proxy to use. It will also be the QObject::parent()
* for this object.
*/
%(name)s(%(dbus_proxy)s *proxy);
""" % {'name' : name,
'dbus_proxy' : self.dbus_proxy})
self.b("""
%(name)s::%(name)s(%(dbus_proxy)s *proxy)
: Tp::AbstractInterface(proxy, staticInterfaceName())
{
}
""" % {'name' : name,
'dbus_proxy' : self.dbus_proxy})
# Main interface
mainiface = self.mainiface or 'Tp::AbstractInterface'
if mainiface != self.namespace + '::' + name:
self.h("""
/**
* Creates a %(name)s associated with the same object as the given proxy.
* Additionally, the created proxy will have the same parent as the given
* proxy.
*
* \\param mainInterface The proxy to use.
*/
explicit %(name)s(const %(mainiface)s& mainInterface);
/**
* Creates a %(name)s associated with the same object as the given proxy.
* However, a different parent object can be specified.
*
* \\param mainInterface The proxy to use.
* \\param parent Passed to the parent class constructor.
*/
%(name)s(const %(mainiface)s& mainInterface, QObject* parent);
""" % {'name' : name,
'mainiface' : mainiface})
self.b("""
%(name)s::%(name)s(const %(mainiface)s& mainInterface)
: Tp::AbstractInterface(mainInterface.service(), mainInterface.path(), staticInterfaceName(), mainInterface.connection(), mainInterface.parent())
{
}
%(name)s::%(name)s(const %(mainiface)s& mainInterface, QObject *parent)
: Tp::AbstractInterface(mainInterface.service(), mainInterface.path(), staticInterfaceName(), mainInterface.connection(), parent)
{
}
""" % {'name' : name,
'mainiface' : mainiface})
# Properties
has_props = False
for prop in get_by_path(iface, 'property'):
# Skip tp:properties
if not prop.namespaceURI:
self.do_prop(prop)
has_props = True
self.h("""
/**
* Request all of the DBus properties on the interface.
*
* \\return A pending variant map which will emit finished when the properties have
* been retrieved.
*/
Tp::PendingVariantMap *requestAllProperties() const
{
return internalRequestAllProperties();
}
""")
# Methods
methods = get_by_path(iface, 'method')
if methods:
self.h("""
public Q_SLOTS:\
""")
for method in methods:
self.do_method(method)
# Signals
signals = get_by_path(iface, 'signal')
if signals:
self.h("""
Q_SIGNALS:\
""")
for signal in signals:
self.do_signal(signal)
# invalidated handler (already a slot in the superclass)
# we can't just use disconnect(this, NULL, NULL, NULL) because
# (a) that would disconnect QObject::destroyed() and other non-D-Bus
# signals, and (b) QtDBus doesn't support that usage anyway (it needs
# specific signals in order to remove its signal match rules)
self.h("""
protected:
virtual void invalidate(Tp::DBusProxy *, const QString &, const QString &);
""")
self.b("""
void %(name)s::invalidate(Tp::DBusProxy *proxy,
const QString &error, const QString &message)
{
""" % {'name' : name})
for signal in signals:
self.do_signal_disconnect(signal)
self.b("""
Tp::AbstractInterface::invalidate(proxy, error, message);
}
""")
# Close class
self.h("""\
};
""")
def do_prop(self, prop):
name = prop.getAttribute('name')
access = prop.getAttribute('access')
gettername = name
settername = None
sig = prop.getAttribute('type')
tptype = prop.getAttributeNS(NS_TP, 'type')
binding = binding_from_usage(sig, tptype, self.custom_lists, (sig, tptype) in self.externals, self.typesnamespace)
if 'write' in access:
settername = 'set' + name
if 'read' in access:
self.h("""
/**
* Asynchronous getter for the remote object property "%(name)s" of type %(val)s.
*
%(docstring)s\
*
* \\return A pending variant which will emit finished when the property has been
* retrieved.
*/
inline Tp::PendingVariant *%(gettername)s() const
{
return internalRequestProperty(QLatin1String("%(name)s"));
}
""" % {'name' : name,
'docstring' : format_docstring(prop, ' * ').replace('*/',
'*/'),
'val' : binding.val,
'name' : name,
'gettername' : 'requestProperty' + name})
if 'write' in access:
self.h("""
/**
* Asynchronous setter for the remote object property "%(name)s" of type %(type)s.
*
%(docstring)s\
*
* \\return A pending operation which will emit finished when the property has been
* set.
*/
inline Tp::PendingOperation *%(settername)s(%(type)s newValue)
{
return internalSetProperty(QLatin1String("%(name)s"), QVariant::fromValue(newValue));
}
""" % {'name' : name,
'docstring' : format_docstring(prop, ' * ').replace('*/',
'*/'),
'type' : binding.val,
'name' : name,
'settername' : 'setProperty' + name})
def do_method(self, method):
name = method.getAttribute('name')
args = get_by_path(method, 'arg')
argnames, argdocstrings, argbindings = extract_arg_or_member_info(args, self.custom_lists, self.externals, self.typesnamespace, ' * ')
inargs = []
outargs = []
for i in xrange(len(args)):
if args[i].getAttribute('direction') == 'out':
outargs.append(i)
else:
inargs.append(i)
            assert argnames[i] is not None, 'No argument name for input argument at index %d for method %s' % (i, name)
rettypes = ', '.join([argbindings[i].val for i in outargs])
params = ', '.join([argbindings[i].inarg + ' ' + argnames[i] for i in inargs])
if params:
params += ', int timeout = -1'
else:
params = 'int timeout = -1'
self.h("""
/**
* Begins a call to the D-Bus method "%s" on the remote object.
%s\
*
* Note that \\a timeout is ignored as of now. It will be used once
* http://bugreports.qt.nokia.com/browse/QTBUG-11775 is fixed.
*
""" % (name, format_docstring(method, ' * ')))
for i in inargs:
if argdocstrings[i]:
self.h("""\
*
* \\param %s
%s\
""" % (argnames[i], argdocstrings[i]))
self.h("""\
* \\param timeout The timeout in milliseconds.
""")
for i in outargs:
if argdocstrings[i]:
self.h("""\
*
* \\return
%s\
""" % argdocstrings[i])
self.h("""\
*/
inline QDBusPendingReply<%(rettypes)s> %(name)s(%(params)s)
{
if (!invalidationReason().isEmpty()) {
return QDBusPendingReply<%(rettypes)s>(QDBusMessage::createError(
invalidationReason(),
invalidationMessage()
));
}
""" % {'rettypes' : rettypes,
'name' : name,
'params' : params})
if inargs:
self.h("""
QDBusMessage callMessage = QDBusMessage::createMethodCall(this->service(), this->path(),
this->staticInterfaceName(), QLatin1String("%s"));
callMessage << %s;
return this->connection().asyncCall(callMessage, timeout);
}
""" % (name, ' << '.join(['QVariant::fromValue(%s)' % argnames[i] for i in inargs])))
else:
self.h("""
QDBusMessage callMessage = QDBusMessage::createMethodCall(this->service(), this->path(),
this->staticInterfaceName(), QLatin1String("%s"));
return this->connection().asyncCall(callMessage, timeout);
}
""" % name)
def do_signal(self, signal):
name = signal.getAttribute('name')
argnames, argdocstrings, argbindings = extract_arg_or_member_info(get_by_path(signal, 'arg'), self.custom_lists, self.externals, self.typesnamespace, ' * ')
self.h("""
/**
* Represents the signal "%s" on the remote object.
%s\
""" % (name, format_docstring(signal, ' * ')))
for i in xrange(len(argnames)):
            assert argnames[i] is not None, 'Name missing from argument at index %d for signal %s' % (i, name)
if argdocstrings[i]:
self.h("""\
*
* \\param %s
%s\
""" % (argnames[i], argdocstrings[i]))
self.h("""\
*/
void %s(%s);
""" % (name, ', '.join(['%s %s' % (binding.inarg, name) for binding, name in zip(argbindings, argnames)])))
def do_signal_disconnect(self, signal):
name = signal.getAttribute('name')
_, _, argbindings = extract_arg_or_member_info(get_by_path(signal, 'arg'), self.custom_lists, self.externals, self.typesnamespace, ' * ')
self.b("""\
disconnect(this, SIGNAL(%s(%s)), NULL, NULL);
""" % (name, ', '.join([binding.inarg for binding in argbindings])))
def h(self, str):
self.hs.append(str)
def b(self, str):
self.bs.append(str)
def hb(self, str):
self.h(str)
self.b(str)
if __name__ == '__main__':
options, argv = gnu_getopt(argv[1:], '',
['group=',
'namespace=',
'typesnamespace=',
'headerfile=',
'implfile=',
'ifacexml=',
'specxml=',
'realinclude=',
'prettyinclude=',
'extraincludes=',
'mainiface=',
'must-define=',
'dbus-proxy=',
'visibility='])
Generator(dict(options))()
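
# Hedged example invocation (file names are placeholders; the option
# names mirror the gnu_getopt list above):
#   python qt4-client-gen.py \
#       --namespace=Tp::Client --typesnamespace=Tp \
#       --headerfile=cli-foo.h --implfile=cli-foo.cpp \
#       --ifacexml=foo.xml --specxml=spec.xml --realinclude=cli-foo.h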
|
zikzakmedia/python-mediawiki
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
MediaWiki Syntax to HTML
:copyright: (c) 2012 by Raimon Esteve.
:license: GPLv3, see LICENSE for more details
A simple-to-use Python library to convert wiki syntax
to HTML
'''
from setuptools import setup
import mediawiki
setup(
name = 'mediawiki',
version=mediawiki.__version__,
url='http://github.com/zikzakmedia/python-mediawiki',
license='GPLv3+',
author='Raimon Esteve',
author_email='zikzak@zikzakmedia.com',
description='MediaWiki Syntax to HTML',
packages=[
'mediawiki',
'mediawiki.wikimarkup',
'mediawiki.doc',
],
zip_safe=False,
platforms='any',
install_requires=[
],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
TRKP/android_kernel_samsung_s5312
|
refs/heads/kitkat
|
tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
|
4653
|
# EventClass.py
#
# This is a library defining some event type classes, which can be
# used by other scripts to analyze perf samples.
#
# Currently there are just a few classes defined as examples:
# PerfEvent is the base class for all perf event samples, PebsEvent
# is a hardware-based Intel x86 PEBS event, and users can add more
# SW/HW event classes based on their requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have a good way to tell the event type directly,
# so we infer it from the size of the raw buffer: a PEBS event with
# load latency data is 176 bytes, while a plain PEBS event is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
if (len(raw_buf) == 144):
event = PebsEvent(name, comm, dso, symbol, raw_buf)
elif (len(raw_buf) == 176):
event = PebsNHM(name, comm, dso, symbol, raw_buf)
else:
event = PerfEvent(name, comm, dso, symbol, raw_buf)
return event
class PerfEvent(object):
event_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
self.name = name
self.comm = comm
self.dso = dso
self.symbol = symbol
self.raw_buf = raw_buf
self.ev_type = ev_type
PerfEvent.event_num += 1
def show(self):
print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
pebs_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
tmp_buf=raw_buf[0:80]
flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
self.flags = flags
self.ip = ip
self.ax = ax
self.bx = bx
self.cx = cx
self.dx = dx
self.si = si
self.di = di
self.bp = bp
self.sp = sp
PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsEvent.pebs_num += 1
del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info, which
# lies in the four 64-bit words written after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
pebs_nhm_num = 0
def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
tmp_buf=raw_buf[144:176]
status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
self.status = status
self.dla = dla
self.dse = dse
self.lat = lat
PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
PebsNHM.pebs_nhm_num += 1
del tmp_buf
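
# Minimal self-test sketch (an illustration only, not part of the perf
# script API): a synthetic 144-byte raw buffer makes create_event()
# dispatch to PebsEvent.
if __name__ == '__main__':
    fake_buf = struct.pack('QQQQQQQQQQ', *range(10)) + '\x00' * 64
    ev = create_event('cycles', 'bash', '/bin/bash', 'main', fake_buf)
    ev.show()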
|
zcmarkyoung/node-gyp
|
refs/heads/master
|
gyp/test/make/gyptest-noload.py
|
362
|
#!/usr/bin/env python
# Copyright (c) 2010 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Tests the use of the NO_LOAD flag which makes loading sub .mk files
optional.
"""
# Python 2.5 needs this for the with statement.
from __future__ import with_statement
import os
import TestGyp
test = TestGyp.TestGyp(formats=['make'])
test.run_gyp('all.gyp', chdir='noload')
test.relocate('noload', 'relocate/noload')
test.build('build/all.gyp', test.ALL, chdir='relocate/noload')
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Just sanity test that NO_LOAD=lib doesn't break anything.
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=z'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Make sure we can rebuild without reloading the sub .mk file.
with open('relocate/noload/main.c', 'a') as src_file:
src_file.write("\n")
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
# Change shared.c, but verify that it doesn't get rebuilt if we don't load it.
with open('relocate/noload/lib/shared.c', 'w') as shared_file:
shared_file.write(
'#include "shared.h"\n'
'const char kSharedStr[] = "modified";\n'
)
test.build('build/all.gyp', test.ALL, chdir='relocate/noload',
arguments=['NO_LOAD=lib'])
test.run_built_executable('exe', chdir='relocate/noload',
stdout='Hello from shared.c.\n')
test.pass_test()
|
larroy/clearskies_core
|
refs/heads/master
|
tools/gyp/pylib/gyp/MSVSUserFile.py
|
2710
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
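# Illustrative expected behaviour (a sketch, not part of gyp's tests):
#   _QuoteWin32CommandLineArgs(['a b', 'say "hi"', 'plain'])
#     -> ['"a b"', '"say ""hi"""', 'plain']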
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
working_directory: other files which may trigger the rule. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
awduda/awduda.github.io
|
refs/heads/master
|
venv/lib/python2.7/site-packages/pip/req/req_set.py
|
338
|
from __future__ import absolute_import
from collections import defaultdict
from itertools import chain
import logging
import os
from pip._vendor import pkg_resources
from pip._vendor import requests
from pip.compat import expanduser
from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path,
unpack_url)
from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled,
DistributionNotFound, PreviousBuildDirError,
HashError, HashErrors, HashUnpinned,
DirectoryUrlHashUnsupported, VcsHashUnsupported,
UnsupportedPythonVersion)
from pip.req.req_install import InstallRequirement
from pip.utils import (
display_path, dist_in_usersite, ensure_dir, normalize_path)
from pip.utils.hashes import MissingHashes
from pip.utils.logging import indent_log
from pip.utils.packaging import check_dist_requires_python
from pip.vcs import vcs
from pip.wheel import Wheel
logger = logging.getLogger(__name__)
class Requirements(object):
def __init__(self):
self._keys = []
self._dict = {}
def keys(self):
return self._keys
def values(self):
return [self._dict[key] for key in self._keys]
def __contains__(self, item):
return item in self._keys
def __setitem__(self, key, value):
if key not in self._keys:
self._keys.append(key)
self._dict[key] = value
def __getitem__(self, key):
return self._dict[key]
def __repr__(self):
values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
return 'Requirements({%s})' % ', '.join(values)
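# Illustrative behaviour sketch (hypothetical values): Requirements is an
# insertion-ordered mapping, so
#   reqs = Requirements(); reqs['flask'] = 'req-a'; reqs['jinja2'] = 'req-b'
#   reqs.keys()     -> ['flask', 'jinja2']   (insertion order preserved)
#   'flask' in reqs -> True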
class DistAbstraction(object):
"""Abstracts out the wheel vs non-wheel prepare_files logic.
The requirements for anything installable are as follows:
- we must be able to determine the requirement name
(or we can't correctly handle the non-upgrade case).
- we must be able to generate a list of run-time dependencies
without installing any additional packages (or we would
have to either burn time by doing temporary isolated installs
      or alternatively violate pip's 'don't start installing unless
all requirements are available' rule - neither of which are
desirable).
- for packages with setup requirements, we must also be able
to determine their requirements without installing additional
packages (for the same reason as run-time dependencies)
- we must be able to create a Distribution object exposing the
above metadata.
"""
def __init__(self, req_to_install):
self.req_to_install = req_to_install
def dist(self, finder):
"""Return a setuptools Dist object."""
raise NotImplementedError(self.dist)
def prep_for_dist(self):
"""Ensure that we can get a Dist for this requirement."""
raise NotImplementedError(self.dist)
def make_abstract_dist(req_to_install):
"""Factory to make an abstract dist object.
    Preconditions: Either an editable req with a source_dir; or a req that
    is satisfied_by an installed dist or links to a wheel; or a
    non-editable req with a source_dir.
:return: A concrete DistAbstraction.
"""
if req_to_install.editable:
return IsSDist(req_to_install)
elif req_to_install.link and req_to_install.link.is_wheel:
return IsWheel(req_to_install)
else:
return IsSDist(req_to_install)
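# Dispatch summary (a reading aid, not extra behaviour): editable reqs and
# plain sdists both get IsSDist; only a requirement whose link points at a
# wheel gets IsWheel; already-installed reqs are wrapped in Installed
# elsewhere (see _prepare_file below).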
class IsWheel(DistAbstraction):
def dist(self, finder):
return list(pkg_resources.find_distributions(
self.req_to_install.source_dir))[0]
def prep_for_dist(self):
# FIXME:https://github.com/pypa/pip/issues/1112
pass
class IsSDist(DistAbstraction):
def dist(self, finder):
dist = self.req_to_install.get_dist()
# FIXME: shouldn't be globally added:
if dist.has_metadata('dependency_links.txt'):
finder.add_dependency_links(
dist.get_metadata_lines('dependency_links.txt')
)
return dist
def prep_for_dist(self):
self.req_to_install.run_egg_info()
self.req_to_install.assert_source_matches_version()
class Installed(DistAbstraction):
def dist(self, finder):
return self.req_to_install.satisfied_by
def prep_for_dist(self):
pass
class RequirementSet(object):
def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
upgrade_strategy=None, ignore_installed=False, as_egg=False,
target_dir=None, ignore_dependencies=False,
force_reinstall=False, use_user_site=False, session=None,
pycompile=True, isolated=False, wheel_download_dir=None,
wheel_cache=None, require_hashes=False,
ignore_requires_python=False):
"""Create a RequirementSet.
:param wheel_download_dir: Where still-packed .whl files should be
written to. If None they are written to the download_dir parameter.
Separate to download_dir to permit only keeping wheel archives for
pip wheel.
:param download_dir: Where still packed archives should be written to.
If None they are not saved, and are deleted immediately after
unpacking.
:param wheel_cache: The pip wheel cache, for passing to
InstallRequirement.
"""
if session is None:
raise TypeError(
"RequirementSet() missing 1 required keyword argument: "
"'session'"
)
self.build_dir = build_dir
self.src_dir = src_dir
# XXX: download_dir and wheel_download_dir overlap semantically and may
# be combined if we're willing to have non-wheel archives present in
# the wheelhouse output by 'pip wheel'.
self.download_dir = download_dir
self.upgrade = upgrade
self.upgrade_strategy = upgrade_strategy
self.ignore_installed = ignore_installed
self.force_reinstall = force_reinstall
self.requirements = Requirements()
# Mapping of alias: real_name
self.requirement_aliases = {}
self.unnamed_requirements = []
self.ignore_dependencies = ignore_dependencies
self.ignore_requires_python = ignore_requires_python
self.successfully_downloaded = []
self.successfully_installed = []
self.reqs_to_cleanup = []
self.as_egg = as_egg
self.use_user_site = use_user_site
self.target_dir = target_dir # set from --target option
self.session = session
self.pycompile = pycompile
self.isolated = isolated
if wheel_download_dir:
wheel_download_dir = normalize_path(wheel_download_dir)
self.wheel_download_dir = wheel_download_dir
self._wheel_cache = wheel_cache
self.require_hashes = require_hashes
# Maps from install_req -> dependencies_of_install_req
self._dependencies = defaultdict(list)
def __str__(self):
reqs = [req for req in self.requirements.values()
if not req.comes_from]
reqs.sort(key=lambda req: req.name.lower())
return ' '.join([str(req.req) for req in reqs])
def __repr__(self):
reqs = [req for req in self.requirements.values()]
reqs.sort(key=lambda req: req.name.lower())
reqs_str = ', '.join([str(req.req) for req in reqs])
return ('<%s object; %d requirement(s): %s>'
% (self.__class__.__name__, len(reqs), reqs_str))
def add_requirement(self, install_req, parent_req_name=None,
extras_requested=None):
"""Add install_req as a requirement to install.
:param parent_req_name: The name of the requirement that needed this
added. The name is used because when multiple unnamed requirements
resolve to the same name, we could otherwise end up with dependency
links that point outside the Requirements set. parent_req must
already be added. Note that None implies that this is a user
supplied requirement, vs an inferred one.
:param extras_requested: an iterable of extras used to evaluate the
            environment markers.
:return: Additional requirements to scan. That is either [] if
the requirement is not applicable, or [install_req] if the
requirement is applicable and has just been added.
"""
name = install_req.name
if not install_req.match_markers(extras_requested):
logger.warning("Ignoring %s: markers '%s' don't match your "
"environment", install_req.name,
install_req.markers)
return []
# This check has to come after we filter requirements with the
# environment markers.
if install_req.link and install_req.link.is_wheel:
wheel = Wheel(install_req.link.filename)
if not wheel.supported():
raise InstallationError(
"%s is not a supported wheel on this platform." %
wheel.filename
)
install_req.as_egg = self.as_egg
install_req.use_user_site = self.use_user_site
install_req.target_dir = self.target_dir
install_req.pycompile = self.pycompile
install_req.is_direct = (parent_req_name is None)
if not name:
# url or path requirement w/o an egg fragment
self.unnamed_requirements.append(install_req)
return [install_req]
else:
try:
existing_req = self.get_requirement(name)
except KeyError:
existing_req = None
if (parent_req_name is None and existing_req and not
existing_req.constraint and
existing_req.extras == install_req.extras and not
existing_req.req.specifier == install_req.req.specifier):
raise InstallationError(
'Double requirement given: %s (already in %s, name=%r)'
% (install_req, existing_req, name))
if not existing_req:
# Add requirement
self.requirements[name] = install_req
# FIXME: what about other normalizations? E.g., _ vs. -?
if name.lower() != name:
self.requirement_aliases[name.lower()] = name
result = [install_req]
else:
                # Assume there's no need to scan: we've already
                # encountered this requirement and scanned it.
result = []
if not install_req.constraint and existing_req.constraint:
if (install_req.link and not (existing_req.link and
install_req.link.path == existing_req.link.path)):
self.reqs_to_cleanup.append(install_req)
raise InstallationError(
"Could not satisfy constraints for '%s': "
"installation from path or url cannot be "
"constrained to a version" % name)
# If we're now installing a constraint, mark the existing
# object for real installation.
existing_req.constraint = False
existing_req.extras = tuple(
sorted(set(existing_req.extras).union(
set(install_req.extras))))
logger.debug("Setting %s extras to: %s",
existing_req, existing_req.extras)
# And now we need to scan this.
result = [existing_req]
# Canonicalise to the already-added object for the backref
# check below.
install_req = existing_req
if parent_req_name:
parent_req = self.get_requirement(parent_req_name)
self._dependencies[parent_req].append(install_req)
return result
def has_requirement(self, project_name):
name = project_name.lower()
if (name in self.requirements and
not self.requirements[name].constraint or
name in self.requirement_aliases and
not self.requirements[self.requirement_aliases[name]].constraint):
return True
return False
@property
def has_requirements(self):
return list(req for req in self.requirements.values() if not
req.constraint) or self.unnamed_requirements
@property
def is_download(self):
if self.download_dir:
self.download_dir = expanduser(self.download_dir)
if os.path.exists(self.download_dir):
return True
else:
logger.critical('Could not find download directory')
raise InstallationError(
"Could not find or access download directory '%s'"
% display_path(self.download_dir))
return False
def get_requirement(self, project_name):
for name in project_name, project_name.lower():
if name in self.requirements:
return self.requirements[name]
if name in self.requirement_aliases:
return self.requirements[self.requirement_aliases[name]]
raise KeyError("No project with the name %r" % project_name)
def uninstall(self, auto_confirm=False):
for req in self.requirements.values():
if req.constraint:
continue
req.uninstall(auto_confirm=auto_confirm)
req.commit_uninstall()
def prepare_files(self, finder):
"""
Prepare process. Create temp directories, download and/or unpack files.
"""
# make the wheelhouse
if self.wheel_download_dir:
ensure_dir(self.wheel_download_dir)
# If any top-level requirement has a hash specified, enter
# hash-checking mode, which requires hashes from all.
root_reqs = self.unnamed_requirements + self.requirements.values()
require_hashes = (self.require_hashes or
any(req.has_hash_options for req in root_reqs))
if require_hashes and self.as_egg:
raise InstallationError(
'--egg is not allowed with --require-hashes mode, since it '
'delegates dependency resolution to setuptools and could thus '
'result in installation of unhashed packages.')
# Actually prepare the files, and collect any exceptions. Most hash
# exceptions cannot be checked ahead of time, because
# req.populate_link() needs to be called before we can make decisions
# based on link type.
discovered_reqs = []
hash_errors = HashErrors()
for req in chain(root_reqs, discovered_reqs):
try:
discovered_reqs.extend(self._prepare_file(
finder,
req,
require_hashes=require_hashes,
ignore_dependencies=self.ignore_dependencies))
except HashError as exc:
exc.req = req
hash_errors.append(exc)
if hash_errors:
raise hash_errors
def _is_upgrade_allowed(self, req):
return self.upgrade and (
self.upgrade_strategy == "eager" or (
self.upgrade_strategy == "only-if-needed" and req.is_direct
)
)
def _check_skip_installed(self, req_to_install, finder):
"""Check if req_to_install should be skipped.
This will check if the req is installed, and whether we should upgrade
or reinstall it, taking into account all the relevant user options.
After calling this req_to_install will only have satisfied_by set to
None if the req_to_install is to be upgraded/reinstalled etc. Any
other value will be a dist recording the current thing installed that
satisfies the requirement.
Note that for vcs urls and the like we can't assess skipping in this
routine - we simply identify that we need to pull the thing down,
then later on it is pulled down and introspected to assess upgrade/
reinstalls etc.
:return: A text reason for why it was skipped, or None.
"""
# Check whether to upgrade/reinstall this req or not.
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
upgrade_allowed = self._is_upgrade_allowed(req_to_install)
            # Is the best version already installed?
best_installed = False
if upgrade_allowed:
                # For link-based requirements we have to pull the
                # tree down and inspect it to assess the version
                # number, so that is handled further down.
if not (self.force_reinstall or req_to_install.link):
try:
finder.find_requirement(
req_to_install, upgrade_allowed)
except BestVersionAlreadyInstalled:
best_installed = True
except DistributionNotFound:
                        # No distribution found, so we squash the
                        # error - it will be raised again later when
                        # we retry the install.
                        # Why don't we just raise here?
pass
if not best_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
# Figure out a nice message to say why we're skipping this.
if best_installed:
skip_reason = 'already up-to-date'
elif self.upgrade_strategy == "only-if-needed":
skip_reason = 'not upgraded as not directly required'
else:
skip_reason = 'already satisfied'
return skip_reason
else:
return None
def _prepare_file(self,
finder,
req_to_install,
require_hashes=False,
ignore_dependencies=False):
"""Prepare a single requirements file.
:return: A list of additional InstallRequirements to also install.
"""
# Tell user what we are doing for this requirement:
# obtain (editable), skipping, processing (local url), collecting
# (remote url or package name)
if req_to_install.constraint or req_to_install.prepared:
return []
req_to_install.prepared = True
# ###################### #
# # print log messages # #
# ###################### #
if req_to_install.editable:
logger.info('Obtaining %s', req_to_install)
else:
# satisfied_by is only evaluated by calling _check_skip_installed,
# so it must be None here.
assert req_to_install.satisfied_by is None
if not self.ignore_installed:
skip_reason = self._check_skip_installed(
req_to_install, finder)
if req_to_install.satisfied_by:
assert skip_reason is not None, (
'_check_skip_installed returned None but '
'req_to_install.satisfied_by is set to %r'
% (req_to_install.satisfied_by,))
logger.info(
'Requirement %s: %s', skip_reason,
req_to_install)
else:
if (req_to_install.link and
req_to_install.link.scheme == 'file'):
path = url_to_path(req_to_install.link.url)
logger.info('Processing %s', display_path(path))
else:
logger.info('Collecting %s', req_to_install)
with indent_log():
# ################################ #
# # vcs update or unpack archive # #
# ################################ #
if req_to_install.editable:
if require_hashes:
raise InstallationError(
'The editable requirement %s cannot be installed when '
'requiring hashes, because there is no single file to '
'hash.' % req_to_install)
req_to_install.ensure_has_source_dir(self.src_dir)
req_to_install.update_editable(not self.is_download)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
req_to_install.archive(self.download_dir)
req_to_install.check_if_exists()
elif req_to_install.satisfied_by:
if require_hashes:
logger.debug(
'Since it is already installed, we are trusting this '
'package without checking its hash. To ensure a '
'completely repeatable environment, install into an '
'empty virtualenv.')
abstract_dist = Installed(req_to_install)
else:
                # @@ if filesystem packages are not marked
                # editable in a req, a non-deterministic error
                # occurs when the script attempts to unpack the
                # build directory
req_to_install.ensure_has_source_dir(self.build_dir)
                # If a checkout exists, it's unwise to keep going.
                # Version inconsistencies are logged later, but do not
                # fail the installation.
# FIXME: this won't upgrade when there's an existing
# package unpacked in `req_to_install.source_dir`
if os.path.exists(
os.path.join(req_to_install.source_dir, 'setup.py')):
raise PreviousBuildDirError(
"pip can't proceed with requirements '%s' due to a"
" pre-existing build directory (%s). This is "
"likely due to a previous installation that failed"
". pip is being responsible and not assuming it "
"can delete this. Please delete it and try again."
% (req_to_install, req_to_install.source_dir)
)
req_to_install.populate_link(
finder,
self._is_upgrade_allowed(req_to_install),
require_hashes
)
# We can't hit this spot and have populate_link return None.
# req_to_install.satisfied_by is None here (because we're
# guarded) and upgrade has no impact except when satisfied_by
# is not None.
# Then inside find_requirement existing_applicable -> False
# If no new versions are found, DistributionNotFound is raised,
# otherwise a result is guaranteed.
assert req_to_install.link
link = req_to_install.link
# Now that we have the real link, we can tell what kind of
# requirements we have and raise some more informative errors
# than otherwise. (For example, we can raise VcsHashUnsupported
# for a VCS URL rather than HashMissing.)
if require_hashes:
# We could check these first 2 conditions inside
# unpack_url and save repetition of conditions, but then
# we would report less-useful error messages for
# unhashable requirements, complaining that there's no
# hash provided.
if is_vcs_url(link):
raise VcsHashUnsupported()
elif is_file_url(link) and is_dir_url(link):
raise DirectoryUrlHashUnsupported()
if (not req_to_install.original_link and
not req_to_install.is_pinned):
# Unpinned packages are asking for trouble when a new
# version is uploaded. This isn't a security check, but
# it saves users a surprising hash mismatch in the
# future.
#
# file:/// URLs aren't pinnable, so don't complain
# about them not being pinned.
raise HashUnpinned()
hashes = req_to_install.hashes(
trust_internet=not require_hashes)
if require_hashes and not hashes:
# Known-good hashes are missing for this requirement, so
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
hashes = MissingHashes()
try:
download_dir = self.download_dir
                    # We always delete unpacked sdists after pip runs.
autodelete_unpacked = True
if req_to_install.link.is_wheel \
and self.wheel_download_dir:
# when doing 'pip wheel` we download wheels to a
# dedicated dir.
download_dir = self.wheel_download_dir
if req_to_install.link.is_wheel:
if download_dir:
# When downloading, we only unpack wheels to get
# metadata.
autodelete_unpacked = True
else:
# When installing a wheel, we use the unpacked
# wheel.
autodelete_unpacked = False
unpack_url(
req_to_install.link, req_to_install.source_dir,
download_dir, autodelete_unpacked,
session=self.session, hashes=hashes)
except requests.HTTPError as exc:
logger.critical(
'Could not install requirement %s because '
'of error %s',
req_to_install,
exc,
)
raise InstallationError(
'Could not install requirement %s because '
'of HTTP error %s for URL %s' %
(req_to_install, exc, req_to_install.link)
)
abstract_dist = make_abstract_dist(req_to_install)
abstract_dist.prep_for_dist()
if self.is_download:
# Make a .zip of the source_dir we already created.
if req_to_install.link.scheme in vcs.all_schemes:
req_to_install.archive(self.download_dir)
# req_to_install.req is only avail after unpack for URL
# pkgs repeat check_if_exists to uninstall-on-upgrade
# (#14)
if not self.ignore_installed:
req_to_install.check_if_exists()
if req_to_install.satisfied_by:
if self.upgrade or self.ignore_installed:
# don't uninstall conflict if user install and
# conflict is not user install
if not (self.use_user_site and not
dist_in_usersite(
req_to_install.satisfied_by)):
req_to_install.conflicts_with = \
req_to_install.satisfied_by
req_to_install.satisfied_by = None
else:
logger.info(
'Requirement already satisfied (use '
'--upgrade to upgrade): %s',
req_to_install,
)
# ###################### #
# # parse dependencies # #
# ###################### #
dist = abstract_dist.dist(finder)
try:
check_dist_requires_python(dist)
except UnsupportedPythonVersion as e:
if self.ignore_requires_python:
logger.warning(e.args[0])
else:
req_to_install.remove_temporary_source()
raise
more_reqs = []
def add_req(subreq, extras_requested):
sub_install_req = InstallRequirement(
str(subreq),
req_to_install,
isolated=self.isolated,
wheel_cache=self._wheel_cache,
)
more_reqs.extend(self.add_requirement(
sub_install_req, req_to_install.name,
extras_requested=extras_requested))
# We add req_to_install before its dependencies, so that we
# can refer to it when adding dependencies.
if not self.has_requirement(req_to_install.name):
# 'unnamed' requirements will get added here
self.add_requirement(req_to_install, None)
if not ignore_dependencies:
if (req_to_install.extras):
logger.debug(
"Installing extra requirements: %r",
','.join(req_to_install.extras),
)
missing_requested = sorted(
set(req_to_install.extras) - set(dist.extras)
)
for missing in missing_requested:
logger.warning(
'%s does not provide the extra \'%s\'',
dist, missing
)
available_requested = sorted(
set(dist.extras) & set(req_to_install.extras)
)
for subreq in dist.requires(available_requested):
add_req(subreq, extras_requested=available_requested)
# cleanup tmp src
self.reqs_to_cleanup.append(req_to_install)
if not req_to_install.editable and not req_to_install.satisfied_by:
# XXX: --no-install leads this to report 'Successfully
# downloaded' for only non-editable reqs, even though we took
# action on them.
self.successfully_downloaded.append(req_to_install)
return more_reqs
def cleanup_files(self):
"""Clean up files, remove builds."""
logger.debug('Cleaning up...')
with indent_log():
for req in self.reqs_to_cleanup:
req.remove_temporary_source()
def _to_install(self):
"""Create the installation order.
The installation order is topological - requirements are installed
before the requiring thing. We break cycles at an arbitrary point,
and make no other guarantees.
"""
# The current implementation, which we may change at any point
# installs the user specified things in the order given, except when
# dependencies must come earlier to achieve topological order.
order = []
ordered_reqs = set()
def schedule(req):
if req.satisfied_by or req in ordered_reqs:
return
if req.constraint:
return
ordered_reqs.add(req)
for dep in self._dependencies[req]:
schedule(dep)
order.append(req)
for install_req in self.requirements.values():
schedule(install_req)
return order
def install(self, install_options, global_options=(), *args, **kwargs):
"""
Install everything in this set (after having downloaded and unpacked
the packages)
"""
to_install = self._to_install()
if to_install:
logger.info(
'Installing collected packages: %s',
', '.join([req.name for req in to_install]),
)
with indent_log():
for requirement in to_install:
if requirement.conflicts_with:
logger.info(
'Found existing installation: %s',
requirement.conflicts_with,
)
with indent_log():
requirement.uninstall(auto_confirm=True)
try:
requirement.install(
install_options,
global_options,
*args,
**kwargs
)
except:
# if install did not succeed, rollback previous uninstall
if (requirement.conflicts_with and not
requirement.install_succeeded):
requirement.rollback_uninstall()
raise
else:
if (requirement.conflicts_with and
requirement.install_succeeded):
requirement.commit_uninstall()
requirement.remove_temporary_source()
self.successfully_installed = to_install
|
SanziShi/KVMGT-kernel
|
refs/heads/master
|
tools/perf/util/setup.py
|
989
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
self.build_lib = build_lib
self.build_temp = build_tmp
class install_lib(_install_lib):
def finalize_options(self):
_install_lib.finalize_options(self)
self.build_dir = build_lib
cflags = getenv('CFLAGS', '').split()
# switch off several checks (need to be at the end of cflags list)
cflags += ['-fno-strict-aliasing', '-Wno-write-strings', '-Wno-unused-parameter' ]
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
libtraceevent = getenv('LIBTRACEEVENT')
libapikfs = getenv('LIBAPIKFS')
ext_sources = [f.strip() for f in file('util/python-ext-sources')
if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
sources = ext_sources,
include_dirs = ['util/include'],
extra_compile_args = cflags,
extra_objects = [libtraceevent, libapikfs],
)
setup(name='perf',
version='0.1',
description='Interface with the Linux profiling infrastructure',
author='Arnaldo Carvalho de Melo',
author_email='acme@redhat.com',
license='GPLv2',
url='http://perf.wiki.kernel.org',
ext_modules=[perf],
cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
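
# Hedged note: this setup.py is normally driven by perf's Makefile, which
# exports the environment variables read above. Running it standalone
# would need something like (paths are placeholders):
#   PYTHON_EXTBUILD_LIB=/tmp/lib PYTHON_EXTBUILD_TMP=/tmp/tmp \
#   LIBTRACEEVENT=.../libtraceevent.a LIBAPIKFS=.../libapikfs.a \
#   python2 util/setup.py build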
|
keithlee/shakeAppPyDev
|
refs/heads/master
|
django/db/models/related.py
|
231
|
from django.utils.encoding import smart_unicode
from django.db.models.fields import BLANK_CHOICE_DASH
class BoundRelatedObject(object):
def __init__(self, related_object, field_mapping, original):
self.relation = related_object
self.field_mappings = field_mapping[related_object.name]
def template_name(self):
raise NotImplementedError
def __repr__(self):
return repr(self.__dict__)
class RelatedObject(object):
def __init__(self, parent_model, model, field):
self.parent_model = parent_model
self.model = model
self.opts = model._meta
self.field = field
self.name = '%s:%s' % (self.opts.app_label, self.opts.module_name)
self.var_name = self.opts.object_name.lower()
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
limit_to_currently_related=False):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field.
Analogue of django.db.models.fields.Field.get_choices, provided
initially for utilisation by RelatedFilterSpec.
"""
first_choice = include_blank and blank_choice or []
queryset = self.model._default_manager.all()
if limit_to_currently_related:
queryset = queryset.complex_filter(
{'%s__isnull' % self.parent_model._meta.module_name: False})
lst = [(x._get_pk_val(), smart_unicode(x)) for x in queryset]
return first_choice + lst
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
# Defer to the actual field definition for db prep
return self.field.get_db_prep_lookup(lookup_type, value,
connection=connection, prepared=prepared)
def editable_fields(self):
"Get the fields in this class that should be edited inline."
return [f for f in self.opts.fields + self.opts.many_to_many if f.editable and f != self.field]
def __repr__(self):
return "<RelatedObject: %s related to %s>" % (self.name, self.field.name)
def bind(self, field_mapping, original, bound_related_object_class=BoundRelatedObject):
return bound_related_object_class(self, field_mapping, original)
def get_accessor_name(self):
# This method encapsulates the logic that decides what name to give an
# accessor descriptor that retrieves related many-to-one or
# many-to-many objects. It uses the lower-cased object_name + "_set",
# but this can be overridden with the "related_name" option.
if self.field.rel.multiple:
# If this is a symmetrical m2m relation on self, there is no reverse accessor.
if getattr(self.field.rel, 'symmetrical', False) and self.model == self.parent_model:
return None
return self.field.rel.related_name or (self.opts.object_name.lower() + '_set')
else:
return self.field.rel.related_name or (self.opts.object_name.lower())
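    # Hedged illustration: a ForeignKey from Comment to Article with no
    # related_name set yields 'comment_set' on Article (rel.multiple is
    # True); a OneToOneField would yield just 'comment'.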
def get_cache_name(self):
return "_%s_cache" % self.get_accessor_name()
|
opennetworkinglab/OpenVirteX
|
refs/heads/master
|
utils/embedder.py
|
8
|
#!/usr/bin/env python
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
import sys
import json
import threading
import logging as log
import urllib2
from argparse import ArgumentParser
import subprocess
import time
CLONE_VM = '/usr/bin/VBoxManage clonevm OVX --snapshot Master --mode machine --options link --name %s --register'
GET_IP_VM = '/usr/bin/VBoxManage guestcontrol %s execute --image /home/ovx/get-ip.sh --wait-exit --username ovx --password ovx --wait-stdout -- eth0'
START_VM = '/usr/bin/VBoxManage startvm %s --type headless'
#START_VM = '/usr/bin/VBoxManage startvm %s'
STOP_VM = '/usr/bin/VBoxManage controlvm %s poweroff'
UNREGISTER_VM = '/usr/bin/VBoxManage unregistervm %s --delete'
class ERROR_CODE:
PARSE_ERROR = -32700 # Invalid JSON was received by the server.
INVALID_REQ = -32600 # The JSON sent is not a valid Request object.
METHOD_NOT_FOUND = -32601 # The method does not exist / is not available.
INVALID_PARAMS = -32602 # Invalid method parameter(s).
INTERNAL_ERROR = -32603 # Internal JSON-RPC error.
class OVXException(Exception):
def __init__(self, code, msg, tenantId, rollback=False):
self.code = code
self.msg = msg
self.rollback = rollback
self.tenantId = tenantId
def __str__(self):
return '%s (%s)' % (self.msg, self.code)
class EmbedderException(Exception):
def __init__(self, code, msg):
self.code = code
self.msg = msg
def __str__(self):
return '%s (%s)' % (self.msg, self.code)
# Convert dotted hex to long value
def hexToLong(h):
return int(h.replace(':', ''), 16)
# Convert long value to dotted hex value with specified length in bytes
def longToHex(l, length=8):
h = ("%x" % l)
if len(h) % 2 != 0:
h = '0' + h
result = ':'.join([h[i:i+2] for i in range(0, len(h), 2)])
prefix = '00:' * (length - (len(h) / 2) - (len(h) % 2))
return prefix + result
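# Hedged doctest-style example (not in the original file) showing the two
# helpers above round-tripping a dpid:
#
#   >>> longToHex(1)
#   '00:00:00:00:00:00:00:01'
#   >>> hexToLong('00:00:00:00:00:00:00:01')
#   1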
class Routing():
def __init__(self, topology):
try:
self.nodes = topology['switches']
self.links = {}
for link in topology['links']:
src = link['src']
dst = link['dst']
self.links[(src['dpid'], src['port'])] = (dst['dpid'], dst['port'])
self.SP = {}
except:
pass
def _neighbours(self, node):
"""Returns list of nodes that are neighbour to node.
Assumes nodes are connected on at most one port, i.e., multigraphs are not supported (should
delete duplicate entries from result.
"""
return [dst_node for (src_node,src_port),(dst_node,dst_port) in self.links.iteritems() if src_node == node]
def _shortestPath(self, src, dst):
"""Calculates shortest path between src and dst switches and stores it in the SP dict.
Assumes graph is connected.
"""
distance = {}
previous = {}
for node in self.nodes:
distance[node] = sys.maxint
distance[src] = 0
# Sort Q according to distance
Q = sorted(distance, key=distance.get)
while Q:
current = Q.pop(0)
if distance[current] == sys.maxint:
log.error("Graph is disconnected")
                # TODO: raise an exception
break
for neighbour in self._neighbours(current):
alt = distance[current] + 1
if alt < distance[neighbour]:
distance[neighbour] = alt
previous[neighbour] = current
# TODO: really should use a heap instead of resorting every time
Q = sorted(distance, key=distance.get)
# Path is between current and src (first iteration of outer while: current == src, previous[current] undefined)
x = current
path = []
        # Walk predecessors back to the source (previous[src] is never set)
        while x in previous:
path.append(x)
x = previous[x]
path.append(src)
path.reverse()
self.SP[(src, current)] = path
def _findPorts(self, dpid1, dpid2):
"""Returns tuple (port_out, port_in) with port_out on dpid1 and port_in on dpid2, None if switches are not connected."""
# Iterates over all links in worst case!
for (dpid_out, port_out), (dpid_in, port_in) in self.links.iteritems():
if (dpid1 == dpid_out) and (dpid2 == dpid_in):
return (port_out, port_in)
return None
def _findOutPort(self, dpid1, dpid2):
"""Returns output port on dpid1 that connects to dpid2, None if switches are not connected."""
return self._findPorts(dpid1, dpid2)[0]
def _findInPort(self, dpid1, dpid2):
"""Returns input port on dpid2 that is connected to dpid1, None if switches are not connected."""
return self._findPorts(dpid1, dpid2)[1]
def getRoute(self, dpid_in, dpid_out):
"""Find route between dpid_in and dpid_out.
Route is of form [ dpid ]
"""
# Catch trivial path
if dpid_in == dpid_out:
return [ (dpid_in) ]
# Calculate path
if (dpid_in, dpid_out) not in self.SP.keys():
self._shortestPath(dpid_in, dpid_out)
route = self.SP[(dpid_in, dpid_out)]
return route
def parseRoute(self, route):
"""Parse route specified and return OVX-type path string.
Input route is of form [ dpid ], while return path is of form dpid1/port1-dpid2/port2,...
"""
path = ''
for index in xrange(0, len(route) - 1):
outPort = self._findOutPort(route[index], route[index + 1])
inPort = self._findInPort(route[index], route[index + 1])
path += "%s/%s-%s/%s," % (parseDpid(route[index]), outPort, parseDpid(route[index + 1]), inPort)
# Remove final comma
return path[:-1]
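# Hedged usage sketch for Routing (not in the original file). The topology
# dict is assumed to be in the shape returned by
# OVXClient.getPhysicalTopology(); note that parseRoute() relies on a
# parseDpid() helper that is not defined in this module.
#
#   topo = {'switches': [dpid1, dpid2, ...],
#           'links': [{'src': {'dpid': dpid1, 'port': 1},
#                      'dst': {'dpid': dpid2, 'port': 2}}, ...]}
#   routing = Routing(topo)
#   route = routing.getRoute(dpid1, dpid2)  # [dpid1, ..., dpid2]
#   path = routing.parseRoute(route)        # "dpid1/port1-dpid2/port2,..."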
class OVXClient():
def __init__(self, host, port, user, password):
self.host = host
self.port = port
self.user = user
self.password = password
self.base_url = "http://%s:%s/" % (self.host, self.port)
self.tenant_url = self.base_url + 'tenant'
self.status_url = self.base_url + 'status'
def _buildRequest(self, data, url, cmd):
j = { "id" : "ovxembedder", "method" : cmd, "jsonrpc" : "2.0" }
h = {"Content-Type" : "application/json"}
if data is not None:
j['params'] = data
return urllib2.Request(url, json.dumps(j), h)
def _parseResponse(self, data):
j = json.loads(data)
if 'error' in j:
e = OVXException(j['error']['code'], j['error']['message'], -1)
log.error(e)
raise e
return j['result']
def _connect(self, cmd, url, data=None):
log.debug("%s: %s" % (cmd, data))
try:
passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
passman.add_password(None, url, self.user, self.password)
authhandler = urllib2.HTTPBasicAuthHandler(passman)
opener = urllib2.build_opener(authhandler)
req = self._buildRequest(data, url, cmd)
ph = opener.open(req)
return self._parseResponse(ph.read())
except urllib2.URLError as e:
log.error(e)
sys.exit(1)
except urllib2.HTTPError as e:
if e.code == 401:
log.error("Authentication failed: invalid password")
# TODO
sys.exit(1)
elif e.code == 504:
log.error("HTTP Error 504: Gateway timeout")
# TODO
sys.exit(1)
else:
log.error(e)
except RuntimeError as e:
log.error(e)
def createNetwork(self, ctrls, net_address, net_mask):
req = {'controllerUrls': ctrls,
'networkAddress': net_address, 'mask': net_mask}
try:
ret = self._connect("createNetwork", self.tenant_url, data=req)
tenantId = ret.get('tenantId')
if tenantId:
log.info("Network with tenantId %s has been created" % tenantId)
return tenantId
except OVXException as e:
e.rollback = False
raise
def removeNetwork(self, tenantId):
req = {'tenantId': tenantId}
try:
ret = self._connect("removeNetwork", self.tenant_url, data=req)
log.info("Network with tenantId %s has been removed" % tenantId)
except OVXException as e:
e.rollback = False
raise
def createSwitch(self, tenantId, dpids, dpid=None):
req = {'tenantId': tenantId, 'dpids': dpids}
if dpid:
req["vdpid"] = dpid
try:
ret = self._connect("createSwitch", self.tenant_url, data=req)
switchId = ret.get('vdpid')
if switchId:
log.info("Switch with switchId %s has been created" % longToHex(switchId))
return switchId
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def createPort(self, tenantId, dpid, port):
req = {'tenantId': tenantId, 'dpid': dpid, 'port': port}
try:
ret = self._connect("createPort", self.tenant_url, data=req)
switchId = ret.get('vdpid')
portId = ret.get('vport')
if switchId and portId:
log.info("Port on switch %s with port number %s has been created" % (longToHex(switchId), portId))
return (switchId, portId)
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def connectLink(self, tenantId, srcDpid, srcPort, dstDpid, dstPort, algorithm, backup_num):
req = {'tenantId': tenantId, 'srcDpid': srcDpid, 'srcPort': srcPort, 'dstDpid': dstDpid, 'dstPort': dstPort, 'algorithm': algorithm, 'backup_num': backup_num}
try:
ret = self._connect("connectLink", self.tenant_url, data=req)
linkId = ret.get('linkId')
if linkId:
log.info("Link with linkId %s has been created" % linkId)
return linkId
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def setLinkPath(self, tenantId, linkId, path, priority):
req = {'tenantId': tenantId, 'linkId': linkId, 'path': path, 'priority': priority}
try:
ret = self._connect("setLinkPath", self.tenant_url, data=req)
if ret:
log.info("Path on link %s has been set" % linkId)
return ret
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def connectHost(self, tenantId, dpid, port, mac):
req = {'tenantId': tenantId, 'vdpid': dpid, 'vport': port, 'mac': mac}
try:
ret = self._connect("connectHost", self.tenant_url, data=req)
hostId = ret.get('hostId')
if hostId:
log.info("Host with hostId %s connected" % hostId)
return hostId
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def connectRoute(self, tenantId, switchId, srcPort, dstPort, path):
req = {'tenantId': tenantId, 'vdpid': switchId, 'srcPort': srcPort, 'dstPort': dstPort, 'path': path}
try:
ret = self._connect("connectRoute", self.tenant_url, data=req)
            routeId = ret.get('routeId')
if routeId:
log.info("Route with routeId %s on switch %s between ports (%s,%s) created" % (routeId, switchId, srcPort, dstPort))
return routeId
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def createSwitchRoute(self, tenantId, switchId, srcPort, dstPort, path):
req = {'tenantId': tenantId, 'dpid': switchId, 'srcPort': srcPort, 'dstPort': dstPort, 'path': path}
try:
ret = self._connect("createSwitchRoute", self.tenant_url, data=req)
if ret:
log.info("Route on switch %s between ports (%s,%s) created" % (switchId, srcPort, dstPort))
return ret
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def startNetwork(self, tenantId):
req = {'tenantId': tenantId}
try:
ret = self._connect("startNetwork", self.tenant_url, data=req)
if ret:
log.info("Network with tenantId %s has been started" % tenantId)
return ret
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
def getPhysicalTopology(self):
ret = self._connect("getPhysicalTopology", self.status_url)
try:
if ret:
log.info("Physical network topology received")
return ret
except OVXException as e:
e.rollback = False
raise
def setInternalRouting(self, tenantId, dpid, algorithm, backup_num):
req = {'tenantId': tenantId, 'vdpid': dpid, 'algorithm': algorithm, 'backup_num': backup_num}
try:
ret = self._connect("setInternalRouting", self.tenant_url, data=req)
if ret:
log.info("Internal routing of switch %s has been set to %s" % (longToHex(dpid), algorithm))
return ret
except OVXException as e:
e.rollback = True
e.tenantId = tenantId
raise
class OVXEmbedderHandler(BaseHTTPRequestHandler):
"""
Implementation of JSON-RPC API, defines all API handler methods.
"""
def _buildResponse(self, json_id, result=None, error=None):
"""Returns JSON 2.0 compliant response"""
res = {}
res['jsonrpc'] = '2.0'
# result and error are mutually exclusive
if result is not None:
res['result'] = result
elif error is not None:
res['error'] = error
res['id'] = json_id
return res
def _buildError(self, code, message, data=None):
"""Returns JSON RPC 2.0 error object"""
res = {}
res['code'] = code
res['message'] = message
if data:
res['data'] = data
return res
def doBigSwitchNetwork(self, controller, routing, subnet, hosts):
"""Create OVX network that is a single big switch"""
client = self.server.client
# request physical topology
phyTopo = client.getPhysicalTopology()
# spawn controller if necessary
# TODO: do proper string comparison
if controller['type'] == 'default':
proto = self.server.ctrlProto
host = self.server._spawnController()
port = self.server.ctrlPort
ctrls = ["%s:%s:%s" % (proto, host, port)]
elif controller['type'] == 'custom':
ctrls = controller['ctrls']
else:
raise EmbedderException(ERROR_CODE.INVALID_REQ, 'Unsupported controller type')
        # split subnet into network address and netmask
(net_address, net_mask) = subnet.split('/')
# create virtual network
tenantId = client.createNetwork(ctrls, net_address, int(net_mask))
# create virtual switch with all physical dpids
dpids = [hexToLong(dpid) for dpid in phyTopo['switches']]
switchId = client.createSwitch(tenantId, dpids)
# set routing algorithm and number of backups
client.setInternalRouting(tenantId, switchId, routing['algorithm'], routing['backup_num'])
# create virtual ports and connect hosts
for host in hosts:
(vdpid, vport) = client.createPort(tenantId, hexToLong(host['dpid']), host['port'])
client.connectHost(tenantId, vdpid, vport, host['mac'])
# Start virtual network
client.startNetwork(tenantId)
return tenantId
def doPhysicalNetwork(self, controller, routing, subnet, hosts, copyDpid = False):
"""Create OVX network that is clone of physical network"""
client = self.server.client
# request physical topology
phyTopo = client.getPhysicalTopology()
# spawn controller if necessary
if controller['type'] == 'default':
proto = self.server.ctrlProto
host = self.server._spawnController()
port = self.server.ctrlPort
ctrls = ["%s:%s:%s" % (proto, host, port)]
elif controller['type'] == 'custom':
ctrls = controller['ctrls']
else:
raise EmbedderException(ERROR_CODE.INVALID_REQ, 'Unsupported controller type')
        # split subnet into network address and netmask
(net_address, net_mask) = subnet.split('/')
# create virtual network
tenantId = client.createNetwork(ctrls, net_address, int(net_mask))
# create virtual switch per physical dpid
for dpid in phyTopo['switches']:
if copyDpid:
client.createSwitch(tenantId, [hexToLong(dpid)], dpid=hexToLong(dpid))
else:
client.createSwitch(tenantId, [hexToLong(dpid)])
# create virtual ports and connect hosts
for host in hosts:
(vdpid, vport) = client.createPort(tenantId, hexToLong(host['dpid']), host['port'])
client.connectHost(tenantId, vdpid, vport, host['mac'])
# create virtual ports and connect virtual links
connected = []
for link in phyTopo['links']:
if (link['src']['dpid'], link['src']['port']) not in connected:
srcDpid = hexToLong(link['src']['dpid'])
# Type conversions needed because OVX JSON output is stringified
srcPort = int(link['src']['port'])
(srcVDpid, srcVPort) = client.createPort(tenantId, srcDpid, srcPort)
dstDpid = hexToLong(link['dst']['dpid'])
dstPort = int(link['dst']['port'])
(dstVDpid, dstVPort) = client.createPort(tenantId, dstDpid, dstPort)
src = "%s/%s" % (srcDpid, srcPort)
dst = "%s/%s" % (dstDpid, dstPort)
path = "%s-%s" % (src, dst)
client.connectLink(tenantId, srcVDpid, srcVPort, dstVDpid, dstVPort, routing['algorithm'], routing['backup_num'])
connected.append((link['dst']['dpid'], link['dst']['port']))
# boot network
client.startNetwork(tenantId)
return tenantId
def _exec_createNetwork(self, json_id, params):
"""Handler for automated network creation"""
try:
p = params.get('network')
            if p is None:
raise EmbedderException(ERROR_CODE.INVALID_REQ, 'Missing network section')
tenantId = -1
networkType = p.get('type')
            if networkType is None:
raise EmbedderException(ERROR_CODE.INVALID_REQ, 'Missing network type')
elif networkType == 'bigswitch':
tenantId = self.doBigSwitchNetwork(p['controller'], p['routing'], p['subnet'], p['hosts'])
elif networkType == 'physical':
tenantId = self.doPhysicalNetwork(p['controller'], p['routing'], p['subnet'], p['hosts'], copyDpid=p.get('copy-dpid', False))
else:
raise EmbedderException(ERROR_CODE.INVALID_REQ, 'Unsupported network type')
response = self._buildResponse(json_id, result={ 'tenantId' : tenantId })
except OVXException as e:
if e.rollback:
client = self.server.client
client.removeNetwork(e.tenantId)
err = self._buildError(e.code, e.msg)
response = self._buildResponse(json_id, error=err)
except EmbedderException as e:
log.error(e)
err = self._buildError(e.code, e.msg)
response = self._buildResponse(json_id, error=err)
return response
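    # Hedged example (not in the original file) of a JSON-RPC 2.0 request
    # body accepted by _exec_createNetwork; all field values are
    # illustrative only:
    #
    #   {"jsonrpc": "2.0", "id": 1, "method": "createNetwork",
    #    "params": {"network": {
    #        "type": "physical",
    #        "controller": {"type": "custom",
    #                       "ctrls": ["tcp:10.0.0.1:6633"]},
    #        "routing": {"algorithm": "spf", "backup_num": 1},
    #        "subnet": "192.168.0.0/24",
    #        "hosts": [{"dpid": "00:00:00:00:00:00:01:00",
    #                   "port": 1, "mac": "00:00:00:00:00:01"}]}}}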
def do_POST(self):
"""Handle HTTP POST calls"""
def reply(response):
response = json.dumps(response) + '\n'
self.send_response(200, "OK")
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", len(response))
self.end_headers()
self.wfile.write(response)
# Put JSON message in data dict
l = self.headers.get("Content-Length", "")
data = ''
if l == "":
data = self.rfile.read()
else:
data = self.rfile.read(int(l))
try:
data = json.loads(data)
        except:
            msg = "Error parsing JSON request"
            log.error(msg)
            err = self._buildError(ERROR_CODE.PARSE_ERROR, msg)
            result = self._buildResponse(None, error=err)
            # Reply immediately; `data` is unusable past this point
            reply(result)
            return
# Check if JSONRPC 2.0 compliant (correct version and json_id given)
json_id = data.get('id', None)
# Setup method to call
try:
methodName = "_exec_" + data.get('method')
method = getattr(self, methodName)
log.info(methodName)
        except:
            msg = "Method not found"
            log.info(msg)
            err = self._buildError(ERROR_CODE.METHOD_NOT_FOUND, msg)
            result = self._buildResponse(json_id, error=err)
            # Reply immediately; `method` was never resolved
            reply(result)
            return
# Get method parameters
params = data.get('params', {})
# Call method
result = method(json_id, params)
reply(result)
class OVXEmbedderServer(HTTPServer):
def __init__(self, opts):
HTTPServer.__init__(self, (opts['host'], opts['port']), OVXEmbedderHandler)
self.client = OVXClient(opts['ovxhost'], opts['ovxport'], opts['ovxuser'], opts['ovxpass'])
self.ctrlProto = opts['ctrlproto']
self.ctrlPort = opts['ctrlport']
self.controllers = []
def _spawnController(self):
ctrl = "OVX-%s" % len(self.controllers)
devnull = open('/dev/null', 'w')
log.info("Spawning controller VM %s" % ctrl)
clone_cmd = CLONE_VM % ctrl
subprocess.call(clone_cmd.split(), stdout=devnull, stderr=devnull)
start_cmd = START_VM % ctrl
subprocess.call(start_cmd.split(), stdout=devnull, stderr=devnull)
get_ip_cmd = GET_IP_VM % ctrl
while True:
try:
ret = subprocess.check_output(get_ip_cmd.split(), stderr=devnull)
except subprocess.CalledProcessError:
time.sleep(1)
continue
            ip = ret.strip()
break
self.controllers.append(ctrl)
log.info("Controller %s ready on %s" % (ctrl, ip))
return ip
def closeControllers(self):
for controller in self.controllers:
stop_cmd = STOP_VM % controller
subprocess.call(stop_cmd.split())
del_cmd = UNREGISTER_VM % controller
subprocess.call(del_cmd.split())
class OVXEmbedder(threading.Thread):
"""
OpenVirteX planner JSON RPC 2.0 server
"""
def __init__(self, opts):
threading.Thread.__init__(self)
self.httpd = OVXEmbedderServer(opts)
self.setDaemon(True)
    # Single-threaded HTTP server loop, run in this daemon thread
def run(self):
"""
Main function run by thread
"""
log.info("JSON RPC server starting")
try:
self.httpd.serve_forever()
finally:
self.httpd.server_close()
self.httpd.closeControllers()
if __name__ == '__main__':
parser = ArgumentParser(description="OpenVirteX network embedding tool.")
parser.add_argument('--host', default='localhost', help='OpenVirteX embedder host (default="localhost")')
parser.add_argument('--port', default=8000, type=int, help='OpenVirteX embedder port (default=8000)')
parser.add_argument('--ovxhost', default='localhost', help='host where OpenVirteX is running (default="localhost")')
parser.add_argument('--ovxport', default=8080, type=int, help='port where OpenVirteX is running (default=8080)')
parser.add_argument('--ovxuser', default='admin', help='OpenVirteX user (default="admin")')
parser.add_argument('--ovxpass', default='admin', help='OpenVirteX password (default="admin")')
parser.add_argument('--ctrlproto', default='tcp', help='default controller protocol (default="tcp")')
parser.add_argument('--ctrlport', default=10000, type=int, help='default controller port (default=10000)')
parser.add_argument('--loglevel', default='INFO', help='log level (default="INFO")')
parser.add_argument('--version', action='version', version='%(prog)s 0.1')
args = parser.parse_args()
opts = vars(args)
log.basicConfig(format='%(asctime)s %(message)s', level=getattr(log, opts['loglevel'].upper()))
embedder = OVXEmbedder(opts)
embedder.run()
|
kmonsoor/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/PC/VC6/build_tkinter.py
|
91
|
import os
import sys
import subprocess
TCL_MAJOR = 8
TCL_MINOR = 5
TCL_PATCH = 2
TIX_MAJOR = 8
TIX_MINOR = 4
TIX_PATCH = 3
def abspath(name):
par = os.path.pardir
return os.path.abspath(os.path.join(__file__, par, par, par, par, name))
TCL_DIR = abspath("tcl%d.%d.%d" % (TCL_MAJOR, TCL_MINOR, TCL_PATCH))
TK_DIR = abspath("tk%d.%d.%d" % (TCL_MAJOR, TCL_MINOR, TCL_PATCH))
TIX_DIR = abspath("tix%d.%d.%d" % (TIX_MAJOR, TIX_MINOR, TIX_PATCH))
OUT_DIR = abspath("tcltk")
def have_args(*a):
return any(s in sys.argv[1:] for s in a)
def enter(dir):
os.chdir(os.path.join(dir, "win"))
def main():
debug = have_args("-d", "--debug")
clean = have_args("clean")
install = have_args("install")
tcl = have_args("tcl")
tk = have_args("tk")
tix = have_args("tix")
if not(tcl) and not(tk) and not(tix):
tcl = tk = tix = True
def nmake(makefile, *a):
args = ["nmake", "/nologo", "/f", makefile, "DEBUG=%d" % debug]
args.extend(a)
subprocess.check_call(args)
if tcl:
enter(TCL_DIR)
def nmake_tcl(*a):
nmake("makefile.vc", *a)
if clean:
nmake_tcl("clean")
elif install:
nmake_tcl("install", "INSTALLDIR=" + OUT_DIR)
else:
nmake_tcl()
if tk:
enter(TK_DIR)
def nmake_tk(*a):
nmake("makefile.vc", "TCLDIR=" + TCL_DIR, *a)
if clean:
nmake_tk("clean")
elif install:
nmake_tk("install", "INSTALLDIR=" + OUT_DIR)
else:
nmake_tk()
if tix:
enter(TIX_DIR)
def nmake_tix(*a):
nmake("python.mak",
"TCL_MAJOR=%d" % TCL_MAJOR,
"TCL_MINOR=%d" % TCL_MINOR,
"TCL_PATCH=%d" % TCL_PATCH,
"MACHINE=IX86", *a)
if clean:
nmake_tix("clean")
elif install:
nmake_tix("install", "INSTALL_DIR=" + OUT_DIR)
else:
nmake_tix()
if __name__ == '__main__':
main()
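# Hedged usage examples (not in the original file); argument handling is
# defined by have_args() above:
#
#   python build_tkinter.py              # build tcl, tk and tix
#   python build_tkinter.py tcl --debug  # debug build of tcl only
#   python build_tkinter.py install      # nmake install into the tcltk dir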
|
nephila/django-html5-appcache
|
refs/heads/develop
|
html5_appcache/cache.py
|
1
|
# -*- coding: utf-8 -*-
"""
Cache maintains two version of the manifest file:
1. if ``get_cache_version() == 1`` it means that all the data are up to date
2. if ``get_cache_version() == 2`` manifest data are stale and need to be regenerated
The use of a version number permits serving older, but still valid,
manifest data. Depending on the actual data, an immediate update may be needed.
"""
from html5_appcache.settings import get_setting
manifest_cache_keys = (
"manifest", "timestamp", "data_clean"
)
def get_cache_key(key):
return "%s:%s" % (
get_setting('CACHE_KEY'), key)
def get_cache_version_key():
return "%s:version" % get_setting('CACHE_KEY')
def get_cache_version():
return get_cached_value("data_clean", 1)
def get_cached_value(key, version=None):
from django.core.cache import cache
if not version:
version = get_cache_version()
return cache.get(get_cache_key(key), version=version)
def set_cached_value(key, value, version=1):
from django.core.cache import cache
return cache.set(get_cache_key(key), value, get_setting('CACHE_DURATION'),
version=version)
def get_cached_manifest():
return get_cached_value("manifest")
def set_cached_manifest(manifest):
"""
    When setting a new manifest, both versions are updated.
"""
set_cached_value("data_clean", 1)
set_cached_value("manifest", manifest, 1)
set_cached_value("manifest", manifest, 2)
def reset_cache_manifest():
"""
Move to version 2 (meaning stale data).
"""
set_cached_value("data_clean", 2)
def is_manifest_clean():
"""
    Signals whether the cached manifest is clean (up to date).
"""
return get_cache_version() == 1
def clear_cache_manifest():
"""
    Clear all the values in the cache for both versions.
"""
for key in manifest_cache_keys:
set_cached_value(key, None, 1)
set_cached_value(key, None, 2)
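# Hedged usage sketch of the two-version scheme described in the module
# docstring (not in the original file):
#
#   set_cached_manifest(manifest_text)  # stores versions 1 and 2, marks clean
#   is_manifest_clean()                 # -> True   (version is 1)
#   reset_cache_manifest()              # flag data as stale (version 2)
#   is_manifest_clean()                 # -> False
#   get_cached_manifest()               # still serves the stale-but-valid copy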
|
raildo/nova
|
refs/heads/master
|
nova/servicegroup/drivers/db.py
|
18
|
# Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
import six
from nova.i18n import _, _LI, _LW
from nova.servicegroup import api
from nova.servicegroup.drivers import base
CONF = cfg.CONF
CONF.import_opt('service_down_time', 'nova.service')
LOG = logging.getLogger(__name__)
class DbDriver(base.Driver):
def __init__(self, *args, **kwargs):
self.service_down_time = CONF.service_down_time
def join(self, member, group, service=None):
"""Add a new member to a service group.
:param member: the joined member ID/name
:param group: the group ID/name, of the joined member
:param service: a `nova.service.Service` object
"""
LOG.debug('DB_Driver: join new ServiceGroup member %(member)s to '
'the %(group)s group, service = %(service)s',
{'member': member, 'group': group,
'service': service})
if service is None:
raise RuntimeError(_('service is a mandatory argument for DB based'
' ServiceGroup driver'))
report_interval = service.report_interval
if report_interval:
service.tg.add_timer(report_interval, self._report_state,
api.INITIAL_REPORTING_DELAY, service)
def is_up(self, service_ref):
"""Moved from nova.utils
Check whether a service is up based on last heartbeat.
"""
# Keep checking 'updated_at' if 'last_seen_up' isn't set.
# Should be able to use only 'last_seen_up' in the M release
last_heartbeat = (service_ref.get('last_seen_up') or
service_ref['updated_at'] or service_ref['created_at'])
if isinstance(last_heartbeat, six.string_types):
# NOTE(russellb) If this service_ref came in over rpc via
# conductor, then the timestamp will be a string and needs to be
# converted back to a datetime.
last_heartbeat = timeutils.parse_strtime(last_heartbeat)
else:
# Objects have proper UTC timezones, but the timeutils comparison
# below does not (and will fail)
last_heartbeat = last_heartbeat.replace(tzinfo=None)
# Timestamps in DB are UTC.
elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow())
is_up = abs(elapsed) <= self.service_down_time
if not is_up:
LOG.debug('Seems service is down. Last heartbeat was %(lhb)s. '
'Elapsed time is %(el)s',
{'lhb': str(last_heartbeat), 'el': str(elapsed)})
return is_up
def _report_state(self, service):
"""Update the state of this service in the datastore."""
try:
service.service_ref.report_count += 1
service.service_ref.save()
# TODO(termie): make this pattern be more elegant.
if getattr(service, 'model_disconnected', False):
service.model_disconnected = False
LOG.info(
_LI('Recovered connection to nova-conductor '
'for reporting service status.'))
# because we are communicating over conductor, a failure to
# connect is going to be a messaging failure, not a db error.
except messaging.MessagingTimeout:
if not getattr(service, 'model_disconnected', False):
service.model_disconnected = True
LOG.warn(_LW('Lost connection to nova-conductor '
'for reporting service status.'))
|
Pablo126/SSBW
|
refs/heads/master
|
Tarea4/tarea4/lib/python3.5/site-packages/django/db/backends/postgresql_psycopg2/version.py
|
427
|
from ..postgresql.version import * # NOQA
|
trankmichael/numpy
|
refs/heads/master
|
numpy/linalg/lapack_lite/fortran.py
|
132
|
from __future__ import division, absolute_import, print_function
import re
import itertools
def isBlank(line):
return not line
def isLabel(line):
return line[0].isdigit()
def isComment(line):
return line[0] != ' '
def isContinuation(line):
return line[5] != ' '
COMMENT, STATEMENT, CONTINUATION = 0, 1, 2
def lineType(line):
"""Return the type of a line of Fortan code."""
if isBlank(line):
return COMMENT
elif isLabel(line):
return STATEMENT
elif isComment(line):
return COMMENT
elif isContinuation(line):
return CONTINUATION
else:
return STATEMENT
class LineIterator(object):
"""LineIterator(iterable)
Return rstrip()'d lines from iterable, while keeping a count of the
line number in the .lineno attribute.
"""
def __init__(self, iterable):
object.__init__(self)
self.iterable = iter(iterable)
self.lineno = 0
def __iter__(self):
return self
def __next__(self):
self.lineno += 1
line = next(self.iterable)
line = line.rstrip()
return line
next = __next__
class PushbackIterator(object):
"""PushbackIterator(iterable)
Return an iterator for which items can be pushed back into.
Call the .pushback(item) method to have item returned as the next
value of .next().
"""
def __init__(self, iterable):
object.__init__(self)
self.iterable = iter(iterable)
self.buffer = []
def __iter__(self):
return self
def __next__(self):
if self.buffer:
return self.buffer.pop()
else:
return next(self.iterable)
def pushback(self, item):
self.buffer.append(item)
next = __next__
def fortranSourceLines(fo):
"""Return an iterator over statement lines of a Fortran source file.
Comment and blank lines are stripped out, and continuation lines are
merged.
"""
numberingiter = LineIterator(fo)
# add an extra '' at the end
with_extra = itertools.chain(numberingiter, [''])
pushbackiter = PushbackIterator(with_extra)
for line in pushbackiter:
t = lineType(line)
if t == COMMENT:
continue
elif t == STATEMENT:
lines = [line]
# this is where we need the extra '', so we don't finish reading
# the iterator when we don't want to handle that
for next_line in pushbackiter:
t = lineType(next_line)
if t == CONTINUATION:
lines.append(next_line[6:])
else:
pushbackiter.pushback(next_line)
break
yield numberingiter.lineno, ''.join(lines)
else:
raise ValueError("jammed: continuation line not expected: %s:%d" %
(fo.name, numberingiter.lineno))
def getDependencies(filename):
"""For a Fortran source file, return a list of routines declared as EXTERNAL
in it.
"""
fo = open(filename)
external_pat = re.compile(r'^\s*EXTERNAL\s', re.I)
routines = []
for lineno, line in fortranSourceLines(fo):
m = external_pat.match(line)
if m:
            names = line[m.end():].strip().split(',')
names = [n.strip().lower() for n in names]
names = [n for n in names if n]
routines.extend(names)
fo.close()
return routines
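# Hedged usage sketch (not in the original file); the file name and output
# are illustrative:
#
#   >>> getDependencies('dgetrf.f')  # doctest: +SKIP
#   ['dgemm', 'dgetf2', 'dlaswp', 'dtrsm', 'xerbla']
#
# fortranSourceLines() can also be used directly to iterate over
# continuation-merged statements along with their line numbers.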
|
osh/gnuradio
|
refs/heads/master
|
gr-filter/python/filter/design/fir_design.py
|
47
|
# Copyright 2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import scipy
from gnuradio import filter
from PyQt4 import QtGui
# Filter design functions using a window
def design_win_lpf(fs, gain, wintype, mainwin):
ret = True
pb,r = mainwin.gui.endofLpfPassBandEdit.text().toDouble()
ret = r and ret
sb,r = mainwin.gui.startofLpfStopBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.lpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
tb = sb - pb
try:
taps = filter.firdes.low_pass_2(gain, fs, pb, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "lpf", "pbend": pb, "sbstart": sb,
"atten": atten, "ntaps": len(taps)}
return (taps, params, ret)
else:
return ([], [], ret)
def design_win_bpf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.band_pass_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "bpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_cbpf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.complex_band_pass_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "cbpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_bnf(fs, gain, wintype, mainwin):
ret = True
pb1,r = mainwin.gui.startofBnfStopBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBnfStopBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bnfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bnfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.firdes.band_reject_2(gain, fs, pb1, pb2, tb,
atten, wintype)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
e.args[0], "&Ok")
return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "bnf", "sbstart": pb1, "sbend": pb2,
"tb": tb, "atten": atten, "ntaps": len(taps)}
return (taps,params,r)
else:
return ([],[],ret)
def design_win_hpf(fs, gain, wintype, mainwin):
ret = True
sb,r = mainwin.gui.endofHpfStopBandEdit.text().toDouble()
ret = r and ret
pb,r = mainwin.gui.startofHpfPassBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.hpfStopBandAttenEdit.text().toDouble()
ret = r and ret
if(ret):
tb = pb - sb
try:
taps = filter.firdes.high_pass_2(gain, fs, pb, tb,
atten, wintype)
        except RuntimeError, e:
            reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
                                                  e.args[0], "&Ok")
            return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "hpf", "sbend": sb, "pbstart": pb,
"atten": atten, "ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_hb(fs, gain, wintype, mainwin):
ret = True
filtord,r = mainwin.gui.firhbordEdit.text().toDouble()
ret = r and ret
trwidth,r = mainwin.gui.firhbtrEdit.text().toDouble()
ret = r and ret
filtwin = { filter.firdes.WIN_HAMMING : 'hamming',
filter.firdes.WIN_HANN : 'hanning',
filter.firdes.WIN_BLACKMAN : 'blackman',
filter.firdes.WIN_RECTANGULAR: 'boxcar',
filter.firdes.WIN_KAISER: ('kaiser', 4.0),
filter.firdes.WIN_BLACKMAN_hARRIS: 'blackmanharris'}
if int(filtord) & 1:
reply = QtGui.QMessageBox.information(mainwin, "Filter order should be even",
"Filter order should be even","&Ok")
return ([],[],False)
if(ret):
taps = scipy.signal.firwin(int(filtord)+1, 0.5, window = filtwin[wintype])
taps[abs(taps) <= 1e-6] = 0.
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "hb","ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_rrc(fs, gain, wintype, mainwin):
ret = True
sr,r = mainwin.gui.rrcSymbolRateEdit.text().toDouble()
ret = r and ret
alpha,r = mainwin.gui.rrcAlphaEdit.text().toDouble()
ret = r and ret
ntaps,r = mainwin.gui.rrcNumTapsEdit.text().toInt()
ret = r and ret
if(ret):
try:
taps = filter.firdes.root_raised_cosine(gain, fs, sr,
alpha, ntaps)
        except RuntimeError, e:
            reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
                                                  e.args[0], "&Ok")
            return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "rrc", "srate": sr, "alpha": alpha,
"ntaps": ntaps}
return (taps,params,ret)
else:
return ([],[],ret)
def design_win_gaus(fs, gain, wintype, mainwin):
ret = True
sr,r = mainwin.gui.gausSymbolRateEdit.text().toDouble()
ret = r and ret
bt,r = mainwin.gui.gausBTEdit.text().toDouble()
ret = r and ret
ntaps,r = mainwin.gui.gausNumTapsEdit.text().toInt()
ret = r and ret
if(ret):
spb = fs / sr
try:
taps = filter.firdes.gaussian(gain, spb, bt, ntaps)
        except RuntimeError, e:
            reply = QtGui.QMessageBox.information(mainwin, "Runtime Error",
                                                  e.args[0], "&Ok")
            return ([], [], ret)
else:
params = {"fs": fs, "gain": gain, "wintype": wintype,
"filttype": "gaus", "srate": sr, "bt": bt,
"ntaps": ntaps}
return (taps,params,ret)
else:
return ([],[],ret)
# Design Functions for Equiripple Filters
def design_opt_lpf(fs, gain, mainwin):
ret = True
pb,r = mainwin.gui.endofLpfPassBandEdit.text().toDouble()
ret = r and ret
sb,r = mainwin.gui.startofLpfStopBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.lpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.lpfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.optfir.low_pass(gain, fs, pb, sb,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "lpf", "pbend": pb, "sbstart": sb,
"atten": atten, "ripple": ripple, "ntaps": len(taps)}
return (taps, params, ret)
else:
return ([], [], ret)
def design_opt_bpf(fs, gain, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bpfPassBandRippleEdit.text().toDouble()
ret = r and ret
    if(ret):
sb1 = pb1 - tb
sb2 = pb2 + tb
try:
taps = filter.optfir.band_pass(gain, fs, sb1, pb1, pb2, sb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "bpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
            return (taps,params,ret)
    else:
        return ([],[],ret)
def design_opt_cbpf(fs, gain, mainwin):
ret = True
pb1,r = mainwin.gui.startofBpfPassBandEdit.text().toDouble()
ret = r and ret
pb2,r = mainwin.gui.endofBpfPassBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bpfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bpfPassBandRippleEdit.text().toDouble()
ret = r and ret
    if(ret):
sb1 = pb1 - tb
sb2 = pb2 + tb
try:
taps = filter.optfir.complex_band_pass(gain, fs, sb1, pb1, pb2, sb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "cbpf", "pbstart": pb1, "pbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
            return (taps,params,ret)
    else:
        return ([],[],ret)
def design_opt_bnf(fs, gain, mainwin):
ret = True
sb1,r = mainwin.gui.startofBnfStopBandEdit.text().toDouble()
ret = r and ret
sb2,r = mainwin.gui.endofBnfStopBandEdit.text().toDouble()
ret = r and ret
tb,r = mainwin.gui.bnfTransitionEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.bnfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.bnfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
pb1 = sb1 - tb
pb2 = sb2 + tb
try:
taps = filter.optfir.band_reject(gain, fs, pb1, sb1, sb2, pb2,
ripple, atten)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": mainwin.EQUIRIPPLE_FILT,
"filttype": "bnf", "sbstart": pb1, "sbend": pb2,
"tb": tb, "atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_opt_hb(fs, gain, mainwin):
ret = True
filtord,r = mainwin.gui.firhbordEdit.text().toDouble()
ret = r and ret
trwidth,r = mainwin.gui.firhbtrEdit.text().toDouble()
ret = r and ret
if int(filtord) & 1:
reply = QtGui.QMessageBox.information(mainwin, "Filter order should be even",
"Filter order should be even","&Ok")
return ([],[],False)
if(ret):
try:
bands = [0,.25 - (trwidth/fs), .25 + (trwidth/fs), 0.5]
taps = scipy.signal.remez(int(filtord)+1, bands, [1,0], [1,1])
taps[abs(taps) <= 1e-6] = 0.
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter Design Error",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "hb", "ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
def design_opt_hpf(fs, gain, mainwin):
ret = True
sb,r = mainwin.gui.endofHpfStopBandEdit.text().toDouble()
ret = r and ret
pb,r = mainwin.gui.startofHpfPassBandEdit.text().toDouble()
ret = r and ret
atten,r = mainwin.gui.hpfStopBandAttenEdit.text().toDouble()
ret = r and ret
ripple,r = mainwin.gui.hpfPassBandRippleEdit.text().toDouble()
ret = r and ret
if(ret):
try:
taps = filter.optfir.high_pass(gain, fs, sb, pb,
atten, ripple)
except RuntimeError, e:
reply = QtGui.QMessageBox.information(mainwin, "Filter did not converge",
e.args[0], "&Ok")
return ([],[],False)
else:
params = {"fs": fs, "gain": gain, "wintype": self.EQUIRIPPLE_FILT,
"filttype": "hpf", "sbend": sb, "pbstart": pb,
"atten": atten, "ripple": ripple,
"ntaps": len(taps)}
return (taps,params,ret)
else:
return ([],[],ret)
|
kapilt/cloud-custodian
|
refs/heads/master
|
tools/c7n_gcp/c7n_gcp/resources/storage.py
|
5
|
# Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from c7n_gcp.provider import resources
from c7n_gcp.query import QueryResourceManager, TypeInfo
@resources.register('bucket')
class Bucket(QueryResourceManager):
class resource_type(TypeInfo):
service = 'storage'
version = 'v1'
component = 'buckets'
scope = 'project'
enum_spec = ('list', 'items[]', {'projection': 'full'})
id = 'name'
@staticmethod
def get(client, resource_info):
return client.execute_command(
'get', {'bucket': resource_info['bucket_name']})
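# Hedged example (not in the original file): given the registration above, a
# Cloud Custodian policy would reference this resource as "gcp.bucket":
#
#   policies:
#     - name: list-buckets
#       resource: gcp.bucket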
|
lupyuen/RaspberryPiImage
|
refs/heads/master
|
home/pi/GrovePi/Software/Python/others/temboo/Library/Dropbox/OAuth/__init__.py
|
5
|
from temboo.Library.Dropbox.OAuth.FinalizeOAuth import FinalizeOAuth, FinalizeOAuthInputSet, FinalizeOAuthResultSet, FinalizeOAuthChoreographyExecution
from temboo.Library.Dropbox.OAuth.InitializeOAuth import InitializeOAuth, InitializeOAuthInputSet, InitializeOAuthResultSet, InitializeOAuthChoreographyExecution
|
dyn888/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/cinemassacre.py
|
11
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import ExtractorError
from .screenwavemedia import ScreenwaveMediaIE
class CinemassacreIE(InfoExtractor):
_VALID_URL = 'https?://(?:www\.)?cinemassacre\.com/(?P<date_y>[0-9]{4})/(?P<date_m>[0-9]{2})/(?P<date_d>[0-9]{2})/(?P<display_id>[^?#/]+)'
_TESTS = [
{
'url': 'http://cinemassacre.com/2012/11/10/avgn-the-movie-trailer/',
'md5': 'fde81fbafaee331785f58cd6c0d46190',
'info_dict': {
'id': 'Cinemassacre-19911',
'ext': 'mp4',
'upload_date': '20121110',
'title': '“Angry Video Game Nerd: The Movie” – Trailer',
'description': 'md5:fb87405fcb42a331742a0dce2708560b',
},
},
{
'url': 'http://cinemassacre.com/2013/10/02/the-mummys-hand-1940',
'md5': 'd72f10cd39eac4215048f62ab477a511',
'info_dict': {
'id': 'Cinemassacre-521be8ef82b16',
'ext': 'mp4',
'upload_date': '20131002',
'title': 'The Mummy’s Hand (1940)',
},
},
{
# Youtube embedded video
'url': 'http://cinemassacre.com/2006/12/07/chronologically-confused-about-bad-movie-and-video-game-sequel-titles/',
'md5': 'df4cf8a1dcedaec79a73d96d83b99023',
'info_dict': {
'id': 'OEVzPCY2T-g',
'ext': 'mp4',
'title': 'AVGN: Chronologically Confused about Bad Movie and Video Game Sequel Titles',
'upload_date': '20061207',
'uploader': 'Cinemassacre',
'uploader_id': 'JamesNintendoNerd',
'description': 'md5:784734696c2b8b7f4b8625cc799e07f6',
}
},
{
# Youtube embedded video
'url': 'http://cinemassacre.com/2006/09/01/mckids/',
'md5': '6eb30961fa795fedc750eac4881ad2e1',
'info_dict': {
'id': 'FnxsNhuikpo',
'ext': 'mp4',
'upload_date': '20060901',
'uploader': 'Cinemassacre Extras',
'description': 'md5:de9b751efa9e45fbaafd9c8a1123ed53',
'uploader_id': 'Cinemassacre',
'title': 'AVGN: McKids',
}
},
{
'url': 'http://cinemassacre.com/2015/05/25/mario-kart-64-nintendo-64-james-mike-mondays/',
'md5': '1376908e49572389e7b06251a53cdd08',
'info_dict': {
'id': 'Cinemassacre-555779690c440',
'ext': 'mp4',
'description': 'Let’s Play Mario Kart 64 !! Mario Kart 64 is a classic go-kart racing game released for the Nintendo 64 (N64). Today James & Mike do 4 player Battle Mode with Kyle and Bootsy!',
'title': 'Mario Kart 64 (Nintendo 64) James & Mike Mondays',
'upload_date': '20150525',
}
}
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
video_date = mobj.group('date_y') + mobj.group('date_m') + mobj.group('date_d')
webpage = self._download_webpage(url, display_id)
playerdata_url = self._search_regex(
[
ScreenwaveMediaIE.EMBED_PATTERN,
r'<iframe[^>]+src="(?P<url>(?:https?:)?//(?:[^.]+\.)?youtube\.com/.+?)"',
],
webpage, 'player data URL', default=None, group='url')
if not playerdata_url:
raise ExtractorError('Unable to find player data')
video_title = self._html_search_regex(
r'<title>(?P<title>.+?)\|', webpage, 'title')
video_description = self._html_search_regex(
r'<div class="entry-content">(?P<description>.+?)</div>',
webpage, 'description', flags=re.DOTALL, fatal=False)
video_thumbnail = self._og_search_thumbnail(webpage)
return {
'_type': 'url_transparent',
'display_id': display_id,
'title': video_title,
'description': video_description,
'upload_date': video_date,
'thumbnail': video_thumbnail,
'url': playerdata_url,
}
|
ishank08/scikit-learn
|
refs/heads/master
|
examples/cluster/plot_kmeans_silhouette_analysis.py
|
83
|
"""
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters, and negative values indicate that those samples
might have been assigned to the wrong cluster.
In this example, silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data, due to the presence of clusters with
below-average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
The thickness of the silhouette plot also visualizes the cluster size. The
silhouette plot for cluster 0 when ``n_clusters`` is equal to 2 is bigger,
owing to the grouping of the 3 sub-clusters into one big cluster. However,
when ``n_clusters`` is equal to 4, all the plots are more or less of similar
thickness and hence of similar sizes, as can also be verified from the
labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
|
TyMaszWeb/django-cookie-law
|
refs/heads/master
|
setup.py
|
1
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
from itertools import chain
from glob import glob
import cookielaw
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Session',
]
package_data_globs = (
'cookielaw/templates/cookielaw/*.html',
'cookielaw/static/cookielaw/*/*',
'cookielaw/locale/*/*/*'
)
package_data = []
for f in chain(*map(glob, package_data_globs)):
package_data.append(f.split('/', 1)[1])
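# For example (illustrative file name), 'cookielaw/templates/cookielaw/banner.html'
# becomes 'templates/cookielaw/banner.html', i.e. a path relative to the
# cookielaw package directory, which is what package_data expects.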
setup(
author='Piotr Kilczuk',
author_email='piotr@tymaszweb.pl',
name='django-cookie-law',
version='.'.join(str(v) for v in cookielaw.VERSION),
description='Helps your Django project comply with EU cookie law regulations',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://github.com/TyMaszWeb/django-cookie-law',
license='BSD License',
platforms=['OS Independent'],
classifiers=CLASSIFIERS,
install_requires=[
'Django>=1.8',
'django-classy-tags>=0.3.0',
'pytest-runner>=2.11.1'
],
tests_require=[
'six',
'pytest>=3.0.0',
'pytest-django>=3.1.2',
'pytest-selenium>=1.9.1',
'selenium>=3.3.1',
],
packages=find_packages(),
package_data={'cookielaw': package_data},
include_package_data=False,
zip_safe=False,
test_suite='tests',
)
|
infobloxopen/infoblox-netmri
|
refs/heads/master
|
infoblox_netmri/api/broker/v2_8_0/neighbor_broker.py
|
13
|
from ..broker import Broker
class NeighborBroker(Broker):
controller = "neighbors"
def show(self, **kwargs):
"""Shows the details for the specified neighbor.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned and included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
:type include: Array of String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return neighbor: The neighbor identified by the specified NeighborID.
:rtype neighbor: Neighbor
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def index(self, **kwargs):
"""Lists the available neighbors. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
:type NeighborDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
:type NeighborDeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
:type NeighborInterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
:type NeighborInterfaceID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned, and their results included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. That record will always be returned, although it may not be the first record on the page. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages, and the first page containing the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` NeighborID
:param sort: The data field(s) to use for sorting the output. Default is NeighborID. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Neighbor. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return neighbors: An array of the Neighbor objects that match the specified input criteria.
:rtype neighbors: Array of Neighbor
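**Example**

A minimal usage sketch (assumes ``broker`` is an instance of this broker class and ``42`` is a hypothetical DeviceID; per the inputs above, DeviceID is an Array of Integer on API versions 2.5 and later)::

    # First page of neighbors for one source device, 100 records per
    # page, using the documented defaults for sort (NeighborID) and
    # dir (asc). Per the outputs above, the result is an array of the
    # matching Neighbor objects.
    neighbors = broker.index(DeviceID=[42], start=0, limit=100,
                             sort=['NeighborID'], dir=['asc'])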
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
def search(self, **kwargs):
"""Lists the available neighbors matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param BGPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a BGP route.
:type BGPRoutedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BGPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a BGP route.
:type BGPRoutedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's CDP entries.
:type CDPInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's CDP entries.
:type CDPInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CDPNeighborID: The internal NetMRI identifier for the CdpNeighbor object associated with this neighbor entry (if any).
:type CDPNeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CDPNeighborID: The internal NetMRI identifier for the CdpNeighbor object associated with this neighbor entry (if any).
:type CDPNeighborID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param CombinedInd: A flag indicating that these devices have basic layer 1/2 connectivity.
:type CombinedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param CombinedInd: A flag indicating that these devices have basic layer 1/2 connectivity.
:type CombinedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record.
:type DataSourceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
:type DeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship.
:type DeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param DirectEthernetInd: A flag indicating that this neighbor relationship was derived using the NetMRI direct Ethernet neighbor detection algorithm (for example, two routers directly connected via Ethernet, without any switches between them).
:type DirectEthernetInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DirectEthernetInd: A flag indicating that this neighbor relationship was derived using the NetMRI direct Ethernet neighbor detection algorithm (for example, two routers directly connected via Ethernet, without any switches between them).
:type DirectEthernetInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IGRPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an IGRP or EIGRP route.
:type IGRPRoutedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IGRPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an IGRP or EIGRP route.
:type IGRPRoutedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param IPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship; that is, the destination device is a next hop for at least one route on the source device.
:type IPRoutedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param IPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship; that is, the destination device is a next hop for at least one route on the source device.
:type IPRoutedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
:type InterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship.
:type InterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param LLDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's LLDP entries.
:type LLDPInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param LLDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's LLDP entries.
:type LLDPInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param LLDPNeighborID: The internal NetMRI identifier for this LLDP table entry.
:type LLDPNeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param LLDPNeighborID: The internal NetMRI identifier for this LLDP table entry.
:type LLDPNeighborID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param LocalRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a local route.
:type LocalRoutedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param LocalRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a local route.
:type LocalRoutedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborChangedCols: The fields that changed between this revision of the record and the previous revision.
:type NeighborChangedCols: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborChangedCols: The fields that changed between this revision of the record and the previous revision.
:type NeighborChangedCols: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
:type NeighborDeviceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship.
:type NeighborDeviceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type NeighborEndTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect.
:type NeighborEndTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborFirstSeenTime: The date and time this neighbor was first seen on the network, and since which it has been continuously present.
:type NeighborFirstSeenTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborFirstSeenTime: The date and time this neighbor was first seen on the network, and since which it has been continuously present.
:type NeighborFirstSeenTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborIfIndex: The SNMP interface index of the destination device interface.
:type NeighborIfIndex: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborIfIndex: The SNMP interface index of the destination device interface.
:type NeighborIfIndex: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
:type NeighborInterfaceID: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship.
:type NeighborInterfaceID: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborNetworkDeviceInd: A flag indicating if the destination device is a network device or an end host.
:type NeighborNetworkDeviceInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborNetworkDeviceInd: A flag indicating if the destination device is a network device or an end host.
:type NeighborNetworkDeviceInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborStartTime: The starting effective time of this revision of the record.
:type NeighborStartTime: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborStartTime: The starting effective time of this revision of the record.
:type NeighborStartTime: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NeighborTimestamp: The date and time this record was collected or calculated.
:type NeighborTimestamp: DateTime
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NeighborTimestamp: The date and time this record was collected or calculated.
:type NeighborTimestamp: Array of DateTime
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param NetworkDeviceInd: A flag indicating if the source device is a network device or an end host.
:type NetworkDeviceInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param NetworkDeviceInd: A flag indicating if the source device is a network device or an end host.
:type NetworkDeviceInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param OSPFRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an OSPF route.
:type OSPFRoutedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param OSPFRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an OSPF route.
:type OSPFRoutedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ProtoRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a dynamic protocol defined route.
:type ProtoRoutedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ProtoRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a dynamic protocol defined route.
:type ProtoRoutedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param RevSwitchFwdInd: A flag indicating that this neighbor relationship was derived by reversing a switch forwarding neighbor relationship.
:type RevSwitchFwdInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param RevSwitchFwdInd: A flag indicating that this neighbor relationship was derived by reversing a switch forwarding neighbor relationship.
:type RevSwitchFwdInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SerialInd: A flag indicating that this neighbor relationship was derived using the NetMRI point-to-point neighbor detection algorithm. Despite the name, this may include point-to-point relationships on interfaces other than serial interfaces.
:type SerialInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SerialInd: A flag indicating that this neighbor relationship was derived using the NetMRI point-to-point neighbor detection algorithm. Despite the name, this may include point-to-point relationships on interfaces other than serial interfaces.
:type SerialInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param StaticRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a static route.
:type StaticRoutedInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param StaticRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a static route.
:type StaticRoutedInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param SwitchFwdInd: A flag indicating that this neighbor relationship was derived using the NetMRI switch forwarding neighbor detection algorithm.
:type SwitchFwdInd: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param SwitchFwdInd: A flag indicating that this neighbor relationship was derived using the NetMRI switch forwarding neighbor detection algorithm.
:type SwitchFwdInd: Array of Boolean
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP interface index of the source device interface.
:type ifIndex: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param ifIndex: The SNMP interface index of the source device interface.
:type ifIndex: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned, and their results included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. That record will always be returned, although it may not be the first record on the page. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages, and the first page containing the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` NeighborID
:param sort: The data field(s) to use for sorting the output. Default is NeighborID. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Neighbor. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against neighbors, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: BGPRoutedInd, CDPInd, CDPNeighborID, CombinedInd, DataSourceID, DeviceID, DirectEthernetInd, IGRPRoutedInd, IPRoutedInd, InterfaceID, LLDPInd, LLDPNeighborID, LocalRoutedInd, NeighborChangedCols, NeighborDeviceID, NeighborEndTime, NeighborFirstSeenTime, NeighborID, NeighborIfIndex, NeighborInterfaceID, NeighborNetworkDeviceInd, NeighborStartTime, NeighborTimestamp, NetworkDeviceInd, OSPFRoutedInd, ProtoRoutedInd, RevSwitchFwdInd, SerialInd, StaticRoutedInd, SwitchFwdInd, ifIndex.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return neighbors: An array of the Neighbor objects that match the specified input criteria.
:rtype neighbors: Array of Neighbor
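**Example**

A minimal usage sketch (assumes ``broker`` is an instance of this broker class; the filter values are hypothetical)::

    # CDP-derived neighbor relationships whose searched attributes
    # contain the substring '10.0' (containment matching across the
    # attributes listed for the query input above).
    matches = broker.search(CDPInd=[True], query='10.0')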
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available neighbors matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: BGPRoutedInd, CDPInd, CDPNeighborID, CombinedInd, DataSourceID, DeviceID, DirectEthernetInd, IGRPRoutedInd, IPRoutedInd, InterfaceID, LLDPInd, LLDPNeighborID, LocalRoutedInd, NeighborChangedCols, NeighborDeviceID, NeighborEndTime, NeighborFirstSeenTime, NeighborID, NeighborIfIndex, NeighborInterfaceID, NeighborNetworkDeviceInd, NeighborStartTime, NeighborTimestamp, NetworkDeviceInd, OSPFRoutedInd, ProtoRoutedInd, RevSwitchFwdInd, SerialInd, StaticRoutedInd, SwitchFwdInd, ifIndex.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_BGPRoutedInd: The operator to apply to the field BGPRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. BGPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a BGP route. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_BGPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_BGPRoutedInd: If op_BGPRoutedInd is specified, the field named in this input will be compared to the value in BGPRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_BGPRoutedInd must be specified if op_BGPRoutedInd is specified.
:type val_f_BGPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_BGPRoutedInd: If op_BGPRoutedInd is specified, this value will be compared to the value in BGPRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_BGPRoutedInd must be specified if op_BGPRoutedInd is specified.
:type val_c_BGPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CDPInd: The operator to apply to the field CDPInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's CDP entries. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_CDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CDPInd: If op_CDPInd is specified, the field named in this input will be compared to the value in CDPInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CDPInd must be specified if op_CDPInd is specified.
:type val_f_CDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CDPInd: If op_CDPInd is specified, this value will be compared to the value in CDPInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CDPInd must be specified if op_CDPInd is specified.
:type val_c_CDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CDPNeighborID: The operator to apply to the field CDPNeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CDPNeighborID: The internal NetMRI identifier for the CdpNeighbor object associated with this neighbor entry (if any). For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_CDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CDPNeighborID: If op_CDPNeighborID is specified, the field named in this input will be compared to the value in CDPNeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CDPNeighborID must be specified if op_CDPNeighborID is specified.
:type val_f_CDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CDPNeighborID: If op_CDPNeighborID is specified, this value will be compared to the value in CDPNeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CDPNeighborID must be specified if op_CDPNeighborID is specified.
:type val_c_CDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_CombinedInd: The operator to apply to the field CombinedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. CombinedInd: A flag indicating that these devices have basic layer 1/2 connectivity. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_CombinedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_CombinedInd: If op_CombinedInd is specified, the field named in this input will be compared to the value in CombinedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_CombinedInd must be specified if op_CombinedInd is specified.
:type val_f_CombinedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_CombinedInd: If op_CombinedInd is specified, this value will be compared to the value in CombinedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_CombinedInd must be specified if op_CombinedInd is specified.
:type val_c_CombinedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DataSourceID: The operator to apply to the field DataSourceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DataSourceID: The internal NetMRI identifier for the collector NetMRI that collected this data record. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DataSourceID: If op_DataSourceID is specified, the field named in this input will be compared to the value in DataSourceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DataSourceID must be specified if op_DataSourceID is specified.
:type val_f_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DataSourceID: If op_DataSourceID is specified, this value will be compared to the value in DataSourceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DataSourceID must be specified if op_DataSourceID is specified.
:type val_c_DataSourceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DeviceID: The operator to apply to the field DeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DeviceID: The internal NetMRI identifier for the source device in this neighbor relationship. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DeviceID: If op_DeviceID is specified, the field named in this input will be compared to the value in DeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DeviceID must be specified if op_DeviceID is specified.
:type val_f_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DeviceID: If op_DeviceID is specified, this value will be compared to the value in DeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DeviceID must be specified if op_DeviceID is specified.
:type val_c_DeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_DirectEthernetInd: The operator to apply to the field DirectEthernetInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. DirectEthernetInd: A flag indicating that this neighbor relationship was derived using the NetMRI direct Ethernet neighbor detection algorithm (for example, two routers directly connected via Ethernet, without any switches between them). For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_DirectEthernetInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_DirectEthernetInd: If op_DirectEthernetInd is specified, the field named in this input will be compared to the value in DirectEthernetInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_DirectEthernetInd must be specified if op_DirectEthernetInd is specified.
:type val_f_DirectEthernetInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_DirectEthernetInd: If op_DirectEthernetInd is specified, this value will be compared to the value in DirectEthernetInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_DirectEthernetInd must be specified if op_DirectEthernetInd is specified.
:type val_c_DirectEthernetInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IGRPRoutedInd: The operator to apply to the field IGRPRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IGRPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an IGRP or EIGRP route. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_IGRPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IGRPRoutedInd: If op_IGRPRoutedInd is specified, the field named in this input will be compared to the value in IGRPRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IGRPRoutedInd must be specified if op_IGRPRoutedInd is specified.
:type val_f_IGRPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IGRPRoutedInd: If op_IGRPRoutedInd is specified, this value will be compared to the value in IGRPRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IGRPRoutedInd must be specified if op_IGRPRoutedInd is specified.
:type val_c_IGRPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_IPRoutedInd: The operator to apply to the field IPRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. IPRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship; that is, the destination device is a next hop for at least one route on the source device. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_IPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_IPRoutedInd: If op_IPRoutedInd is specified, the field named in this input will be compared to the value in IPRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_IPRoutedInd must be specified if op_IPRoutedInd is specified.
:type val_f_IPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_IPRoutedInd: If op_IPRoutedInd is specified, this value will be compared to the value in IPRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_IPRoutedInd must be specified if op_IPRoutedInd is specified.
:type val_c_IPRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_InterfaceID: The operator to apply to the field InterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. InterfaceID: The internal NetMRI identifier for the source interface in this neighbor relationship. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_InterfaceID: If op_InterfaceID is specified, the field named in this input will be compared to the value in InterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_InterfaceID must be specified if op_InterfaceID is specified.
:type val_f_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_InterfaceID: If op_InterfaceID is specified, this value will be compared to the value in InterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_InterfaceID must be specified if op_InterfaceID is specified.
:type val_c_InterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_LLDPInd: The operator to apply to the field LLDPInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. LLDPInd: A flag indicating that this neighbor relationship was derived based upon the source device's LLDP entries. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_LLDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_LLDPInd: If op_LLDPInd is specified, the field named in this input will be compared to the value in LLDPInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_LLDPInd must be specified if op_LLDPInd is specified.
:type val_f_LLDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_LLDPInd: If op_LLDPInd is specified, this value will be compared to the value in LLDPInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_LLDPInd must be specified if op_LLDPInd is specified.
:type val_c_LLDPInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_LLDPNeighborID: The operator to apply to the field LLDPNeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. LLDPNeighborID: The internal NetMRI identifier for this LLDP table entry. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_LLDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_LLDPNeighborID: If op_LLDPNeighborID is specified, the field named in this input will be compared to the value in LLDPNeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_LLDPNeighborID must be specified if op_LLDPNeighborID is specified.
:type val_f_LLDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_LLDPNeighborID: If op_LLDPNeighborID is specified, this value will be compared to the value in LLDPNeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_LLDPNeighborID must be specified if op_LLDPNeighborID is specified.
:type val_c_LLDPNeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_LocalRoutedInd: The operator to apply to the field LocalRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. LocalRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a local route. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_LocalRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_LocalRoutedInd: If op_LocalRoutedInd is specified, the field named in this input will be compared to the value in LocalRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_LocalRoutedInd must be specified if op_LocalRoutedInd is specified.
:type val_f_LocalRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_LocalRoutedInd: If op_LocalRoutedInd is specified, this value will be compared to the value in LocalRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_LocalRoutedInd must be specified if op_LocalRoutedInd is specified.
:type val_c_LocalRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborChangedCols: The operator to apply to the field NeighborChangedCols. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborChangedCols: The fields that changed between this revision of the record and the previous revision. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_NeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborChangedCols: If op_NeighborChangedCols is specified, the field named in this input will be compared to the value in NeighborChangedCols using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborChangedCols must be specified if op_NeighborChangedCols is specified.
:type val_f_NeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborChangedCols: If op_NeighborChangedCols is specified, this value will be compared to the value in NeighborChangedCols using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborChangedCols must be specified if op_NeighborChangedCols is specified.
:type val_c_NeighborChangedCols: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborDeviceID: The operator to apply to the field NeighborDeviceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborDeviceID: The internal NetMRI identifier for the destination device in this neighbor relationship. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_NeighborDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborDeviceID: If op_NeighborDeviceID is specified, the field named in this input will be compared to the value in NeighborDeviceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborDeviceID must be specified if op_NeighborDeviceID is specified.
:type val_f_NeighborDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborDeviceID: If op_NeighborDeviceID is specified, this value will be compared to the value in NeighborDeviceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborDeviceID must be specified if op_NeighborDeviceID is specified.
:type val_c_NeighborDeviceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborEndTime: The operator to apply to the field NeighborEndTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborEndTime: The ending effective time of this revision of this record, or empty if still in effect. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_NeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborEndTime: If op_NeighborEndTime is specified, the field named in this input will be compared to the value in NeighborEndTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborEndTime must be specified if op_NeighborEndTime is specified.
:type val_f_NeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborEndTime: If op_NeighborEndTime is specified, this value will be compared to the value in NeighborEndTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborEndTime must be specified if op_NeighborEndTime is specified.
:type val_c_NeighborEndTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborFirstSeenTime: The operator to apply to the field NeighborFirstSeenTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborFirstSeenTime: The date and time this neighbor was first seen on the network, and since which it has been continuously present. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_NeighborFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborFirstSeenTime: If op_NeighborFirstSeenTime is specified, the field named in this input will be compared to the value in NeighborFirstSeenTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborFirstSeenTime must be specified if op_NeighborFirstSeenTime is specified.
:type val_f_NeighborFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborFirstSeenTime: If op_NeighborFirstSeenTime is specified, this value will be compared to the value in NeighborFirstSeenTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborFirstSeenTime must be specified if op_NeighborFirstSeenTime is specified.
:type val_c_NeighborFirstSeenTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborID: The operator to apply to the field NeighborID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborID: The internal NetMRI identifier for this neighbor relationship. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborID: If op_NeighborID is specified, the field named in this input will be compared to the value in NeighborID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborID must be specified if op_NeighborID is specified.
:type val_f_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborID: If op_NeighborID is specified, this value will be compared to the value in NeighborID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborID must be specified if op_NeighborID is specified.
:type val_c_NeighborID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborIfIndex: The operator to apply to the field NeighborIfIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborIfIndex: The SNMP interface index of the destination device interface. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_NeighborIfIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborIfIndex: If op_NeighborIfIndex is specified, the field named in this input will be compared to the value in NeighborIfIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborIfIndex must be specified if op_NeighborIfIndex is specified.
:type val_f_NeighborIfIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborIfIndex: If op_NeighborIfIndex is specified, this value will be compared to the value in NeighborIfIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborIfIndex must be specified if op_NeighborIfIndex is specified.
:type val_c_NeighborIfIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborInterfaceID: The operator to apply to the field NeighborInterfaceID. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborInterfaceID: The internal NetMRI identifier for the destination interface in this neighbor relationship. For the between operator, the value will be treated as an Array if a comma-delimited string is passed, and it must contain an even number of values.
:type op_NeighborInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborInterfaceID: If op_NeighborInterfaceID is specified, the field named in this input will be compared to the value in NeighborInterfaceID using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborInterfaceID must be specified if op_NeighborInterfaceID is specified.
:type val_f_NeighborInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborInterfaceID: If op_NeighborInterfaceID is specified, this value will be compared to the value in NeighborInterfaceID using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborInterfaceID must be specified if op_NeighborInterfaceID is specified.
:type val_c_NeighborInterfaceID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborNetworkDeviceInd: The operator to apply to the field NeighborNetworkDeviceInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborNetworkDeviceInd: A flag indicating if the destination device is a network device or an end host. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborNetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborNetworkDeviceInd: If op_NeighborNetworkDeviceInd is specified, the field named in this input will be compared to the value in NeighborNetworkDeviceInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborNetworkDeviceInd must be specified if op_NeighborNetworkDeviceInd is specified.
:type val_f_NeighborNetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborNetworkDeviceInd: If op_NeighborNetworkDeviceInd is specified, this value will be compared to the value in NeighborNetworkDeviceInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborNetworkDeviceInd must be specified if op_NeighborNetworkDeviceInd is specified.
:type val_c_NeighborNetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborStartTime: The operator to apply to the field NeighborStartTime. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborStartTime: The starting effective time of this revision of the record. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborStartTime: If op_NeighborStartTime is specified, the field named in this input will be compared to the value in NeighborStartTime using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborStartTime must be specified if op_NeighborStartTime is specified.
:type val_f_NeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborStartTime: If op_NeighborStartTime is specified, this value will be compared to the value in NeighborStartTime using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborStartTime must be specified if op_NeighborStartTime is specified.
:type val_c_NeighborStartTime: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NeighborTimestamp: The operator to apply to the field NeighborTimestamp. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NeighborTimestamp: The date and time this record was collected or calculated. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NeighborTimestamp: If op_NeighborTimestamp is specified, the field named in this input will be compared to the value in NeighborTimestamp using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NeighborTimestamp must be specified if op_NeighborTimestamp is specified.
:type val_f_NeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NeighborTimestamp: If op_NeighborTimestamp is specified, this value will be compared to the value in NeighborTimestamp using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NeighborTimestamp must be specified if op_NeighborTimestamp is specified.
:type val_c_NeighborTimestamp: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_NetworkDeviceInd: The operator to apply to the field NetworkDeviceInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. NetworkDeviceInd: A flag indicating if the source device is a network device or an end host. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_NetworkDeviceInd: If op_NetworkDeviceInd is specified, the field named in this input will be compared to the value in NetworkDeviceInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_NetworkDeviceInd must be specified if op_NetworkDeviceInd is specified.
:type val_f_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_NetworkDeviceInd: If op_NetworkDeviceInd is specified, this value will be compared to the value in NetworkDeviceInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_NetworkDeviceInd must be specified if op_NetworkDeviceInd is specified.
:type val_c_NetworkDeviceInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_OSPFRoutedInd: The operator to apply to the field OSPFRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. OSPFRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon an OSPF route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_OSPFRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_OSPFRoutedInd: If op_OSPFRoutedInd is specified, the field named in this input will be compared to the value in OSPFRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_OSPFRoutedInd must be specified if op_OSPFRoutedInd is specified.
:type val_f_OSPFRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_OSPFRoutedInd: If op_OSPFRoutedInd is specified, this value will be compared to the value in OSPFRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_OSPFRoutedInd must be specified if op_OSPFRoutedInd is specified.
:type val_c_OSPFRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ProtoRoutedInd: The operator to apply to the field ProtoRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ProtoRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a dynamic protocol defined route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ProtoRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ProtoRoutedInd: If op_ProtoRoutedInd is specified, the field named in this input will be compared to the value in ProtoRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ProtoRoutedInd must be specified if op_ProtoRoutedInd is specified.
:type val_f_ProtoRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ProtoRoutedInd: If op_ProtoRoutedInd is specified, this value will be compared to the value in ProtoRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ProtoRoutedInd must be specified if op_ProtoRoutedInd is specified.
:type val_c_ProtoRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_RevSwitchFwdInd: The operator to apply to the field RevSwitchFwdInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. RevSwitchFwdInd: A flag indicating that this neighbor relationship was derived by reversing a switch forwarding neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_RevSwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_RevSwitchFwdInd: If op_RevSwitchFwdInd is specified, the field named in this input will be compared to the value in RevSwitchFwdInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_RevSwitchFwdInd must be specified if op_RevSwitchFwdInd is specified.
:type val_f_RevSwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_RevSwitchFwdInd: If op_RevSwitchFwdInd is specified, this value will be compared to the value in RevSwitchFwdInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_RevSwitchFwdInd must be specified if op_RevSwitchFwdInd is specified.
:type val_c_RevSwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SerialInd: The operator to apply to the field SerialInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SerialInd: A flag indicating that this neighbor relationship was derived using the NetMRI point-to-point neighbor detection algorithm. Despite the name this may include point-to-point relationships on interfaces other than serial interfaces. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SerialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SerialInd: If op_SerialInd is specified, the field named in this input will be compared to the value in SerialInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SerialInd must be specified if op_SerialInd is specified.
:type val_f_SerialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SerialInd: If op_SerialInd is specified, this value will be compared to the value in SerialInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SerialInd must be specified if op_SerialInd is specified.
:type val_c_SerialInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_StaticRoutedInd: The operator to apply to the field StaticRoutedInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. StaticRoutedInd: A flag indicating that this neighbor relationship represents an IP routing relationship based upon a static route. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_StaticRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_StaticRoutedInd: If op_StaticRoutedInd is specified, the field named in this input will be compared to the value in StaticRoutedInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_StaticRoutedInd must be specified if op_StaticRoutedInd is specified.
:type val_f_StaticRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_StaticRoutedInd: If op_StaticRoutedInd is specified, this value will be compared to the value in StaticRoutedInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_StaticRoutedInd must be specified if op_StaticRoutedInd is specified.
:type val_c_StaticRoutedInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_SwitchFwdInd: The operator to apply to the field SwitchFwdInd. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. SwitchFwdInd: A flag indicating that this neighbor relationship was derived using the NetMRI switch forwarding neighbor detection algorithm. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_SwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_SwitchFwdInd: If op_SwitchFwdInd is specified, the field named in this input will be compared to the value in SwitchFwdInd using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_SwitchFwdInd must be specified if op_SwitchFwdInd is specified.
:type val_f_SwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_SwitchFwdInd: If op_SwitchFwdInd is specified, this value will be compared to the value in SwitchFwdInd using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_SwitchFwdInd must be specified if op_SwitchFwdInd is specified.
:type val_c_SwitchFwdInd: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_ifIndex: The operator to apply to the field ifIndex. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. ifIndex: The SNMP interface index of the source device interface. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_ifIndex: If op_ifIndex is specified, the field named in this input will be compared to the value in ifIndex using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_ifIndex must be specified if op_ifIndex is specified.
:type val_f_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_ifIndex: If op_ifIndex is specified, this value will be compared to the value in ifIndex using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_ifIndex must be specified if op_ifIndex is specified.
:type val_c_ifIndex: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_network_id: The operator to apply to the field network_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. network_id: The Network View ID assigned to this neighbor relationship. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_network_id: If op_network_id is specified, the field named in this input will be compared to the value in network_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_network_id must be specified if op_network_id is specified.
:type val_f_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_network_id: If op_network_id is specified, this value will be compared to the value in network_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_network_id must be specified if op_network_id is specified.
:type val_c_network_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param DeviceGroupID: The internal NetMRI identifier of the device groups to which to limit the results.
:type DeviceGroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param timestamp: The data returned will represent the neighbors as of this date and time. If omitted, the result will indicate the most recently collected data.
:type timestamp: DateTime
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param methods: A list of neighbor methods. The listed methods will be called on each neighbor returned and included in the output. Available methods are: network_id, device, interface, neighbor_device, neighbor_interface, infradevice.
:type methods: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param include: A list of associated object types to include in the output. The listed associations will be returned as outputs named according to the association name (see outputs below). Available includes are: device, interface, neighbor_device, neighbor_interface.
:type include: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` NeighborID
:param sort: The data field(s) to use for sorting the output. Default is NeighborID. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each Neighbor. Valid values are DataSourceID, NeighborID, DeviceID, InterfaceID, ifIndex, NeighborDeviceID, NeighborInterfaceID, NeighborIfIndex, NeighborFirstSeenTime, NeighborStartTime, NeighborEndTime, NeighborChangedCols, NeighborTimestamp, CombinedInd, CDPInd, LLDPInd, SerialInd, SwitchFwdInd, RevSwitchFwdInd, DirectEthernetInd, IPRoutedInd, StaticRoutedInd, LocalRoutedInd, ProtoRoutedInd, BGPRoutedInd, OSPFRoutedInd, IGRPRoutedInd, NetworkDeviceInd, NeighborNetworkDeviceInd, CDPNeighborID, LLDPNeighborID. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name used by NIOS GOTO to locate a row position within the records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field used by NIOS GOTO to locate a row position within the records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return neighbors: An array of the Neighbor objects that match the specified input criteria.
:rtype neighbors: Array of Neighbor
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
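# Example (an illustrative sketch, not taken from the vendor docs): the
# op_/val_c_ triplets documented above compose into a single filtered
# query. "broker" stands in for an already-configured Neighbor broker
# instance; the call below would list switch-forwarding neighbors seen
# on ifIndex 3, newest first, in one page of 500 records:
#
#     neighbors = broker.find(
#         op_ifIndex="=", val_c_ifIndex="3",
#         op_SwitchFwdInd="=", val_c_SwitchFwdInd="1",
#         sort=["NeighborTimestamp"], dir=["desc"],
#         start=0, limit=500)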
def interface(self, **kwargs):
"""The source interface in this neighbor relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The source interface in this neighbor relationship.
:rtype : Interface
"""
return self.api_request(self._get_method_fullname("interface"), kwargs)
def neighbor_device(self, **kwargs):
"""The destination device in this neighbor relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The destination device in this neighbor relationship.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("neighbor_device"), kwargs)
def neighbor_interface(self, **kwargs):
"""The destination interface in this neighbor relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The destination interface in this neighbor relationship.
:rtype : Interface
"""
return self.api_request(self._get_method_fullname("neighbor_interface"), kwargs)
def infradevice(self, **kwargs):
"""The source device in this neighbor relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The source device in this neighbor relationship.
:rtype : InfraDevice
"""
return self.api_request(self._get_method_fullname("infradevice"), kwargs)
def network_id(self, **kwargs):
"""The Network View ID assigned to this neighbor relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The Network View ID assigned to this neighbor relationship.
:rtype : Integer
"""
return self.api_request(self._get_method_fullname("network_id"), kwargs)
def device(self, **kwargs):
"""The source device in this neighbor relationship.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param NeighborID: The internal NetMRI identifier for this neighbor relationship.
:type NeighborID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return : The source device in this neighbor relationship.
:rtype : Device
"""
return self.api_request(self._get_method_fullname("device"), kwargs)
def get_n_hops_neighbors(self, **kwargs):
"""Determines the devices that can be reached from a starting device through a given number of Level 2 connections.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_id: The Device ID of the starting device.
:type device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param number_of_hops: The number of hops to make to find the path.
:type number_of_hops: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("get_n_hops_neighbors"), kwargs)
def get_l2_reachability(self, **kwargs):
"""Determine the devices that can be reached from a starting device all Level 2 connections.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_id: The Device ID of the starting device.
:type device_id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("get_l2_reachability"), kwargs)
def get_path(self, **kwargs):
"""Calculates a path between 2 devices.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param start_device_id: The Device ID of the starting device.
:type start_device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param end_device_id: The Device ID of the ending device.
:type end_device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param end_ip_address: The IP Address of the ending device.
:type end_ip_address: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` l2l3
:param path_type: The path level (l2, l3, or l2l3)
:type path_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param default_gateway: The default gateway
:type default_gateway: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param virtual_network_id: The Network View ID of the starting device.
:type virtual_network_id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("get_path"), kwargs)
def get_topology_data(self, **kwargs):
"""Returns topology relationship information. The topology contains device and relationship information. This method only supports XML and JSON output.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for the device groups to which to limit the results.
:type GroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param topology_type: The type of topology graph to be generated. Possible values are Network, L2NHop, L3NHop, L2L3MostLikelyPath, L3MostLikelyPath, VLAN, CalculatedPath.
:type topology_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param view_type: The type of view to be generated. Possible values are Aggregate, LinkDiscoveryProtocols, SerialLinks, or SwitchForwarding. Required when topology_type is "Network" or "L2NHop".
:type view_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param protocol: Possible values are All, BGP, IGRP, Local, or OSPF. Required when topology_type is "L3NHop".
:type protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_end_hosts_ind: Flag to return end host data, or not.
:type show_end_hosts_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_issues_ind: Flag to return issue count data, or not, for each device.
:type show_issues_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_changes_ind: Flag to return change count data, or not, for each device.
:type show_changes_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_device_groups_ind: Flag to include a comma separated list of device groups for each device.
:type show_device_groups_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_vlans_ind: Flag to include a comma separated list of vlans for each relationship.
:type show_vlans_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_policy_violations_ind: Flag to return policy violation data, or not, for each device.
:type show_policy_violations_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_networks_ind: Flag to show or hide the virtual network IDs on the topology relationships.
:type show_networks_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param hop_count: Possible values are 1, 2, 3, 4. When running against L2NHop, a value of 'all' is accepted. Required when topology_type is "L2NHop" or "L3NHop".
:type hop_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start_device_id: The internal NetMRI identifier for the device from which to start. Required when topology_type is "L2NHop", "L3NHop", "L2L3MostLikelyPath", or "L3MostLikelyPath".
:type start_device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param path_calculation_id: The internal NetMRI identifier for the calculated paths to display. Required when topology_type is "CalculatedPath".
:type path_calculation_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param path_analysis_id: The internal NetMRI identifier for the specific calculated path to display. This ID is combined with the path_calculation_id to find a specific path. Required when topology_type is "CalculatedPath".
:type path_analysis_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param path_id: The id for the specific calculated path to display. This ID is combined with the path_calculation_id to find a specific path.
:type path_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param target_device_id: The internal NetMRI identifier for the device at which to end. Required when topology_type is "L2L3MostLikelyPath" or "L3MostLikelyPath".
:type target_device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param target_device_ip_address: The IP Address of the ending device. Required when topology_type is "L2L3MostLikelyPath" or "L3MostLikelyPath".
:type target_device_ip_address: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0.0.0.0
:param gateway: The Gateway of the ending device. Required when topology_type is "L2L3MostLikelyPath" or "L3MostLikelyPath".
:type gateway: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param vlan_id: The internal NetMRI identifier for the VLAN to follow. Required when topology_type is "VLAN".
:type vlan_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_network_id: For L2/L3 Most Likely Path and L3 Most Likely Path, the user may submit a network_id with the request, to specify the virtual network.
:type virtual_network_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19.
:type limit: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("get_topology_data"), kwargs)
def get_topology_relationship_count(self, **kwargs):
"""Returns a count of relationships from the NetMRI topology.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param GroupID: The internal NetMRI identifier for the device groups to which to limit the results.
:type GroupID: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param topology_type: The type of topology graph to be generated. Possible values are Network, L2NHop, L3NHop, L2L3MostLikelyPath, L3MostLikelyPath, VLAN.
:type topology_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param view_type: The type of view to be generated. Possible values are Aggregate, LinkDiscoveryProtocols, SerialLinks, or SwitchForwarding. Required when topology_type is "Network" or "L2NHop".
:type view_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param protocol: Possible values are All, BGP, IGRP, Local, or OSPF. Required when topology_type is "L3NHop".
:type protocol: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` False
:param show_end_hosts_ind: Flag to count end host relationships, or not.
:type show_end_hosts_ind: Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param hop_count: Possible values are 1, 2, 3, 4. When running against L2NHop, a value of 'all' is accepted. Required when topology_type is "L2NHop" or "L3NHop".
:type hop_count: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param start_device_id: The internal NetMRI identifier for the device from which to start. Required when topology_type is "L2NHop", "L3NHop", "L2L3MostLikelyPath", or "L3MostLikelyPath".
:type start_device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param virtual_network_id: The internal NetMRI identifier for the Network View from which to start. Required when topology_type is "L2NHop", "L3NHop", "L2L3MostLikelyPath", or "L3MostLikelyPath".
:type virtual_network_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param target_device_id: The internal NetMRI identifier for the device at which to end. Required when topology_type is "L2L3MostLikelyPath" or "L3MostLikelyPath".
:type target_device_id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param target_device_ip_address: The IP Address of the ending device. Required when topology_type is "L2L3MostLikelyPath" or "L3MostLikelyPath".
:type target_device_ip_address: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0.0.0.0
:param gateway: The Gateway of the ending device. Required when topology_type is "L2L3MostLikelyPath" or "L3MostLikelyPath".
:type gateway: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param vlan_id: The internal NetMRI identifier for the VLAN to follow. Required when topology_type is "VLAN".
:type vlan_id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("get_topology_relationship_count"), kwargs)
|
venthur/pyff
|
refs/heads/master
|
src/lib/ipc.py
|
3
|
# ipc.py -
# Copyright (C) 2009 Bastian Venthur
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Inter Process Communication.
This module provides classes to ease the inter-process communication (IPC)
between the Feedback Controller and the Feedbacks.
"""
import asyncore
import asynchat
import socket
import cPickle as pickle
import logging
import thread
import bcixml
# delimiter for IPC messages.
TERMINATOR = "\r\n\r\n"
# Port for IPC connections
IPC_PORT = 12347
LOCALHOST = "127.0.0.1"
def ipcloop():
"""Start the IPC loop."""
asyncore.loop()
def get_feedbackcontroller_connection():
"""Return a connection to the Feedback Controller."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((LOCALHOST, IPC_PORT))
return sock
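# Example helper (a minimal sketch): demonstrates the wire format used by
# this module -- each message is pickled, then framed with TERMINATOR.
# Assumes a Feedback Controller is already listening on LOCALHOST:IPC_PORT.
def send_raw_message(message):
    """Pickle, frame and send a single message, then close the socket.
    :param message: Message (e.g. a bcixml.BciSignal)
    """
    sock = get_feedbackcontroller_connection()
    try:
        # HIGHEST_PROTOCOL matches what IPCChannel.send_message uses below.
        sock.sendall(pickle.dumps(message, protocol=pickle.HIGHEST_PROTOCOL) + TERMINATOR)
    finally:
        sock.close()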
class IPCConnectionHandler(asyncore.dispatcher):
"""Waits for incoming connection requests and dispatches a
FeedbackControllerIPCChannel.
"""
def __init__(self, fc):
asyncore.dispatcher.__init__(self)
self.logger = logging.getLogger("IPCConnectionHandler")
self.conn = None
self.addr = None
self.ipcchan = None
self.fc = fc
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.bind((LOCALHOST, IPC_PORT))
self.listen(5)
def handle_accept(self):
"""Handle incoming connection from Feedback."""
self.logger.debug("Accepting.")
self.conn, self.addr = self.accept()
self.ipcchan = FeedbackControllerIPCChannel(self.conn, self.fc)
def handle_close(self):
"""Handle closing of connection."""
self.logger.debug("Closing.")
self.ipcchan = None
def handle_error(self):
"""Handle error."""
self.logger.error("Some error occurred, ignoring it.")
def send_message(self, message):
"""Send the message via the currently open connection."""
if self.ipcchan:
self.ipcchan.send_message(message)
else:
raise Exception("No open IPC channel available.")
def close_channel(self):
"""Close the channel to the Feedback."""
self.logger.debug("Closing channel to Feedback.")
self.ipcchan.close()
class IPCChannel(asynchat.async_chat):
"""IPC Channel.
Base for the channels the Feedback Controller and the Feedbacks need.
This class transparently takes care of de-/serialization of the data which
goes through the IPC. Derived classes should implement::
handle_message(self, message)
to do something useful and use::
send_message(self, message)
for sending messages via IPC.
"""
def __init__(self, conn):
"""Initialize the Channel, set terminator and clear input buffer."""
asynchat.async_chat.__init__(self, conn)
self.logger = logging.getLogger("IPCChannel")
self.set_terminator(TERMINATOR)
# input buffer
self.ibuf = ""
def collect_incoming_data(self, data):
"""Append incoming data to input buffer.
:param data: Incoming data
"""
self.ibuf += data
def found_terminator(self):
"""Process message from peer."""
dump = self.ibuf
self.ibuf = ""
ipcmessage = pickle.loads(dump)
try:
self.handle_message(ipcmessage)
except Exception:
self.logger.exception("Handling an IPC message caused an exception:")
def send_message(self, message):
"""Send message to peer.
:param message: Message
"""
dump = pickle.dumps(message, protocol=pickle.HIGHEST_PROTOCOL)
dump += TERMINATOR
self.push(dump)
def handle_close(self):
"""Handle closing of connection."""
self.logger.debug("Closing Connection.")
asynchat.async_chat.handle_close(self)
def handle_message(self, message):
"""Do something with the received message.
This method should be overwritten by derived classes.
:param message: Message
"""
pass
class FeedbackControllerIPCChannel(IPCChannel):
"""IPC Channel for Feedback Contoller's end."""
def __init__(self, conn, fc):
IPCChannel.__init__(self, conn)
self.fc = fc
def handle_message(self, message):
"""Handle message from Feedback.
:param message: Message
"""
self.fc.handle_signal(message)
class FeedbackIPCChannel(IPCChannel):
"""IPC Channel for Feedback's end."""
def __init__(self, conn, feedback):
IPCChannel.__init__(self, conn)
self.feedback = feedback
def handle_message(self, message):
"""Handle message from Feedback Controller.
:param message: Message
"""
self.feedback.logger.debug("Processing signal")
if message.type == bcixml.CONTROL_SIGNAL:
self.feedback._on_control_event(message.data)
return
cmd = message.commands[0][0] if len(message.commands) > 0 else None
if cmd == bcixml.CMD_GET_VARIABLES:
reply = bcixml.BciSignal({"variables" : self.feedback._get_variables()}, None,
bcixml.REPLY_SIGNAL)
reply.peeraddr = message.peeraddr
self.feedback.logger.debug("Sending variables")
self.send_message(reply)
self.feedback._on_interaction_event(message.data)
if cmd == bcixml.CMD_PLAY:
self.feedback._playEvent.set()
elif cmd == bcixml.CMD_PAUSE:
self.feedback._on_pause()
elif cmd == bcixml.CMD_STOP:
self.feedback._on_stop()
elif cmd == bcixml.CMD_QUIT:
self.feedback._on_quit()
elif cmd == bcixml.CMD_SEND_INIT:
self.feedback._on_init()
elif cmd == bcixml.CMD_SAVE_VARIABLES:
filename = message.commands[0][1]['filename']
self.feedback.save_variables(filename)
elif cmd == bcixml.CMD_LOAD_VARIABLES:
filename = message.commands[0][1]['filename']
self.feedback.load_variables(filename)
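# Example wiring for the Feedback side (a minimal sketch): open the IPC
# connection, attach the asynchat channel and enter the asyncore loop.
# "my_feedback" stands in for a real Feedback instance providing the
# callbacks used by FeedbackIPCChannel above.
#
#     conn = get_feedbackcontroller_connection()
#     chan = FeedbackIPCChannel(conn, my_feedback)
#     ipcloop()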
|