| repo_name (string, 5–100 chars) | ref (string, 12–67 chars) | path (string, 4–244 chars) | copies (string, 1–8 chars) | content (string, 0–1.05M chars, nullable) |
|---|---|---|---|---|
amisrs/one-eighty
|
refs/heads/master
|
angular_flask/lib/python2.7/site-packages/packaging/requirements.py
|
140
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pyparsing import Literal as L # noqa
from six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
"""
An invalid requirement was found, users should refer to PEP 508.
"""
ALPHANUM = Word(string.ascii_letters + string.digits)
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()
PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))
NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)
EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')
VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPARATOR = SEMICOLON
MARKER = MARKER_SEPARATOR + MARKER_EXPR
VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)
NAMED_REQUIREMENT = \
NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
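# The grammar above accepts the PEP 508 forms, for example (URL hypothetical):
#   name
#   name[extra1,extra2]
#   name>=1.0,<2.0 ; python_version < "3"
#   name @ https://example.com/archive.zip ; os_name == "posix"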
class Requirement(object):
"""Parse a requirement.
Parse a given requirement string into its parts, such as name, specifier,
URL, and extras. Raises InvalidRequirement on a badly-formed requirement
string.
"""
# TODO: Can we test whether something is contained within a requirement?
# If so how do we do that? Do we need to test against the _name_ of
# the thing as well as the version? What about the markers?
# TODO: Can we normalize the name and extra name?
def __init__(self, requirement_string):
try:
req = REQUIREMENT.parseString(requirement_string)
except ParseException as e:
raise InvalidRequirement(
"Invalid requirement, parse error at \"{0!r}\"".format(
requirement_string[e.loc:e.loc + 8]))
self.name = req.name
if req.url:
parsed_url = urlparse.urlparse(req.url)
if not (parsed_url.scheme and parsed_url.netloc) or (
not parsed_url.scheme and not parsed_url.netloc):
raise InvalidRequirement("Invalid URL given")
self.url = req.url
else:
self.url = None
self.extras = set(req.extras.asList() if req.extras else [])
self.specifier = SpecifierSet(req.specifier)
self.marker = req.marker if req.marker else None
def __str__(self):
parts = [self.name]
if self.extras:
parts.append("[{0}]".format(",".join(sorted(self.extras))))
if self.specifier:
parts.append(str(self.specifier))
if self.url:
parts.append("@ {0}".format(self.url))
if self.marker:
parts.append("; {0}".format(self.marker))
return "".join(parts)
def __repr__(self):
return "<Requirement({0!r})>".format(str(self))
|
SECURED-FP7/secured-webgui
|
refs/heads/master
|
webContent/psarClient.py
|
1
|
#!/usr/bin/python
from requests import get, put, delete, patch, post
import urllib
from keystoneclient.v2_0 import client
import argparse,json, os
'''
Client for the PSAR API. All methods return a Response object.
TO-DO: Currently only tested without authentication.
'''
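#Usage sketch (host URL is hypothetical; every method returns a
#requests.Response object):
#   c = Client('http://psar.example.com:8080')
#   r = c.get_status()
#   print r.status_code, r.content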
class Client:
def __init__(self,base_url):
self.base_url=base_url+'/v1/'
def get_token(self,user,password,tenant,auth_URL):
keystone = client.Client(username=user, password=password, tenant_name=tenant, auth_url=auth_URL)
return keystone.get_raw_token_from_identity_service(auth_url=auth_URL,username=user, password=password,tenant_name=tenant).auth_token
def add_token_param(self,token,first_param): #TO-DO:Change authentication so that it goes on the header instead of url
return ('?' if first_param else '&')+urllib.urlencode({'auth_token':token})
#Status
def get_status(self,token=None):
url = self.base_url+'status'
params={}
if token:
params['token']=token
return get(url,params=params)
#PSA
def create_psa(self, name=None, token=None, id=None, manifest_id=None, plugin_id=None, cost=None, latency=None, rating=None, is_generic=None):
url = self.base_url+'PSA/images/'
params={}
if token:
params['token']=token
if name:
params['name']=name
if id:
params['id']=id
if manifest_id:
params['manifest_id']=manifest_id
if plugin_id:
params['plugin_id']=plugin_id
if cost:
params['cost']=cost
if latency:
params['latency']=latency
if rating:
params['rating']=rating
if is_generic:
params['is_generic']=is_generic
return post(url,params=params)
def delete_psa(self,psa_id,token=None):
url = self.base_url+'PSA/images/'+psa_id+'/'
params={}
if token:
params['token']=token
return delete(url,params=params)
def get_image_list(self, id=None,token=None, is_generic=None):
url = self.base_url+'PSA/images/'
params={}
if token:
params['token']=token
if id:
params['id']=id
        if is_generic is not None:
            params['is_generic']=is_generic
return get(url,params=params)
#Manifest
def get_manifest(self, psa_id,path,token=None):
url = self.base_url+'PSA/manifest/'+psa_id
params={}
if token:
params['token']=token
r=get(url, params=params)
if r.status_code == 200:
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
return r
def delete_manifest (self,psa_id,token=None):
url = self.base_url+'PSA/manifest/'+psa_id+'/file'
params={}
if token:
params['token']=token
return delete(url, params=params)
def put_manifest_file(self,psa_id,path,token=None):
with open(path,'rb') as f:
url=self.base_url+'PSA/manifest/'+str(psa_id)+'/file'
params={}
if token:
params['token']=token
files={'file':f}
return put(url,files=files, params=params)
#Images
def get_image_file(self,psa_id, path,token=None):
url = self.base_url+'PSA/images/'+psa_id+'/file'
params={}
if token:
params['token']=token
r=get(url, params=params)
if r.status_code == 200:
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
return r
def put_image_file(self, psa_id, path, disk_format, container_format,token=None):
url=self.base_url+'PSA/images/'+psa_id+'/file?'+urllib.urlencode({"disk_format":disk_format,'container_format':container_format})
params={}
if token:
params['token']=token
with open (path, 'rb') as f:
files={'file':f}
headers={'Content-Type':'application/octet-stream','Content-Disposition':'attachment; filename='+psa_id}
return put(url,data=f,headers=headers, params=params)
#return put(url,files=files,headers=headers)
def put_image(self,psa_id,name=None,token=None,cost=None,latency=None,rating=None,
is_generic=None,owner=None,psa_description=None):
url = self.base_url+'PSA/images/'+psa_id+'/'
params={}
if token is not None:
params['token']=token
if name is not None:
params['name']=name
if cost is not None:
params['cost']=cost
if latency is not None:
params['latency']=latency
if rating is not None:
params['rating']=rating
if is_generic is not None:
params['is_generic']=is_generic
if owner is not None:
params['owner']=owner
if psa_description is not None:
params['psa_description']=psa_description
return put(url, params=params)
def delete_image(self, psa_id,token=None):
url = self.base_url+'PSA/images/'+psa_id+'/file'
params={}
if token:
params['token']=token
return delete(url, params=params)
def get_image_location (self,psa_id,token=None):
url = self.base_url+'PSA/images/'+psa_id+'/image_location'
params={}
if token:
params['token']=token
return get(url, params=params)
def patch_image(self, psa_id, new_status,token=None):
url = self.base_url+'PSA/images/'+psa_id+'/?status='+new_status
params={}
if token:
params['token']=token
return patch(url, params=params)
#Plugin
def get_plugin_file(self,psa_id,path,token=None):
url =self.base_url+'PSA/M2Lplugins/'+psa_id+'/'
params={}
if token:
params['token']=token
r=get(url, params=params)
if r.status_code == 200:
with open(path, 'wb') as f:
for chunk in r.iter_content(chunk_size=1024):
f.write(chunk)
return r
def put_plugin_file(self,psa_id,path,token=None):
url=self.base_url+'PSA/M2Lplugins/'+psa_id+'/file'
params={}
if token:
params['token']=token
with open(path,'rb') as f:
files={'file':f}
return put(url,params=params,files=files)
def put_plugin(self, psa_id,name=None,new_url=None,token=None):
url=self.base_url+'PSA/M2Lplugins/'+psa_id+'/'
params={}
if token:
params['token']=token
if name:
params['name']=name
if new_url:
params['new_url']=new_url
return put(url, params=params)
def delete_plugin(self, psa_id,token=None):
url = self.base_url+'PSA/M2Lplugins/'+psa_id+'/file'
params={}
if token:
params['token']=token
return delete(url, params=params)
def get_plugin_location (self, psa_id,token=None):
url = self.base_url+'PSA/M2Lplugins/'+psa_id+'/plugin_location'
params={}
if token:
params['token']=token
return get(url, params=params)
def get_psa_opt_par (self, psa_id, token=None):
url = self.base_url + 'PSA/opt_par/' + psa_id + '/'
params={}
if token:
params['token']=token
return get(url, params=params)
def get_psa_capabilities (self, psa_id, token=None):
url = self.base_url + 'PSA/capabilities/' + psa_id + '/'
params={}
if token:
params['token']=token
return get(url, params=params)
#PSARL
def put_psarl_location(self, psarl_id,new_location,token=None):
url = self.base_url+'PSARLs/'+psarl_id+'/?location='+new_location
params={}
if token:
params['token']=token
return put(url, params=params)
if __name__=='__main__':
#TO-DO: Take arguments (such as the url of the psar) from environment
#Functions
PSAR_URL=os.getenv('PSAR_URL','http://195.235.93.146:8080')
def list_psa(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
        if args.id:
            r=c.get_image_list(token=args.token,id=args.id)
        else:
            r=c.get_image_list(token=args.token)
data=json.loads(r.content)
print json.dumps(data,sort_keys=True,indent=4,separators=(',',':'))
def download_image(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
c.get_image_file(path=args.path,psa_id=args.id,token=args.token)
def upload_image(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
c.put_image_file(psa_id=args.id,path=args.path,disk_format=args.disk_format,container_format=args.container_format,token=args.token)
def delete_image(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
c.delete_image(psa_id=args.id,token=args.token)
def download_manifest(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
        r=c.get_manifest(psa_id=args.id,path=args.path,token=args.token)
def upload_manifest(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
c.put_manifest_file(psa_id=str(args.id),path=args.path,token=args.token)
def delete_manifest(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
        c.delete_manifest(psa_id=args.id,token=args.token)
def download_plugin(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
c.get_plugin_file(path=args.path,psa_id=args.id,token=args.token)
def upload_plugin(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
c.put_plugin_file(psa_id=args.id,path=args.path,token=args.token)
def delete_plugin(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
        c.delete_plugin(psa_id=args.id,token=args.token)
def create_psa(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
r=c.create_psa()
print r.text
def delete_psa(args):
if args.url:
c=Client(args.url)
else:
c=Client(PSAR_URL)
r=c.delete_psa(args.id)
#General
parser=argparse.ArgumentParser(description="Command line client for the PSAR API")
subparsers = parser.add_subparsers(help='groups')
create_parser=subparsers.add_parser('create',help='Creates a new empty PSA')
create_parser.add_argument('--url',action='store', help='URL of the PSAR')
create_parser.add_argument('--token',action='store', help='Authentication token')
create_parser.set_defaults(func=create_psa)
delete_parser=subparsers.add_parser('delete',help='Deletes the PSA')
delete_parser.add_argument('--url',action='store', help='URL of the PSAR')
delete_parser.add_argument('id',action='store', help='ID of the PSA to delete')
delete_parser.add_argument('--token',action='store', help='Authentication token')
delete_parser.set_defaults(func=delete_psa)
list_parser = subparsers.add_parser('list', help='List PSAs stored')
list_parser.add_argument('--id', action='store', help='Returns only matching PSAs')
list_parser.add_argument('--url',action='store', help='URL of the PSAR')
list_parser.add_argument('--token',action='store', help='Authentication token')
list_parser.set_defaults(func=list_psa)
#Images
image_parser=subparsers.add_parser('image',help='Interacts with the images')
image_subparser=image_parser.add_subparsers(help='commands')
# Download
download_image_parser=image_subparser.add_parser('download',help='Download an image')
download_image_parser.add_argument('id', action='store', help='PSA to download')
download_image_parser.add_argument('path', action='store', help='Path')
download_image_parser.add_argument('--url',action='store', help='URL of the PSAR')
download_image_parser.add_argument('--token',action='store', help='Authentication token')
download_image_parser.set_defaults(func=download_image)
# Upload
upload_image_parser=image_subparser.add_parser('upload', help='Upload an image')
upload_image_parser.add_argument('id', action='store', help='PSA to upload')
upload_image_parser.add_argument('path', action='store', help='Path')
upload_image_parser.add_argument('--disk_format', required=True, choices=['qcow2', 'raw', 'vhd', 'vmdk', 'vdi', 'iso', 'aki','ari','ami'] , action='store', help='Disk format')
upload_image_parser.add_argument('--container_format', required=True, choices=['bare', 'ovf', 'aki', 'ari', 'ami'], action='store', help='Container format')
upload_image_parser.add_argument('--name', action='store', help='Name')
upload_image_parser.add_argument('--status', action='store', help='status')
upload_image_parser.add_argument('--manifest_id', action='store', help='Manifest ID')
upload_image_parser.add_argument('--storage_id', action='store', help='Storage ID')
upload_image_parser.add_argument('--plugin_id', action='store', help='Plugin ID')
upload_image_parser.add_argument('--url',action='store', help='URL of the PSAR')
upload_image_parser.add_argument('--token',action='store', help='Authentication token')
upload_image_parser.set_defaults(func=upload_image)
# Delete
delete_image_parser=image_subparser.add_parser('delete', help='Delete an image')
delete_image_parser.add_argument('id', action='store', help='PSA to delete')
delete_image_parser.add_argument('--url',action='store', help='URL of the PSAR')
delete_image_parser.add_argument('--token',action='store', help='Authentication token')
delete_image_parser.set_defaults(func=delete_image)
#Manifest
manifest_parser=subparsers.add_parser('manifest', help='Interacts with the manifests')
manifest_subparser=manifest_parser.add_subparsers(help='commands')
# Download
download_manifest_parser=manifest_subparser.add_parser('download',help='Download a manifest')
download_manifest_parser.add_argument('id', action='store', help='ID of the PSA which manifest is to be downloaded')
download_manifest_parser.add_argument('path', action='store', help='Path')
download_manifest_parser.add_argument('--url',action='store', help='URL of the PSAR')
download_manifest_parser.add_argument('--token',action='store', help='Authentication token')
download_manifest_parser.set_defaults(func=download_manifest)
# Upload
upload_manifest_parser=manifest_subparser.add_parser('upload', help='Upload a manifest')
upload_manifest_parser.add_argument('id', action='store', help='ID of the PSA which manifest is to be uploaded')
upload_manifest_parser.add_argument('path', action='store', help='Path')
upload_manifest_parser.add_argument('--url',action='store', help='URL of the PSAR')
upload_manifest_parser.add_argument('--token',action='store', help='Authentication token')
upload_manifest_parser.set_defaults(func=upload_manifest)
# Delete
delete_manifest_parser=manifest_subparser.add_parser('delete', help='Delete a manifest')
delete_manifest_parser.add_argument('id', action='store', help='ID of the PSA which manifest is to be deleted')
delete_manifest_parser.add_argument('--url',action='store', help='URL of the PSAR')
delete_manifest_parser.add_argument('--token',action='store', help='Authentication token')
delete_manifest_parser.set_defaults(func=delete_manifest)
#Plugins
plugin_parser=subparsers.add_parser('plugin', help='Interacts with the plugins')
plugin_subparser=plugin_parser.add_subparsers(help='commands')
# Download
download_plugin_parser=plugin_subparser.add_parser('download',help='Download a plugin')
download_plugin_parser.add_argument('id', action='store', help='PSA to download')
download_plugin_parser.add_argument('path', action='store', help='Path')
download_plugin_parser.add_argument('--url',action='store', help='URL of the PSAR')
download_plugin_parser.add_argument('--token',action='store', help='Authentication token')
download_plugin_parser.set_defaults(func=download_plugin)
# Upload
upload_plugin_parser=plugin_subparser.add_parser('upload', help='Upload a plugin')
upload_plugin_parser.add_argument('id', action='store', help='PSA to upload')
upload_plugin_parser.add_argument('path', action='store', help='Path')
upload_plugin_parser.set_defaults(func=upload_plugin)
upload_plugin_parser.add_argument('--url',action='store', help='URL of the PSAR')
upload_plugin_parser.add_argument('--token',action='store', help='Authentication token')
# Delete
delete_plugin_parser=plugin_subparser.add_parser('delete', help='Delete a plugin')
delete_plugin_parser.add_argument('id', action='store', help='PSA to delete')
delete_plugin_parser.add_argument('--url',action='store', help='URL of the PSAR')
delete_plugin_parser.add_argument('--token',action='store', help='Authentication token')
delete_plugin_parser.set_defaults(func=delete_plugin)
args= parser.parse_args()
args.func(args)
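    #Example invocations (host URL hypothetical):
    #   python psarClient.py list --url http://psar.example.com:8080
    #   python psarClient.py image download <psa_id> ./psa.img --token <token>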
|
bgxavier/nova
|
refs/heads/master
|
nova/virt/disk/vfs/__init__.py
|
129
|
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Operations on virtual filesystems
"""
|
flgiordano/netcash
|
refs/heads/master
|
+/google-cloud-sdk/lib/surface/sql/ssl_certs/list.py
|
1
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Lists all SSL certs for a Cloud SQL instance."""
from googlecloudsdk.api_lib.sql import errors
from googlecloudsdk.api_lib.sql import validate
from googlecloudsdk.calliope import base
from googlecloudsdk.core import list_printer
class _BaseList(object):
"""Base class for sql ssl_certs list."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
pass
@errors.ReraiseHttpException
def Run(self, args):
"""Lists all SSL certs for a Cloud SQL instance.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
      A dict object that has the list of sslCerts resources if the API
      request was successful.
    Raises:
      HttpException: An HTTP error response was received while executing the
        API request.
      ToolException: An error other than an HTTP error occurred while
        executing the command.
"""
sql_client = self.context['sql_client']
sql_messages = self.context['sql_messages']
resources = self.context['registry']
validate.ValidateInstanceName(args.instance)
instance_ref = resources.Parse(args.instance, collection='sql.instances')
result = sql_client.sslCerts.List(sql_messages.SqlSslCertsListRequest(
project=instance_ref.project,
instance=instance_ref.instance))
return iter(result.items)
def Display(self, unused_args, result):
list_printer.PrintResourceList('sql.sslCerts', result)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class List(_BaseList, base.Command):
"""Lists all SSL certs for a Cloud SQL instance."""
pass
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class ListBeta(_BaseList, base.Command):
"""Lists all SSL certs for a Cloud SQL instance."""
pass
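# Exposed through the SDK CLI as, for example (instance name hypothetical):
#   gcloud sql ssl-certs list --instance my-instance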
|
chrissmejia/SpreadLabs
|
refs/heads/master
|
clients/asap_to/tests.py
|
15
|
"""
SpreadLabs - Social media suite
Copyright (C) 2015 Christopher Mejia Montoya - me@chrissmejia.com - chrissmejia.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from django.test import TestCase
class SimpleTest(TestCase):
def test_basic_addition(self):
"""
Tests that 1 + 1 always equals 2.
"""
self.assertEqual(1 + 1, 2)
|
hazrpg/calibre
|
refs/heads/master
|
src/calibre/utils/imghdr.py
|
14
|
"""Recognize image file formats based on their first few bytes."""
__all__ = ["what"]
# -------------------------#
# Recognize image headers #
# -------------------------#
def what(file, h=None):
if h is None:
if isinstance(file, basestring):
f = open(file, 'rb')
h = f.read(150)
else:
location = file.tell()
h = file.read(150)
file.seek(location)
f = None
else:
f = None
try:
for tf in tests:
res = tf(h, f)
if res:
return res
finally:
if f:
f.close()
    # There exist some jpeg files with no headers, only the starting two bytes.
    # If we cannot identify as anything else, identify as jpeg.
if h[:2] == b'\xff\xd8':
return 'jpeg'
return None
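# Usage sketch: `what` accepts a filename or an open binary stream.
#   what('cover.jpg')    # -> 'jpeg'
#   with open('logo.png', 'rb') as f:
#       print what(f)    # -> 'png'  (the stream position is restored)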
# ---------------------------------#
# Subroutines per image file type #
# ---------------------------------#
tests = []
def test_jpeg(h, f):
"""JPEG data in JFIF format (Changed by Kovid to mimic the file utility,
the original code was failing with some jpegs that included ICC_PROFILE
data, for example: http://nationalpostnews.files.wordpress.com/2013/03/budget.jpeg?w=300&h=1571)"""
if (h[6:10] in (b'JFIF', b'Exif')) or (h[:2] == b'\xff\xd8' and (b'JFIF' in h[:32] or b'8BIM' in h[:32])):
return 'jpeg'
tests.append(test_jpeg)
def test_png(h, f):
if h[:8] == "\211PNG\r\n\032\n":
return 'png'
tests.append(test_png)
def test_gif(h, f):
"""GIF ('87 and '89 variants)"""
if h[:6] in ('GIF87a', 'GIF89a'):
return 'gif'
tests.append(test_gif)
def test_tiff(h, f):
"""TIFF (can be in Motorola or Intel byte order)"""
if h[:2] in ('MM', 'II'):
return 'tiff'
tests.append(test_tiff)
def test_webp(h, f):
if h[:4] == b'RIFF' and h[8:12] == b'WEBP':
return 'webp'
tests.append(test_webp)
def test_rgb(h, f):
"""SGI image library"""
if h[:2] == '\001\332':
return 'rgb'
tests.append(test_rgb)
def test_pbm(h, f):
"""PBM (portable bitmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
return 'pbm'
tests.append(test_pbm)
def test_pgm(h, f):
"""PGM (portable graymap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
return 'pgm'
tests.append(test_pgm)
def test_ppm(h, f):
"""PPM (portable pixmap)"""
if len(h) >= 3 and \
h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
return 'ppm'
tests.append(test_ppm)
def test_rast(h, f):
"""Sun raster file"""
if h[:4] == '\x59\xA6\x6A\x95':
return 'rast'
tests.append(test_rast)
def test_xbm(h, f):
"""X bitmap (X10 or X11)"""
s = '#define '
if h[:len(s)] == s:
return 'xbm'
tests.append(test_xbm)
def test_bmp(h, f):
if h[:2] == 'BM':
return 'bmp'
tests.append(test_bmp)
def test_emf(h, f):
if h[:4] == b'\x01\0\0\0' and h[40:44] == b' EMF':
return 'emf'
tests.append(test_emf)
def test_svg(h, f):
if (h[:2] == b'<?' and h[2:5].lower() == 'xml' and b'<svg' in h) or h.startswith(b'<svg'):
return 'svg'
tests.append(test_svg)
# --------------------#
# Small test program #
# --------------------#
def test():
import sys
recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2]
recursive = 1
try:
if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1)
else:
testall(['.'], recursive, 1)
except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n')
sys.exit(1)
def testall(list, recursive, toplevel):
import sys
import os
for filename in list:
if os.path.isdir(filename):
print filename + '/:',
if recursive or toplevel:
print 'recursing down:'
import glob
names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0)
else:
print '*** directory (use -r) ***'
else:
print filename + ':',
sys.stdout.flush()
try:
print what(filename)
except IOError:
print '*** not found ***'
|
kanagasabapathi/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Lib/ctypes/test/test_checkretval.py
|
137
|
import unittest
from ctypes import *
class CHECKED(c_int):
def _check_retval_(value):
# Receives a CHECKED instance.
return str(value.value)
_check_retval_ = staticmethod(_check_retval_)
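# ctypes calls _check_retval_ on the raw result whenever a function's restype
# is set to a ctypes subclass that defines it, so restype = CHECKED below
# converts every return value to a string.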
class Test(unittest.TestCase):
def test_checkretval(self):
import _ctypes_test
dll = CDLL(_ctypes_test.__file__)
self.assertEqual(42, dll._testfunc_p_p(42))
dll._testfunc_p_p.restype = CHECKED
self.assertEqual("42", dll._testfunc_p_p(42))
dll._testfunc_p_p.restype = None
self.assertEqual(None, dll._testfunc_p_p(42))
del dll._testfunc_p_p.restype
self.assertEqual(42, dll._testfunc_p_p(42))
try:
oledll
except NameError:
pass
else:
def test_oledll(self):
self.assertRaises(WindowsError,
oledll.oleaut32.CreateTypeLib2,
0, None, None)
if __name__ == "__main__":
unittest.main()
|
h3biomed/ansible
|
refs/heads/h3
|
lib/ansible/modules/network/junos/junos_l2_interface.py
|
26
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_l2_interface
version_added: "2.5"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage Layer-2 interface on Juniper JUNOS network devices
description:
- This module provides declarative management of Layer-2 interface
on Juniper JUNOS network devices.
options:
name:
description:
- Name of the interface excluding any logical unit number.
description:
description:
- Description of Interface.
aggregate:
description:
- List of Layer-2 interface definitions.
mode:
description:
- Mode in which interface needs to be configured.
choices: ['access', 'trunk']
  access_vlan:
    description:
      - Configure given VLAN in access port. The value of C(access_vlan) should
        be a VLAN name.
  trunk_vlans:
    description:
      - List of VLAN names to be configured in trunk port. The value of
        C(trunk_vlans) should be a list of VLAN names.
  native_vlan:
    description:
      - Native VLAN to be configured in trunk port. The value of C(native_vlan)
        should be a VLAN id.
enhanced_layer:
description:
- True if your device has Enhanced Layer 2 Software (ELS).
default: True
type: bool
version_added: "2.7"
unit:
description:
- Logical interface number. Value of C(unit) should be of type
integer.
default: 0
filter_input:
description:
- The name of input filter of ethernet-switching.
version_added: "2.8"
filter_output:
description:
- The name of output filter of ethernet-switching.
version_added: "2.8"
state:
description:
- State of the Layer-2 Interface configuration.
default: present
choices: ['present', 'absent',]
active:
description:
      - Specifies whether the configuration is active or deactivated.
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
EXAMPLES = """
- name: Configure interface in access mode
junos_l2_interface:
name: ge-0/0/1
description: interface-access
mode: access
access_vlan: red
active: True
state: present
- name: Configure interface in trunk mode
junos_l2_interface:
name: ge-0/0/1
description: interface-trunk
mode: trunk
trunk_vlans:
- blue
- green
native_vlan: 100
active: True
state: present
- name: Configure interface in access and trunk mode using aggregate
junos_l2_interface:
aggregate:
- name: ge-0/0/1
description: test-interface-access
mode: access
access_vlan: red
- name: ge-0/0/2
description: test-interface-trunk
mode: trunk
trunk_vlans:
- blue
- green
native_vlan: 100
active: True
state: present
"""
RETURN = """
diff:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: str
sample: >
[edit interfaces]
+ ge-0/0/1 {
+ description "l2 interface configured by Ansible";
+ unit 0 {
+ family ethernet-switching {
+ interface-mode access;
+ vlan {
+ members red;
+ }
+ }
+ }
+ }
"""
import collections
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.junos.junos import junos_argument_spec, tostring
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config, to_param_list
USE_PERSISTENT_CONNECTION = True
def validate_vlan_id(value, module):
    if value and not 1 <= value <= 4094:
        module.fail_json(msg='vlan_id must be between 1 and 4094')
def validate_param_values(module, obj, param=None):
if not param:
param = module.params
for key in obj:
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if callable(validator):
validator(param.get(key), module)
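# Validator dispatch is by naming convention: a key such as 'native_vlan' in
# the xpath map is checked by a global validate_native_vlan() function if one
# exists; keys without a matching validate_<key> function are skipped.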
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
mode=dict(choices=['access', 'trunk']),
access_vlan=dict(),
native_vlan=dict(type='int'),
trunk_vlans=dict(type='list'),
unit=dict(default=0, type='int'),
filter_input=dict(),
filter_output=dict(),
description=dict(),
enhanced_layer=dict(default=True, type='bool'),
state=dict(default='present', choices=['present', 'absent']),
active=dict(default=True, type='bool')
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate'],
['access_vlan', 'trunk_vlans'],
['access_vlan', 'native_vlan']]
required_if = [('mode', 'access', ('access_vlan',)),
('mode', 'trunk', ('trunk_vlans',))]
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec, mutually_exclusive=mutually_exclusive, required_if=required_if),
)
argument_spec.update(element_spec)
argument_spec.update(junos_argument_spec)
module = AnsibleModule(argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
required_one_of=required_one_of,
required_if=required_if)
warnings = list()
result = {'changed': False}
if warnings:
result['warnings'] = warnings
top = 'interfaces/interface'
param_to_xpath_map = collections.OrderedDict()
param_to_xpath_map.update([
('name', {'xpath': 'name', 'is_key': True}),
('unit', {'xpath': 'name', 'top': 'unit', 'is_key': True}),
('mode', {'xpath': 'interface-mode', 'top': 'unit/family/ethernet-switching'}),
('access_vlan', {'xpath': 'members', 'top': 'unit/family/ethernet-switching/vlan'}),
('trunk_vlans', {'xpath': 'members', 'top': 'unit/family/ethernet-switching/vlan'}),
('filter_input', {'xpath': 'input', 'top': 'unit/family/ethernet-switching/filter'}),
('filter_output', {'xpath': 'output', 'top': 'unit/family/ethernet-switching/filter'}),
('native_vlan', {'xpath': 'native-vlan-id'}),
('description', 'description')
])
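    # For instance, the 'mode' entry above tells map_obj_to_ele() to emit the
    # value under interfaces/interface/unit/family/ethernet-switching/
    # interface-mode in the generated configuration element.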
params = to_param_list(module)
requests = list()
for param in params:
# if key doesn't exist in the item, get it from module.params
for key in param:
if param.get(key) is None:
param[key] = module.params[key]
item = param.copy()
validate_param_values(module, param_to_xpath_map, param=item)
param_to_xpath_map['mode']['xpath'] = \
'interface-mode' if param['enhanced_layer'] else 'port-mode'
want = map_params_to_obj(module, param_to_xpath_map, param=item)
requests.append(map_obj_to_ele(module, want, top, param=item))
diff = None
with locked_config(module):
for req in requests:
diff = load_config(module, tostring(req), warnings, action='replace')
commit = not module.check_mode
if diff:
if commit:
commit_configuration(module)
else:
discard_changes(module)
result['changed'] = True
if module._diff:
result['diff'] = {'prepared': diff}
module.exit_json(**result)
if __name__ == "__main__":
main()
|
jart/tensorflow
|
refs/heads/master
|
tensorflow/contrib/autograph/pyct/compiler.py
|
1
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converting AST to code.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# TODO(mdan): Use six for compatibility here.
import atexit
import imp
import os
import tempfile
import astor
import gast
from tensorflow.contrib.autograph.pyct import anno
from tensorflow.contrib.autograph.pyct import ast_util
from tensorflow.contrib.autograph.pyct import origin_info
from tensorflow.contrib.autograph.pyct import parser
def _build_source_map(node, code):
"""Return the Python objects represented by given AST.
Compiling the AST code this way ensures that the source code is readable by
e.g. `pdb` or `inspect`.
Args:
node: An AST node of the original generated code, before the source code is
generated.
code: The string representation of the source code for the newly generated
code.
Returns:
Dict[CodeLocation, OriginInfo], a mapping between the user and AutoGraph
generated code.
"""
# After we have the final generated code we reparse it to get the final line
# numbers. Then we walk through the generated and original ASTs in parallel
# to build the mapping between the user and generated code.
new_node = parser.parse_str(code)
origin_info.resolve(new_node, code)
source_mapping = {}
for before, after in ast_util.parallel_walk(node, new_node):
# Need both checks because if origin information is ever copied over to new
# nodes then we need to rely on the fact that only the original user code
# has the origin annotation.
if (anno.hasanno(before, anno.Basic.ORIGIN) and
anno.hasanno(after, anno.Basic.ORIGIN)):
source_info = anno.getanno(before, anno.Basic.ORIGIN)
new_line_number = anno.getanno(after, anno.Basic.ORIGIN).line_number
source_mapping[new_line_number] = source_info
return source_mapping
def ast_to_source(node, indentation=' '):
"""Return the source code of given AST.
Args:
node: The code to compile, as an AST object.
indentation: The string to use for indentation.
Returns:
code: The source code generated from the AST object
source_mapping: A mapping between the user and AutoGraph generated code.
"""
original_node = node
if isinstance(node, gast.AST):
node = gast.gast_to_ast(node)
generator = astor.codegen.SourceGenerator(indentation, False,
astor.string_repr.pretty_string)
generator.visit(node)
generator.result.append('\n')
# In some versions of Python, literals may appear as actual values. This
# ensures everything is string.
code = map(str, generator.result)
code = astor.source_repr.pretty_source(code).lstrip()
source_mapping = _build_source_map(original_node, code)
return code, source_mapping
def ast_to_object(node,
indentation=' ',
source_prefix=None,
delete_on_exit=True):
"""Return the Python objects represented by given AST.
Compiling the AST code this way ensures that the source code is readable by
e.g. `pdb` or `inspect`.
Args:
node: The code to compile, as an AST object.
indentation: The string to use for indentation.
source_prefix: Optional string to print as-is into the source file.
delete_on_exit: Whether to delete the temporary file used for compilation on
exit.
Returns:
compiled_node: A module object containing the compiled source code.
source: The source code of the compiled object
Raises:
ValueError: If ag_source_map__ is already in the namespace of the compiled
node.
"""
# code_source_mapping does not yet include the offsets from import statements.
source, code_source_mapping = ast_to_source(node, indentation=indentation)
with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
# TODO(znado): move into an _offset_source_map() helper function.
# Need to offset the generated line numbers by the number of import lines.
if source_prefix:
num_import_lines = source_prefix.count('\n') + 1
else:
num_import_lines = 0
source_mapping = {}
for line_number, original_position in code_source_mapping.items():
source_map_key = origin_info.CodeLocation(
file_path=f.name, line_number=line_number + num_import_lines)
source_mapping[source_map_key] = original_position
module_name = os.path.basename(f.name[:-3])
if source_prefix:
f.write(source_prefix)
f.write('\n')
f.write(source)
if delete_on_exit:
atexit.register(lambda: os.remove(f.name))
compiled_node = imp.load_source(module_name, f.name)
# TODO(znado): Clean this up so we don't need to attach it to the namespace.
# TODO(znado): This does not work for classes because their methods share a
# namespace.
# This attaches the source map which is needed for error handling. Note that
# api.to_graph copies this source map into an attribute of the function.
#
# We need this so the ag_source_map__ variable is available to the call to
# rewrite_graph_construction_error in the except block inside each function
# that handles graph construction errors.
#
# We cannot get the rewritten function name until it is too late so templating
# is hard, and this cleanly fixes the
# issues encountered with nested functions because this is attached to the
# outermost one.
source_map_name = 'ag_source_map__'
if source_map_name in compiled_node.__dict__:
    raise ValueError('cannot convert %s because it has namespace attribute '
'"%s", which is reserved for AutoGraph.' %
(compiled_node, source_map_name))
compiled_node.__dict__[source_map_name] = source_mapping
return compiled_node, source
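# A minimal usage sketch (assuming parser.parse_str returns the parsed gast
# node, as in _build_source_map above):
#   node = parser.parse_str('def double(x):\n  return x * 2\n')
#   module, source = ast_to_object(node)
#   module.double(3)  # -> 6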
|
yorkerlin/shogun
|
refs/heads/develop
|
examples/undocumented/python_modular/distance_normsquared_modular.py
|
26
|
#!/usr/bin/env python
traindat = '../data/fm_train_real.dat'
testdat = '../data/fm_test_real.dat'
parameter_list = [[traindat,testdat],[traindat,testdat]]
def distance_normsquared_modular (train_fname=traindat,test_fname=testdat):
from modshogun import RealFeatures, EuclideanDistance, CSVFile
feats_train=RealFeatures(CSVFile(train_fname))
feats_test=RealFeatures(CSVFile(test_fname))
distance=EuclideanDistance(feats_train, feats_train)
distance.set_disable_sqrt(True)
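	# with sqrt disabled, the matrices below hold squared Euclidean
	# distances, i.e. d(x, y) = ||x - y||^2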
dm_train=distance.get_distance_matrix()
distance.init(feats_train, feats_test)
dm_test=distance.get_distance_matrix()
return distance,dm_train,dm_test
if __name__=='__main__':
print('EuclideanDistance - NormSquared')
distance_normsquared_modular(*parameter_list[0])
|
mcbrune/delphixpy_automation
|
refs/heads/master
|
v1_8_0/dx_provision_vdb.py
|
3
|
#!/usr/bin/env python
#Adam Bowen - Apr 2016
#This script provisions a vdb or dSource
# Updated by Corey Brune Aug 2016
# --- Create vFiles VDB
#requirements
#pip install docopt delphixpy
#The below doc follows the POSIX compliant standards and allows us to use
#this doc to also define our arguments for the script.
#TODO:
# Refactor provisioning functions
# Documentation
"""Provision VDB's
Usage:
dx_provision_db.py --source <name> --target_grp <name> --target <name>
(--db <name> | --vfiles_path <path>) [--no_truncate_log]
(--environment <name> --type <type>) [ --envinst <name>]
[--template <name>] [--mapfile <file>]
[--timestamp_type <type>] [--timestamp <timepoint_semantic>]
[--timeflow <name>]
[--instname <sid>] [--mntpoint <path>] [--noopen]
[--uniqname <name>][--source_grp <name>]
[--engine <identifier> | --all]
[--vdb_restart <bool> ]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
[--postrefresh <name>] [--prerefresh <name>]
[--configure-clone <name>]
[--prerollback <name>] [--postrollback <name>]
dx_provision_db.py -h | --help | -v | --version
Provision VDB from a defined source on the defined target environment.
Examples:
dx_provision_vdb.py --engine landsharkengine --source_grp Sources --source "ASE pubs3 DB" --db vase --target testASE --target_grp Analytics --environment LINUXTARGET --type ase --envinst "LINUXTARGET"
dx_provision_vdb.py --source_grp Sources --source "Employee Oracle 11G DB" --instname autod --uniqname autoprod --db autoprod --target autoprod --target_grp Analytics --environment LINUXTARGET --type oracle --envinst "/u01/app/oracle/product/11.2.0/dbhome_1"
dx_provision_vdb.py --source_grp Sources --source "AdventureWorksLT2008R2" --db vAW --target testAW --target_grp Analytics --environment WINDOWSTARGET --type mssql --envinst MSSQLSERVER --all
dx_provision_vdb.py --source UF_Source --target appDataVDB --target_grp Untitled --environment LinuxTarget --type vfiles --vfiles_path /mnt/provision/appDataVDB --prerollback "/u01/app/oracle/product/scripts/PreRollback.sh" --postrollback "/u01/app/oracle/product/scripts/PostRollback.sh" --vdb_restart true
Options:
--source_grp <name> The group where the source resides.
--source <name> Name of the source object
--target_grp <name> The group into which Delphix will place the VDB.
--target <name> The unique name that you want to call this object
in Delphix
--db <name> The name you want to give the database (Oracle Only)
--vfiles_path <path> The full path on the Target server where Delphix
will provision the vFiles
--no_truncate_log Don't truncate log on checkpoint (ASE only)
--environment <name> The name of the Target environment in Delphix
--type <type> The type of VDB this is.
oracle | mssql | ase | vfiles
--prerefresh <name> Pre-Hook commands
--postrefresh <name> Post-Hook commands
  --prerollback <name>      Pre-Hook commands
--postrollback <name> Post-Hook commands
--configure-clone <name> Configure Clone commands
--vdb_restart <bool> Either True or False. Default: False
--envinst <name> The identifier of the instance in Delphix.
ex. "/u01/app/oracle/product/11.2.0/dbhome_1"
ex. LINUXTARGET
--timeflow <name> Name of the timeflow from which you are provisioning
--timestamp_type <type> The type of timestamp you are specifying.
Acceptable Values: TIME, SNAPSHOT
[default: SNAPSHOT]
--timestamp <timepoint_semantic>
The Delphix semantic for the point in time from
which you want to provision your VDB.
Formats:
latest point in time or snapshot: LATEST
point in time: "YYYY-MM-DD HH24:MI:SS"
snapshot name: "@YYYY-MM-DDTHH24:MI:SS.ZZZ"
snapshot time from GUI: "YYYY-MM-DD HH24:MI"
[default: LATEST]
--template <name> Target VDB Template name (Oracle Only)
--mapfile <file> Target VDB mapping file (Oracle Only)
--instname <sid> Target VDB SID name (Oracle Only)
--uniqname <name> Target VDB db_unique_name (Oracle Only)
--mntpoint <path> Mount point for the VDB
[default: /mnt/provision]
--noopen Don't open database after provision (Oracle Only)
--engine <type> Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_provision_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = 'v.0.2.305'
import signal
import sys
import time
import traceback
import re
from docopt import docopt
from os.path import basename
from time import sleep, time
from delphixpy.delphix_engine import DelphixEngine
from delphixpy.exceptions import HttpError
from delphixpy.exceptions import JobError
from delphixpy.exceptions import RequestError
from delphixpy.web import database
from delphixpy.web import environment
from delphixpy.web import group
from delphixpy.web import job
from delphixpy.web import repository
from delphixpy.web import snapshot
from delphixpy.web import source
from delphixpy.web.database import template
from delphixpy.web.vo import VirtualSourceOperations
from delphixpy.web.vo import OracleDatabaseContainer
from delphixpy.web.vo import OracleInstance
from delphixpy.web.vo import OracleProvisionParameters
from delphixpy.web.vo import OracleSIConfig
from delphixpy.web.vo import OracleVirtualSource
from delphixpy.web.vo import TimeflowPointLocation
from delphixpy.web.vo import TimeflowPointSemantic
from delphixpy.web.vo import TimeflowPointTimestamp
from delphixpy.web.vo import ASEDBContainer
from delphixpy.web.vo import ASEInstanceConfig
from delphixpy.web.vo import ASEProvisionParameters
from delphixpy.web.vo import ASESIConfig
from delphixpy.web.vo import ASEVirtualSource
from delphixpy.web.vo import MSSqlProvisionParameters
from delphixpy.web.vo import MSSqlDatabaseContainer
from delphixpy.web.vo import MSSqlVirtualSource
from delphixpy.web.vo import MSSqlSIConfig
from delphixpy.web.vo import AppDataVirtualSource
from delphixpy.web.vo import AppDataProvisionParameters
from delphixpy.web.vo import AppDataDirectSourceConfig
from lib.DxTimeflow import DxTimeflow
from lib.DlpxException import DlpxException
from lib.GetSession import GetSession
from lib.GetReferences import find_dbrepo
from lib.GetReferences import find_obj_by_name
from lib.DxLogging import logging_est
from lib.DxLogging import print_info
from lib.DxLogging import print_debug
def create_ase_vdb(engine, server, jobs, vdb_group, vdb_name, environment_obj,
container_obj):
'''
Create a Sybase ASE VDB
'''
vdb_obj = find_database_by_name_and_group_name(engine, server,
vdb_group.name, vdb_name)
    if vdb_obj is None:
vdb_params = ASEProvisionParameters()
vdb_params.container = ASEDBContainer()
if arguments['--no_truncate_log']:
vdb_params.truncate_log_on_checkpoint = False
else:
vdb_params.truncate_log_on_checkpoint = True
vdb_params.container.group = vdb_group.reference
vdb_params.container.name = vdb_name
vdb_params.source = ASEVirtualSource()
vdb_params.source_config = ASESIConfig()
vdb_params.source_config.database_name = arguments['--db']
vdb_params.source_config.instance = ASEInstanceConfig()
vdb_params.source_config.instance.host = environment_obj.host
vdb_repo = find_dbrepo_by_environment_ref_and_name(engine, server,
"ASEInstance",
environment_obj.reference,
arguments['--envinst'])
vdb_params.source_config.repository = vdb_repo.reference
vdb_params.timeflow_point_parameters = set_timeflow_point(engine,
server,
container_obj)
vdb_params.timeflow_point_parameters.container = container_obj.reference
print_info("Provisioning " + vdb_name)
database.provision(server, vdb_params)
#Add the job into the jobs dictionary so we can track its progress
jobs[engine["hostname"]] = server.last_job
#return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return server.last_job
else:
print_info(engine["hostname"] + ": " + vdb_name + " already exists.")
return vdb_obj.reference
def create_mssql_vdb(engine, jobs, vdb_group, vdb_name,
environment_obj, container_obj):
'''
Create a MSSQL VDB
engine:
jobs:
vdb_group:
vdb_name,
environment_obj:
container_obj:
'''
vdb_obj = find_database_by_name_and_group_name(engine, dx_session_obj.server_session,
vdb_group.name, vdb_name)
    if vdb_obj is None:
vdb_params = MSSqlProvisionParameters()
vdb_params.container = MSSqlDatabaseContainer()
vdb_params.container.group = vdb_group.reference
vdb_params.container.name = vdb_name
vdb_params.source = MSSqlVirtualSource()
vdb_params.source.allow_auto_vdb_restart_on_host_reboot = False
vdb_params.source_config = MSSqlSIConfig()
vdb_params.source_config.database_name = arguments['--db']
vdb_params.source_config.repository = find_dbrepo(
dx_session_obj.server_session, 'MSSqlInstance', environment_obj.reference,
arguments['--envinst']).reference
vdb_params.timeflow_point_parameters = set_timeflow_point(engine,
dx_session_obj.server_session,
container_obj)
if not vdb_params.timeflow_point_parameters:
return
vdb_params.timeflow_point_parameters.container = \
container_obj.reference
print_info(engine["hostname"] + ":Provisioning " + vdb_name)
database.provision(dx_session_obj.server_session, vdb_params)
#Add the job into the jobs dictionary so we can track its progress
jobs[engine["hostname"]] = dx_session_obj.server_session.last_job
#return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return dx_session_obj.server_session.last_job
else:
print_info(engine["hostname"] + ": " + vdb_name + " already exists.")
return vdb_obj.reference
def create_vfiles_vdb(engine, jobs, vfiles_group, vfiles_name,
environment_obj, container_obj, pre_refresh=None,
post_refresh=None, pre_rollback=None,
post_rollback=None, configure_clone=None):
'''
Create a Vfiles VDB
'''
vfiles_obj = None
try:
vfiles_obj = find_obj_by_name(dx_session_obj.server_session,
database, vfiles_name)
except DlpxException:
pass
if vfiles_obj is None:
vfiles_repo = find_repo_by_environment_ref(engine,
'Unstructured Files',
environment_obj.reference)
vfiles_params = AppDataProvisionParameters()
vfiles_params.source = AppDataVirtualSource()
vfiles_params.source_config = AppDataDirectSourceConfig()
vdb_restart_reobj = re.compile('true', re.IGNORECASE)
if vdb_restart_reobj.search(str(arguments['--vdb_restart'])):
vfiles_params.source.allow_auto_vdb_restart_on_host_reboot = True
elif vdb_restart_reobj.search(str(arguments['--vdb_restart'])) is None:
vfiles_params.source.allow_auto_vdb_restart_on_host_reboot = False
vfiles_params.container = { 'type': 'AppDataContainer',
'group': vfiles_group.reference,
'name': vfiles_name }
vfiles_params.source_config.name = arguments['--target']
vfiles_params.source_config.path = arguments['--vfiles_path']
vfiles_params.source_config.environment_user = \
environment_obj.primary_user
vfiles_params.source_config.repository = vfiles_repo.reference
vfiles_params.source.parameters = {}
        vfiles_params.source.name = vfiles_name
vfiles_params.source.operations = VirtualSourceOperations()
if pre_refresh:
vfiles_params.source.operations.pre_refresh = [{ 'type':
'RunCommandOnSourceOperation',
'command': pre_refresh }]
if post_refresh:
vfiles_params.source.operations.post_refresh = [{ 'type':
'RunCommandOnSourceOperation',
'command': post_refresh }]
if pre_rollback:
vfiles_params.source.operations.pre_rollback = [{ 'type':
'RunCommandOnSourceOperation',
'command': pre_rollback }]
if post_rollback:
vfiles_params.source.operations.post_rollback = [{ 'type':
'RunCommandOnSourceOperation',
'command': post_rollback }]
if configure_clone:
vfiles_params.source.operations.configure_clone = [{ 'type':
'RunCommandOnSourceOperation',
'command': configure_clone }]
if arguments['--timestamp_type'] is None:
vfiles_params.timeflow_point_parameters = {
'type': 'TimeflowPointSemantic',
'container': container_obj.reference,
'location': 'LATEST_POINT'}
elif arguments['--timestamp_type'].upper() == 'SNAPSHOT':
try:
dx_timeflow_obj = DxTimeflow(dx_session_obj.server_session)
dx_snap_params = dx_timeflow_obj.set_timeflow_point(
container_obj,
arguments['--timestamp_type'],
arguments['--timestamp'],
arguments['--timeflow'])
except RequestError as e:
raise DlpxException('Could not set the timeflow point:\n%s'
% (e))
if dx_snap_params.type == 'TimeflowPointSemantic':
vfiles_params.timeflow_point_parameters = {'type':
dx_snap_params.type,
'container':
dx_snap_params.container,
'location':
dx_snap_params.location}
elif dx_snap_params.type == 'TimeflowPointTimestamp':
vfiles_params.timeflow_point_parameters = {'type':
dx_snap_params.type,
'timeflow':
dx_snap_params.timeflow,
'timestamp':
dx_snap_params.timestamp}
print_info('%s: Provisioning %s\n' % (engine["hostname"],
vfiles_name))
try:
database.provision(dx_session_obj.server_session, vfiles_params)
except (JobError, RequestError, HttpError) as e:
raise DlpxException('\nERROR: Could not provision the database:'
'\n%s' % (e))
#Add the job into the jobs dictionary so we can track its progress
jobs[engine['hostname']] = dx_session_obj.server_session.last_job
#return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return dx_session_obj.server_session.last_job
else:
print_info('\nERROR %s: %s already exists. \n' % (engine['hostname'],
vfiles_name))
return vfiles_obj.reference
def create_oracle_si_vdb(engine, jobs, vdb_name, vdb_group_obj,
environment_obj, container_obj, pre_refresh=None,
post_refresh=None, pre_rollback=None,
post_rollback=None, configure_clone=None):
'''
Create an Oracle SI VDB
'''
vdb_obj = None
try:
vdb_obj = find_obj_by_name(dx_session_obj.server_session, database,
vdb_name)
except DlpxException:
pass
    if vdb_obj is None:
vdb_params = OracleProvisionParameters()
vdb_params.open_resetlogs = True
if arguments['--noopen']:
vdb_params.open_resetlogs = False
vdb_params.container = OracleDatabaseContainer()
vdb_params.container.group = vdb_group_obj.reference
vdb_params.container.name = vdb_name
vdb_params.source = OracleVirtualSource()
vdb_params.source.allow_auto_vdb_restart_on_host_reboot = False
        if arguments['--instname']:
            inst_name = arguments['--instname']
        else:
            inst_name = vdb_name
        if arguments['--uniqname']:
            unique_name = arguments['--uniqname']
        else:
            unique_name = vdb_name
        if arguments['--db']:
            db = arguments['--db']
        else:
            db = vdb_name
vdb_params.source.mount_base = arguments['--mntpoint']
if arguments['--mapfile']:
vdb_params.source.file_mapping_rules = arguments['--mapfile']
if arguments['--template']:
template_obj = find_obj_by_name(dx_session_obj.server_session,
database.template,
arguments['--template'])
vdb_params.source.config_template = template_obj.reference
vdb_params.source_config = OracleSIConfig()
vdb_params.source.operations = VirtualSourceOperations()
if pre_refresh:
vdb_params.source.operations.pre_refresh = [{ 'type':
'RunCommandOnSourceOperation',
'command': pre_refresh }]
if post_refresh:
vdb_params.source.operations.post_refresh = [{ 'type':
'RunCommandOnSourceOperation',
'command': post_refresh }]
if pre_rollback:
vdb_params.source.operations.pre_rollback = [{ 'type':
'RunCommandOnSourceOperation',
'command': pre_rollback }]
if post_rollback:
vdb_params.source.operations.post_rollback = [{ 'type':
'RunCommandOnSourceOperation',
'command': post_rollback }]
if configure_clone:
vdb_params.source.operations.configure_clone = [{ 'type':
'RunCommandOnSourceOperation',
'command': configure_clone }]
vdb_repo = find_dbrepo_by_environment_ref_and_install_path(engine,
dx_session_obj.server_session,
'OracleInstall',
environment_obj.reference,
arguments['--envinst'])
vdb_params.source_config.database_name = db
vdb_params.source_config.unique_name = unique_name
vdb_params.source_config.instance = OracleInstance()
vdb_params.source_config.instance.instance_name = inst_name
vdb_params.source_config.instance.instance_number = 1
vdb_params.source_config.repository = vdb_repo.reference
dx_timeflow_obj = DxTimeflow(dx_session_obj.server_session)
vdb_params.timeflow_point_parameters = \
dx_timeflow_obj.set_timeflow_point(container_obj,
arguments['--timestamp_type'],
arguments['--timestamp'])
        print_debug(vdb_params, debug)
print_info(engine["hostname"] + ": Provisioning " + vdb_name)
database.provision(dx_session_obj.server_session, vdb_params)
#Add the job into the jobs dictionary so we can track its progress
jobs[engine['hostname']] = dx_session_obj.server_session.last_job
#return the job object to the calling statement so that we can tell if
# a job was created or not (will return None, if no job)
return dx_session_obj.server_session.last_job
else:
        raise DlpxException('\nERROR: %s: %s already exists\n' %
                            (engine['hostname'], vdb_name))
def find_all_databases_by_group_name(engine, server, group_name,
exclude_js_container=False):
"""
Easy way to quickly find databases by group name
"""
#First search groups for the name specified and return its reference
group_obj = find_obj_by_name(dx_session_obj.server_session, group,
group_name)
if group_obj:
        databases = database.get_all(server, group=group_obj.reference,
                       no_js_container_data_source=exclude_js_container)
return databases
def find_database_by_name_and_group_name(engine, server, group_name,
database_name):
databases = find_all_databases_by_group_name(engine, server, group_name)
for each in databases:
if each.name == database_name:
print_debug('%s: Found a match %s' % (engine['hostname'],
str(each.reference)))
return each
print_info('%s unable to find %s in %s' % (engine['hostname'],
database_name, group_name))
def find_dbrepo_by_environment_ref_and_install_path(engine, server,
install_type,
f_environment_ref,
f_install_path):
'''
Function to find database repository objects by environment reference and
install path, and return the object's reference as a string
You might use this function to find Oracle and PostGreSQL database repos.
'''
print_debug('%s: Searching objects in the %s class for one with the '
'environment reference of %s and an install path of %s' %
(engine['hostname'], install_type, f_environment_ref,
f_install_path), debug)
for obj in repository.get_all(server, environment=f_environment_ref):
if install_type == 'PgSQLInstall':
if (obj.type == install_type and
obj.installation_path == f_install_path):
print_debug('%s: Found a match %s' % (engine['hostname'],
str(obj.reference)), debug)
return obj
elif install_type == 'OracleInstall':
if (obj.type == install_type and
obj.installation_home == f_install_path):
                print_debug('%s: Found a match %s' % (engine['hostname'],
                            str(obj.reference)), debug)
                return obj
    raise DlpxException('%s: No Repo match found for type %s.\n' %
                        (engine["hostname"], install_type))
def find_repo_by_environment_ref(engine, repo_type, f_environment_ref,
f_install_path=None):
'''
Function to find unstructured file repository objects by environment
reference and name, and return the object's reference as a string
You might use this function to find Unstructured File repos.
'''
    print_debug('\n%s: Searching objects in the %s class for one with the '
'environment reference of %s\n' %
(engine['hostname'], repo_type, f_environment_ref), debug)
obj_ref = ''
all_objs = repository.get_all(dx_session_obj.server_session,
environment=f_environment_ref)
for obj in all_objs:
if obj.name == repo_type:
print_debug(engine['hostname'] + ': Found a match ' +
str(obj.reference))
return obj
elif obj.type == repo_type:
            print_debug('%s: Found a match %s' % (engine['hostname'],
str(obj.reference)), debug)
return obj
raise DlpxException('%s: No Repo match found for type %s\n' % (
engine['hostname'], repo_type))
def find_dbrepo_by_environment_ref_and_name(engine, repo_type,
f_environment_ref, f_name):
'''
Function to find database repository objects by environment reference and
name, and return the object's reference as a string
You might use this function to find MSSQL database repos.
'''
print_debug('%s: Searching objects in the %s class for one with the '
'environment reference of %s and a name of %s.' %
(engine['hostname'], repo_type, f_environment_ref, f_name),
debug)
obj_ref = ''
    all_objs = repository.get_all(dx_session_obj.server_session,
                                  environment=f_environment_ref)
for obj in all_objs:
if (repo_type == 'MSSqlInstance' or repo_type == 'ASEInstance'):
if (obj.type == repo_type and obj.name == f_name):
print_debug('%s: Found a match %s' % (engine['hostname'],
str(obj.reference)), debug)
return obj
elif repo_type == 'Unstructured Files':
            if obj.name == f_name:
print_debug('%s: Found a match %s' % (engine['hostname'],
str(obj.reference)), debug)
return obj
raise DlpxException('%s: No Repo match found for type %s\n' %
(engine['hostname'], repo_type))
def find_snapshot_by_database_and_name(engine, database_obj, snap_name):
"""
Find snapshots by database and name. Return snapshot reference.
engine: Dictionary of engines from config file.
database_obj: Database object to find the snapshot against
snap_name: Name of the snapshot
"""
snapshots = snapshot.get_all(dx_session_obj.server_session,
database=database_obj.reference)
matches = []
for snapshot_obj in snapshots:
        if str(snapshot_obj.name).startswith(snap_name):
matches.append(snapshot_obj)
for each in matches:
print_debug(each.name, debug)
if len(matches) == 1:
print_debug('%s: Found one and only one match. This is good.\n %s' %
(engine['hostname'], matches[0]), debug)
return matches[0]
elif len(matches) > 1:
raise DlpxException('%s: The name specified was not specific enough.'
' More than one match found.\n' %
(engine['hostname'],))
else:
raise DlpxException('%s: No matches found for the time specified.\n'
% (engine['hostname']))
def find_snapshot_by_database_and_time(engine, database_obj, snap_time):
snapshots = snapshot.get_all(dx_session_obj.server_session,
database=database_obj.reference)
matches = []
for snapshot_obj in snapshots:
        if str(snapshot_obj.latest_change_point.timestamp).startswith(snap_time):
matches.append(snapshot_obj)
if len(matches) == 1:
print_debug('%s": Found one and only one match. This is good.\n%s' %
(engine['hostname'], matches[0]), debug)
return matches[0]
elif len(matches) > 1:
print_debug(matches, debug)
raise DlpxException('%s: The time specified was not specific enough.'
'More than one match found.\n' %
(engine['hostname']))
else:
raise DlpxException('%s: No matches found for the time specified.\n'
% (engine['hostname']))
def find_source_by_database(engine, database_obj):
#The source tells us if the database is enabled/disables, virtual,
# vdb/dSource, or is a staging database.
    source_obj = source.get_all(dx_session_obj.server_session,
                                database=database_obj.reference)
#We'll just do a little sanity check here to ensure we only have a 1:1
# result.
if len(source_obj) == 0:
raise DlpxException('%s: Did not find a source for %s. Exiting.\n' %
(engine['hostname'], database_obj.name))
elif len(source_obj) > 1:
        raise DlpxException('%s: More than one source returned for %s. '
                            'Exiting.\n' % (engine['hostname'],
                                            database_obj.name))
return source_obj
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously
engine: Dictionary containing engine information
"""
#Establish these variables as empty for use later
environment_obj = None
source_objs = None
jobs = {}
try:
#Setup the connection to the Delphix Engine
dx_session_obj.serversess(engine['ip_address'], engine['username'],
engine['password'])
group_obj = find_obj_by_name(dx_session_obj.server_session, group,
arguments['--target_grp'])
#Get the reference of the target environment.
print_debug('Getting environment for %s\n' % (host_name), debug)
#Get the environment object by the hostname
environment_obj = find_obj_by_name(dx_session_obj.server_session,
environment, host_name)
except DlpxException as e:
print('\nERROR: Engine %s encountered an error while provisioning '
'%s:\n%s\n' % (engine['hostname'], arguments['--target'], e))
sys.exit(1)
print_debug('Getting database information for %s\n' %
(arguments['--source']), debug)
try:
#Get the database reference we are copying from the database name
database_obj = find_obj_by_name(dx_session_obj.server_session,
database, arguments['--source'])
except DlpxException:
return
thingstodo = ["thingtodo"]
#reset the running job count before we begin
i = 0
try:
with dx_session_obj.job_mode(single_thread):
while (len(jobs) > 0 or len(thingstodo) > 0):
arg_type = arguments['--type'].lower()
                if len(thingstodo) > 0:
if arg_type == "oracle":
create_oracle_si_vdb(engine, jobs, database_name,
group_obj, environment_obj,
database_obj,
arguments['--prerefresh'],
arguments['--postrefresh'],
arguments['--prerollback'],
arguments['--postrollback'],
arguments['--configure-clone'])
elif arg_type == "ase":
create_ase_vdb(engine, server, jobs, group_obj,
database_name, environment_obj,
database_obj)
elif arg_type == "mssql":
create_mssql_vdb(engine, jobs, group_obj,
database_name, environment_obj,
database_obj)
elif arg_type == "vfiles":
create_vfiles_vdb(engine, jobs, group_obj,
database_name, environment_obj,
database_obj,
arguments['--prerefresh'],
arguments['--postrefresh'],
arguments['--prerollback'],
arguments['--postrollback'],
arguments['--configure-clone'])
thingstodo.pop()
#get all the jobs, then inspect them
i = 0
for j in jobs.keys():
job_obj = job.get(dx_session_obj.server_session, jobs[j])
print_debug(job_obj, debug)
print_info(engine["hostname"] + ": VDB Provision: " +
job_obj.job_state)
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from
# the running jobs list.
del jobs[j]
else:
#If the job is in a running state, increment the
# running job count.
i += 1
print_info('%s: %s jobs running.' % (engine['hostname'],
str(i)))
#If we have running jobs, pause before repeating the checks.
if len(jobs) > 0:
sleep(float(arguments['--poll']))
except (DlpxException, JobError) as e:
print '\nError while provisioning %s:\n%s' % (database_name, e.message)
sys.exit(1)
def run_job():
"""
    This function runs the main_workflow asynchronously against all the servers
specified
No arguments required for run_job().
"""
#Create an empty list to store threads we create.
threads = []
#If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
#For each server in the dxtools.conf...
for delphix_engine in dx_session_obj.dlpx_engines:
                engine = dx_session_obj.dlpx_engines[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine))
except DlpxException as e:
print 'Error encountered in main_workflow:\n%s' % (e)
sys.exit(1)
elif arguments['--all'] is False:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dx_session_obj.dlpx_engines[arguments['--engine']]
print_info('Executing against Delphix Engine: %s\n' %
(arguments['--engine']))
except (DlpxException, RequestError, KeyError) as e:
raise DlpxException('\nERROR: Delphix Engine %s cannot be '
'found in %s. Please check your value '
'and try again. Exiting.\n' % (
arguments['--engine'], config_file_path))
else:
            #Else search for a default engine in the dxtools.conf
            engine = None
            for delphix_engine in dx_session_obj.dlpx_engines:
if dx_session_obj.dlpx_engines[delphix_engine]['default'] == \
'true':
engine = dx_session_obj.dlpx_engines[delphix_engine]
print_info('Executing against the default Delphix Engine '
'in the dxtools.conf: %s' % (
dx_session_obj.dlpx_engines[delphix_engine]['hostname']))
break
            if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
#run the job against the engine
threads.append(main_workflow(engine))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
def serversess(f_engine_address, f_engine_username, f_engine_password):
"""
Function to setup the session with the Delphix Engine
"""
    server_session = DelphixEngine(f_engine_address, f_engine_username,
f_engine_password, "DOMAIN")
return server_session
def set_exit_handler(func):
"""
This function helps us set the correct exit code
"""
signal.signal(signal.SIGTERM, func)
def set_timeflow_point(engine, server, container_obj):
"""
This returns the reference of the timestamp specified.
"""
if arguments['--timestamp_type'].upper() == "SNAPSHOT":
if arguments['--timestamp'].upper() == "LATEST":
print_debug('%s: Using the latest Snapshot.' %
(engine['hostname']), debug)
timeflow_point_parameters = TimeflowPointSemantic()
timeflow_point_parameters.container = container_obj.reference
timeflow_point_parameters.location = "LATEST_SNAPSHOT"
elif arguments['--timestamp'].startswith("@"):
print_debug('%s: Using a named snapshot' % (engine['hostname']),
debug)
            snapshot_obj = find_snapshot_by_database_and_name(
                engine, container_obj, arguments['--timestamp'])
            if snapshot_obj is not None:
                timeflow_point_parameters = TimeflowPointLocation()
timeflow_point_parameters.timeflow = snapshot_obj.timeflow
timeflow_point_parameters.location = \
snapshot_obj.latest_change_point.location
else:
raise DlpxException('%s: Was unable to use the specified '
'snapshot %s for database %s\n' %
(engine['hostname'],
arguments['--timestamp'],
container_obj.name))
else:
print_debug('%s: Using a time-designated snapshot' %
(engine['hostname']), debug)
            snapshot_obj = find_snapshot_by_database_and_time(
                engine, container_obj, arguments['--timestamp'])
            if snapshot_obj is not None:
                timeflow_point_parameters = TimeflowPointTimestamp()
timeflow_point_parameters.timeflow = snapshot_obj.timeflow
timeflow_point_parameters.timestamp = \
snapshot_obj.latest_change_point.timestamp
else:
raise DlpxException('%s: Was unable to find a suitable time '
                                    'for %s for database %s.\n' %
(engine['hostname'],
arguments['--timestamp'],
container_obj.name))
elif arguments['--timestamp_type'].upper() == "TIME":
if arguments['--timestamp'].upper() == "LATEST":
timeflow_point_parameters = TimeflowPointSemantic()
timeflow_point_parameters.location = "LATEST_POINT"
else:
raise DlpxException('%s: Only support a --timestamp value of '
'"latest" when used with timestamp_type '
                                'of time' % (engine['hostname']))
else:
        raise DlpxException('%s is not a valid timestamp_type. Exiting\n' %
(arguments['--timestamp_type']))
timeflow_point_parameters.container = container_obj.reference
return timeflow_point_parameters
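# A sketch of how set_timeflow_point resolves the CLI arguments above
# (argument values are illustrative):
#   --timestamp_type snapshot --timestamp LATEST  -> TimeflowPointSemantic
#   --timestamp_type snapshot --timestamp @<name> -> TimeflowPointLocation
#   --timestamp_type snapshot --timestamp <time>  -> TimeflowPointTimestamp
#   --timestamp_type time     --timestamp LATEST  -> TimeflowPointSemantic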
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
elapsed_minutes = round((time() - time_start)/60, +1)
return elapsed_minutes
def update_jobs_dictionary(engine, server, jobs):
"""
This function checks each job in the dictionary and updates its status or
removes it if the job is complete.
Return the number of jobs still running.
"""
#Establish the running jobs counter, as we are about to update the count
# from the jobs report.
i = 0
#get all the jobs, then inspect them
for j in jobs.keys():
job_obj = job.get(server, jobs[j])
print_debug('%s: %s' % (engine['hostname'], str(job_obj)), debug)
        print_info('%s: %s: %s' % (engine['hostname'], j,
                   job_obj.job_state))
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the running
# jobs list.
del jobs[j]
else:
#If the job is in a running state, increment the running job count.
i += 1
return i
def main(argv):
#We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global database_name
global host_name
global dx_session_obj
global debug
try:
dx_session_obj = GetSession()
debug = arguments['--debug']
logging_est(arguments['--logdir'], debug)
print_debug(arguments, debug)
time_start = time()
single_thread = False
config_file_path = arguments['--config']
print_info('Welcome to %s version %s' % (basename(__file__),
VERSION))
#Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
database_name = arguments['--target']
host_name = arguments['--environment']
#This is the function that will handle processing main_workflow for
# all the servers.
run_job()
elapsed_minutes = time_elapsed()
print_info('script took %s minutes to get this far. ' %
(str(elapsed_minutes)))
#Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except DlpxException as e:
"""
We use this exception handler when an error occurs in a function call.
"""
print('\nERROR: Please check the ERROR message below:\n%s' %
(e.message))
sys.exit(2)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
print('\nERROR: Connection failed to the Delphix Engine. Please '
'check the ERROR message below:\n%s' % (e.message))
sys.exit(2)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so
that we have actionable data
"""
        print 'A job failed in the Delphix Engine:\n%s' % (e.job)
elapsed_minutes = time_elapsed()
print_info('%s took %s minutes to get this far' % (basename(__file__),
str(elapsed_minutes)))
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug('You sent a CTRL+C to interrupt the process')
elapsed_minutes = time_elapsed()
print_info('%s took %s minutes to get this far' % (basename(__file__),
str(elapsed_minutes)))
except:
"""
Everything else gets caught here
"""
print(sys.exc_info()[0])
print(traceback.format_exc())
elapsed_minutes = time_elapsed()
print_info('%s took %s minutes to get this far' % (basename(__file__),
str(elapsed_minutes)))
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main(arguments)
|
kklmn/xrt
|
refs/heads/master
|
examples/withRaycing/01_SynchrotronSources/MAX-IV-IDs-Flux.py
|
1
|
# -*- coding: utf-8 -*-
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
#import matplotlib as mpl
import copy
import numpy as np
import matplotlib.pyplot as plt
try:
import xlwt
except ImportError:
xlwt = None
import xrt.backends.raycing.sources as rs
from xrt.backends.raycing.physconsts import SIE0
withUndulator = True
#withUndulator = False
#withUrgentUndulator = True
withUrgentUndulator = False
#withSRWUndulator = True
withSRWUndulator = False
def run(case):
eMax = 200100.
eN = 201
thetaMax, psiMax = 500e-6, 500e-6
if case == 'Balder':
Kmax = 8.446
# Kmax = 3
thetaMax, psiMax = 200e-6, 50e-6
# thetaMax, psiMax = 1130e-6/2, 1180e-6/2 # asked by Magnus
eMax, eN = 400100, 401
kwargs = dict(name='SoleilW50', eE=3.0, eI=0.5,
eEpsilonX=0.263, eEpsilonZ=0.008, betaX=9., betaZ=2.,
period=50., n=39, K=Kmax, eMax=eMax,
xPrimeMax=thetaMax*1e3, zPrimeMax=psiMax*1e3, distE='BW')
elif case == 'BioMAX & NanoMAX':
Kmax = 1.92
thetaMax, psiMax = 100e-6, 50e-6
kwargs = dict(name='IVU18.5', eE=3.0, eI=0.5,
eEpsilonX=0.263, eEpsilonZ=0.008, betaX=9., betaZ=2.,
period=18.5, n=108, K=Kmax, eMax=eMax,
xPrimeMax=thetaMax*1e3, zPrimeMax=psiMax*1e3, distE='BW')
elif case == 'Veritas' or case == 'Hippie':
thetaMax, psiMax = 100e-6, 50e-6
# thetaMax, psiMax = 100e-6, 200e-6 # asked by Magnus
# thetaMax, psiMax = 500e-6, 500e-6 # asked by Magnus
kwargs = dict(name='U48', eE=3.0, eI=0.5,
eEpsilonX=0.263, eEpsilonZ=0.008, betaX=9., betaZ=2.,
eMax=eMax,
xPrimeMax=thetaMax*1e3, zPrimeMax=psiMax*1e3, distE='BW')
if case == 'Veritas':
kwargs['period'] = 48.
kwargs['n'] = 81
kwargs['K'] = 4.51
if case == 'Hippie':
kwargs['period'] = 53.
kwargs['n'] = 73
kwargs['K'] = 5.28
sourceW = rs.Wiggler(**kwargs)
energy = np.linspace(100., eMax, eN)
theta = np.linspace(-1, 1, 101) * thetaMax
psi = np.linspace(-1, 1, 101) * psiMax
# theta = np.linspace(-1, 1, 15) * thetaMax
# psi = np.linspace(-1, 1, 15) * psiMax
dtheta, dpsi = theta[1] - theta[0], psi[1] - psi[0]
I0W = sourceW.intensities_on_mesh(energy, theta, psi)[0]
fluxW = I0W.sum(axis=(1, 2)) * dtheta * dpsi
dE = energy[1] - energy[0]
power = fluxW.sum()*dE*SIE0*1e3
print('total power = {} W'.format(power))
cumpower = np.cumsum(fluxW)*dE*SIE0*1e3 / power
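    # The median energy Ec is found by linear interpolation between the two
    # samples (x1, y1) and (x2, y2) that bracket cumpower = 0.5; solving the
    # straight line through them for y = 0.5 gives
    #   Ec = (0.5*(x2 - x1) - (y1*x2 - y2*x1)) / (y2 - y1)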
ind = np.argwhere(cumpower > 0.5)[0]
y1, y2 = cumpower[ind-1], cumpower[ind]
x1, x2 = energy[ind-1], energy[ind]
Ec = (0.5*(x2-x1) - (y1*x2-y2*x1)) / (y2-y1)
print('Ec = {0} eV'.format(Ec))
if withUrgentUndulator:
ukwargs = copy.copy(kwargs)
        del ukwargs['distE']
        del ukwargs['betaX']
        del ukwargs['betaZ']
ukwargs['eSigmaX'] = (kwargs['eEpsilonX']*kwargs['betaX']*1e3)**0.5
ukwargs['eSigmaZ'] = (kwargs['eEpsilonZ']*kwargs['betaZ']*1e3)**0.5
ukwargs['eMin'] = energy[0]
ukwargs['eMax'] = energy[-1]
ukwargs['eN'] = len(energy)-1
ukwargs['nx'] = len(theta)//2
ukwargs['nz'] = len(psi)//2
ukwargs['icalc'] = 3
sourceU = rs.UndulatorUrgent(**ukwargs)
I0U = sourceU.intensities_on_mesh()[0]
fluxUU = I0U.sum(axis=(1, 2)) * dtheta * dpsi * 4e6
fluxUU[fluxUU <= 0] = 1
fluxUU[np.isnan(fluxUU)] = 1
if withSRWUndulator:
import pickle
        with open(r'c:\Ray-tracing\srw\SRWres.pickle', 'rb') as f:
energySRW, thetaSRW, psiSRW, I0SRW = pickle.load(f)[0:4]
dtheta = thetaSRW[1] - thetaSRW[0]
dpsi = psiSRW[1] - psiSRW[0]
fluxSRWU = I0SRW.sum(axis=(1, 2)) * dtheta * dpsi
if withUndulator:
# kwargs['targetOpenCL'] = None
# kwargs['taper'] = 0, 4.2
# kwargs['gp'] = 1e-4 # needed if does not converge
sourceU = rs.Undulator(**kwargs)
I0U = sourceU.intensities_on_mesh(energy, theta, psi)[0]
fluxU = I0U.sum(axis=(1, 2)) * dtheta * dpsi
fig = plt.figure(figsize=(8, 6))
fig.suptitle(case, fontsize=14)
rect2d1 = [0.12, 0.12, 0.85, 0.8]
ax1 = fig.add_axes(rect2d1, aspect='auto')
rect2d2 = [0.22, 0.19, 0.33, 0.5]
ax2 = fig.add_axes(rect2d2, aspect='auto')
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
ax2.xaxis.set_ticks_position('bottom')
ax2.yaxis.set_ticks_position('left')
ax2.patch.set_visible(False)
for ax, lw in zip([ax1, ax2], [2, 2]):
# plot = ax.plot
plot = ax.semilogy
# plot = ax.loglog
plot(energy/1000., fluxW, '-', lw=lw, alpha=0.7,
label='xrt, as wiggler')
if withUndulator:
plot(energy/1000., fluxU, '-', lw=lw, alpha=0.7,
label='xrt, as undulator')
if withUrgentUndulator:
plot(energy/1000., fluxUU, '-', lw=lw, alpha=0.7,
label='Urgent')
if withSRWUndulator:
plot(energySRW/1000., fluxSRWU, '-', lw=lw, alpha=0.7,
label='SRW (zero emittance)')
if case == 'BioMAX & NanoMAX': # Spectra results
# fnames = ['bionano2.dc0', 'bionano3.dc0']
# labels = ['Spectra, accuracy {0}'.format(i) for i in [2, 3]]
fnames = ['bionano3.dc0']
labels = ['Spectra']
for fname, label in zip(fnames, labels):
e, f = np.loadtxt(fname, skiprows=10, usecols=(0, 1),
unpack=True)
plot(e/1000, f, lw=lw, alpha=0.7, label=label)
ax1.set_xlabel(u'energy (keV)')
ax1.set_ylabel(u'flux through {0:.0f}×{1:.0f} µrad² (ph/s/0.1%BW)'.format(
theta[-1]*2e6, psi[-1]*2e6))
ax1.set_xlim(1, eMax/1e3)
if case == 'Veritas' or case == 'Hippie':
ax1.set_ylim(1e1, None)
else:
ax1.set_ylim(1e3, None)
ax2.set_xlim(1, 30)
ax2.set_ylim(1e12, 2e15)
if withUndulator:
ax1.legend(loc='upper right')
plt.savefig(u'flux_{0}_{1:.0f}×{2:.0f}µrad².png'.format(
case, theta[-1]*2e6, psi[-1]*2e6))
if xlwt is not None:
wb = xlwt.Workbook()
ws = wb.add_sheet('flux')
ws.write(0, 0, u'energy (eV)')
ws.write(
0, 1, u'fluxW through {0:.0f}×{1:.0f} µrad² (ph/s/0.1%BW)'.format(
theta[-1]*2e6, psi[-1]*2e6))
if withUndulator:
ws.write(
0, 2,
u'fluxU through {0:.0f}×{1:.0f} µrad² (ph/s/0.1%BW)'.format(
theta[-1]*2e6, psi[-1]*2e6))
for i, e in enumerate(energy):
ws.write(i+1, 0, e)
ws.write(i+1, 1, fluxW[i])
if withUndulator:
                ws.write(i+1, 2, fluxU[i])
wb.save(u'flux_{0}_{1:.0f}×{2:.0f}µrad².xls'.format(
case, theta[-1]*2e6, psi[-1]*2e6))
plt.show()
if __name__ == '__main__':
# run('Balder')
run('BioMAX & NanoMAX')
# run('Veritas')
# run('Hippie')
|
brack3t/kenny-loggings
|
refs/heads/master
|
loggings/constants.py
|
1
|
ACTION_CREATE = 1
ACTION_UPDATE = 2
ACTION_DELETE = 3
|
Big-B702/python-for-android
|
refs/heads/master
|
python3-alpha/python3-src/Tools/scripts/parseentities.py
|
46
|
#!/usr/bin/env python3
""" Utility for parsing HTML entity definitions available from:
http://www.w3.org/ as e.g.
http://www.w3.org/TR/REC-html40/HTMLlat1.ent
Input is read from stdin, output is written to stdout in form of a
Python snippet defining a dictionary "entitydefs" mapping literal
entity name to character or numeric entity.
Marc-Andre Lemburg, mal@lemburg.com, 1999.
Use as you like. NO WARRANTIES.
"""
import re,sys
import TextTools
entityRE = re.compile('<!ENTITY +(\w+) +CDATA +"([^"]+)" +-- +((?:.|\n)+?) *-->')
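# A typical line this regex is meant to match (shortened example from the
# W3C .ent files):
#   <!ENTITY nbsp   CDATA "&#160;" -- no-break space -->
# group(1) is the entity name, group(2) the charcode, group(3) the comment.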
def parse(text,pos=0,endpos=None):
if endpos is None:
endpos = len(text)
d = {}
while 1:
m = entityRE.search(text,pos,endpos)
if not m:
break
name,charcode,comment = m.groups()
d[name] = charcode,comment
pos = m.end()
return d
def writefile(f,defs):
f.write("entitydefs = {\n")
items = sorted(defs.items())
for name, (charcode,comment) in items:
if charcode[:2] == '&#':
code = int(charcode[2:-1])
if code < 256:
charcode = "'\%o'" % code
else:
charcode = repr(charcode)
else:
charcode = repr(charcode)
comment = TextTools.collapse(comment)
f.write(" '%s':\t%s, \t# %s\n" % (name,charcode,comment))
f.write('\n}\n')
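# For the example entity above, writefile() would emit roughly:
#   entitydefs = {
#       'nbsp': '\240',  # no-break space
#   }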
if __name__ == '__main__':
if len(sys.argv) > 1:
infile = open(sys.argv[1])
else:
infile = sys.stdin
if len(sys.argv) > 2:
outfile = open(sys.argv[2],'w')
else:
outfile = sys.stdout
text = infile.read()
defs = parse(text)
writefile(outfile,defs)
|
projectcalico/calico-nova
|
refs/heads/calico-readme
|
nova/api/openstack/compute/contrib/virtual_interfaces.py
|
10
|
# Copyright (C) 2011 Midokura KK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The virtual interfaces extension."""
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova import compute
from nova import network
authorize = extensions.extension_authorizer('compute', 'virtual_interfaces')
def _translate_vif_summary_view(_context, vif):
"""Maps keys for VIF summary view."""
d = {}
d['id'] = vif['uuid']
d['mac_address'] = vif['address']
return d
class ServerVirtualInterfaceController(object):
"""The instance VIF API controller for the OpenStack API.
"""
def __init__(self):
self.compute_api = compute.API()
self.network_api = network.API()
super(ServerVirtualInterfaceController, self).__init__()
def _items(self, req, server_id, entity_maker):
"""Returns a list of VIFs, transformed through entity_maker."""
context = req.environ['nova.context']
instance = common.get_instance(self.compute_api, context, server_id,
want_objects=True)
vifs = self.network_api.get_vifs_by_instance(context, instance)
limited_list = common.limited(vifs, req)
res = [entity_maker(context, vif) for vif in limited_list]
return {'virtual_interfaces': res}
def index(self, req, server_id):
"""Returns the list of VIFs for a given instance."""
authorize(req.environ['nova.context'])
return self._items(req, server_id,
entity_maker=_translate_vif_summary_view)
class Virtual_interfaces(extensions.ExtensionDescriptor):
"""Virtual interface support."""
name = "VirtualInterfaces"
alias = "os-virtual-interfaces"
namespace = ("http://docs.openstack.org/compute/ext/"
"virtual_interfaces/api/v1.1")
updated = "2011-08-17T00:00:00Z"
def get_resources(self):
resources = []
res = extensions.ResourceExtension(
'os-virtual-interfaces',
controller=ServerVirtualInterfaceController(),
parent=dict(member_name='server', collection_name='servers'))
resources.append(res)
return resources
|
Laurawly/tvm-1
|
refs/heads/master
|
python/tvm/ir/container.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Additional container data structures used across IR variants."""
import tvm._ffi
from tvm.runtime import Object
from tvm.runtime.container import getitem_helper
from tvm.runtime import _ffi_api
@tvm._ffi.register_object("Array")
class Array(Object):
"""Array container of TVM.
You do not need to create Array explicitly.
Normally python list and tuple will be converted automatically
to Array during tvm function call.
You may get Array in return values of TVM function call.
"""
def __getitem__(self, idx):
return getitem_helper(self, _ffi_api.ArrayGetItem, len(self), idx)
def __len__(self):
return _ffi_api.ArraySize(self)
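# Minimal usage sketch (assumes a working TVM installation):
#   arr = tvm.runtime.convert([1, 2, 3])  # lists convert to Array implicitly
#   assert len(arr) == 3
#   assert int(arr[0]) == 1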
@tvm._ffi.register_object
class Map(Object):
"""Map container of TVM.
You do not need to create Map explicitly.
    Normally python dict will be converted automatically to Map during
    tvm function call. You can use convert to create a Map from a
    python dict mapping Object to Object.
"""
def __getitem__(self, k):
return _ffi_api.MapGetItem(self, k)
def __contains__(self, k):
return _ffi_api.MapCount(self, k) != 0
def items(self):
"""Get the items from the map"""
akvs = _ffi_api.MapItems(self)
return [(akvs[i], akvs[i + 1]) for i in range(0, len(akvs), 2)]
def __len__(self):
return _ffi_api.MapSize(self)
def get(self, key, default=None):
"""Get an element with a default value.
Parameters
----------
key : object
The attribute key.
default : object
The default object.
Returns
-------
value: object
The result value.
"""
return self[key] if key in self else default
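# Minimal usage sketch (assumes a working TVM installation):
#   amap = tvm.runtime.convert({"a": 1})  # dicts convert to Map implicitly
#   assert len(amap) == 1
#   assert amap.get("missing") is None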
|
nickos556/pandas-qt
|
refs/heads/master
|
tests/test_DataFrameModel.py
|
3
|
# -*- coding: utf-8 -*-
import random
from pandasqt.compat import Qt, QtCore, QtGui
import pytest
import pytestqt
import decimal
import numpy
import pandas
from pandasqt.models.DataFrameModel import DataFrameModel, DATAFRAME_ROLE
from pandasqt.models.DataSearch import DataSearch
from pandasqt.models.SupportedDtypes import SupportedDtypes
def test_initDataFrame():
model = DataFrameModel()
assert model.dataFrame().empty
def test_initDataFrameWithDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
def test_setDataFrame():
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel()
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
with pytest.raises(TypeError) as excinfo:
model.setDataFrame(None)
assert "pandas.core.frame.DataFrame" in unicode(excinfo.value)
@pytest.mark.parametrize(
"copy, operator",
[
(True, numpy.not_equal),
(False, numpy.equal)
]
)
def test_copyDataFrame(copy, operator):
dataFrame = pandas.DataFrame([0], columns=['A'])
model = DataFrameModel(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
model.setDataFrame(dataFrame, copyDataFrame=copy)
assert operator(id(model.dataFrame()), id(dataFrame))
def test_TimestampFormat():
model = DataFrameModel()
assert model.timestampFormat == Qt.ISODate
newFormat = u"yy-MM-dd hh:mm"
model.timestampFormat = newFormat
assert model.timestampFormat == newFormat
with pytest.raises(TypeError) as excinfo:
model.timestampFormat = "yy-MM-dd hh:mm"
assert "unicode" in unicode(excinfo.value)
#def test_signalUpdate(qtbot):
#model = DataFrameModel()
#with qtbot.waitSignal(model.layoutAboutToBeChanged) as layoutAboutToBeChanged:
#model.signalUpdate()
#assert layoutAboutToBeChanged.signal_triggered
#with qtbot.waitSignal(model.layoutChanged) as blocker:
#model.signalUpdate()
#assert blocker.signal_triggered
@pytest.mark.parametrize(
"orientation, role, index, expectedHeader",
[
(Qt.Horizontal, Qt.EditRole, 0, None),
(Qt.Vertical, Qt.EditRole, 0, None),
(Qt.Horizontal, Qt.DisplayRole, 0, 'A'),
(Qt.Horizontal, Qt.DisplayRole, 1, None), # run into IndexError
(Qt.Vertical, Qt.DisplayRole, 0, 0),
(Qt.Vertical, Qt.DisplayRole, 1, 1)
]
)
def test_headerData(orientation, role, index, expectedHeader):
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.headerData(index, orientation, role) == expectedHeader
def test_flags():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
index = model.index(0, 0)
assert index.isValid()
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled
model.enableEditing(True)
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
model.setDataFrame(pandas.DataFrame([True], columns=['A']))
index = model.index(0, 0)
model.enableEditing(True)
assert model.flags(index) != Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsEditable
assert model.flags(index) == Qt.ItemIsSelectable | Qt.ItemIsEnabled | Qt.ItemIsUserCheckable
def test_rowCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.rowCount() == 1
model = DataFrameModel(pandas.DataFrame(numpy.arange(100), columns=['A']))
assert model.rowCount() == 100
def test_columnCount():
model = DataFrameModel(pandas.DataFrame([0], columns=['A']))
assert model.columnCount() == 1
model = DataFrameModel( pandas.DataFrame(numpy.arange(100).reshape(1, 100), columns=numpy.arange(100)) )
assert model.columnCount() == 100
class TestSort(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.mark.parametrize(
"signal",
[
"layoutAboutToBeChanged",
"layoutChanged",
"sortingAboutToStart",
"sortingFinished",
]
)
def test_signals(self, model, qtbot, signal):
with qtbot.waitSignal(getattr(model, signal)) as blocker:
model.sort(0)
assert blocker.signal_triggered
def test_returnValues(self, model):
model.sort(0)
@pytest.mark.parametrize(
"testAscending, modelAscending, isIdentic",
[
(True, Qt.AscendingOrder, True),
(False, Qt.DescendingOrder, True),
(True, Qt.DescendingOrder, False),
]
)
def test_sort(self, model, dataFrame, testAscending, modelAscending, isIdentic):
temp = dataFrame.sort('A', ascending=testAscending)
model.sort(0, order=modelAscending)
assert (dataFrame['A'] == temp['A']).all() == isIdentic
class TestData(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame(numpy.random.rand(10), columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
index = model.index(0, 0)
assert index.isValid()
return index
def test_invalidIndex(self, model):
assert model.data(QtCore.QModelIndex()) is None
def test_unknownRole(self, model, index):
assert index.isValid()
assert model.data(index, role="unknownRole") == None
def test_unhandledDtype(self, model, index):
dataFrame = pandas.DataFrame([92.289+151.96j], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.complex64)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == None
# with pytest.raises(TypeError) as excinfo:
# model.data(index)
# assert "unhandled data type" in unicode(excinfo.value)
@pytest.mark.parametrize(
"value, dtype", [
("test", object),
(u"äöü", object),
]
)
def test_strAndUnicode(self, model, index, value, dtype):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
@pytest.mark.parametrize(
"value, dtype, precision", [
(1, numpy.int8, None),
(1, numpy.int16, None),
(1, numpy.int32, None),
(1, numpy.int64, None),
(1, numpy.uint8, None),
(1, numpy.uint16, None),
(1, numpy.uint32, None),
(1, numpy.uint64, None),
(1.11111, numpy.float16, DataFrameModel._float_precisions[str('float16')]),
(1.11111111, numpy.float32, DataFrameModel._float_precisions[str('float32')]),
(1.1111111111111111, numpy.float64, DataFrameModel._float_precisions[str('float64')])
]
)
def test_numericalValues(self, model, index, value, dtype, precision):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
if precision:
modelValue = model.data(index, role=Qt.DisplayRole)
assert model.data(index) == round(value, precision)
assert model.data(index, role=Qt.DisplayRole) == round(value, precision)
assert model.data(index, role=Qt.EditRole) == round(value, precision)
else:
assert model.data(index) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
assert model.data(index, role=DATAFRAME_ROLE).dtype == dtype
#@pytest.mark.parametrize(
#"border1, modifier, border2, dtype", [
#("min", -1, "max", numpy.uint8),
#("max", +1, "min", numpy.uint8),
#("min", -1, "max", numpy.uint16),
#("max", +1, "min", numpy.uint16),
#("min", -1, "max", numpy.uint32),
#("max", +1, "min", numpy.uint32),
#("min", -1, "max", numpy.uint64),
##("max", +1, "min", numpy.uint64), # will raise OverFlowError caused by astype function,
## uneffects models data method
#("min", -1, "max", numpy.int8),
#("max", +1, "min", numpy.int8),
#("min", -1, "max", numpy.int16),
#("max", +1, "min", numpy.int16),
#("min", -1, "max", numpy.int32),
#("max", +1, "min", numpy.int32),
##("min", -1, "max", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
##("max", +1, "min", numpy.int64), # will raise OverFlowError caused by astype function
## uneffects models data method
#]
#)
#def test_integerBorderValues(self, model, index, border1, modifier, border2, dtype):
#ii = numpy.iinfo(dtype)
#dataFrame = pandas.DataFrame([getattr(ii, border1) + modifier], columns=['A'])
#dataFrame['A'] = dataFrame['A'].astype(dtype)
#model.setDataFrame(dataFrame)
#assert not model.dataFrame().empty
#assert model.dataFrame() is dataFrame
#assert index.isValid()
#assert model.data(index) == getattr(ii, border2)
@pytest.mark.parametrize(
"value, qtbool",
[
(True, Qt.Checked),
(False, Qt.Unchecked)
]
)
def test_bool(self, model, index, value, qtbool):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.bool_)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == qtbool
assert model.data(index, role=DATAFRAME_ROLE) == value
assert isinstance(model.data(index, role=DATAFRAME_ROLE), numpy.bool_)
def test_date(self, model, index):
pandasDate = pandas.Timestamp("1990-10-08T10:15:45")
qDate = QtCore.QDateTime.fromString(str(pandasDate), Qt.ISODate)
dataFrame = pandas.DataFrame([pandasDate], columns=['A'])
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
assert model.data(index, role=Qt.DisplayRole) == qDate
assert model.data(index, role=Qt.EditRole) == qDate
assert model.data(index, role=Qt.CheckStateRole) == None
assert model.data(index, role=DATAFRAME_ROLE) == pandasDate
assert isinstance(model.data(index, role=DATAFRAME_ROLE), pandas.Timestamp)
class TestSetData(object):
@pytest.fixture
def dataFrame(self):
return pandas.DataFrame([10], columns=['A'])
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
return model.index(0, 0)
def test_invalidIndex(self, model):
assert model.setData(QtCore.QModelIndex(), None) == False
def test_nothingHasChanged(self, model, index):
assert model.setData(index, 10) == False
def test_unhandledDtype(self, model, index):
dataFrame = pandas.DataFrame([92.289+151.96j], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.complex64)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
model.enableEditing(True)
with pytest.raises(TypeError) as excinfo:
model.setData(index, numpy.complex64(92+151j))
assert "unhandled data type" in unicode(excinfo.value)
@pytest.mark.parametrize(
"value, dtype", [
("test", object),
(u"äöü", object),
]
)
def test_strAndUnicode(self, model, index, value, dtype):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
newValue = u"{}123".format(value)
model.enableEditing(True)
assert model.setData(index, newValue)
assert model.data(index) == newValue
assert model.data(index, role=Qt.DisplayRole) == newValue
assert model.data(index, role=Qt.EditRole) == newValue
assert model.data(index, role=Qt.CheckStateRole) == None
assert model.data(index, role=DATAFRAME_ROLE) == newValue
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
@pytest.mark.parametrize(
"value, qtbool",
[
(True, Qt.Checked),
(False, Qt.Unchecked)
]
)
def test_bool(self, model, index, value, qtbool):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(numpy.bool_)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
model.enableEditing(True)
# pytest.set_trace()
# everything is already set as false and since Qt.Unchecked = 0, 0 == False
# therefore the assert will fail without further constraints
assert model.setData(index, qtbool) == value
assert model.data(index, role=Qt.DisplayRole) == value
assert model.data(index, role=Qt.EditRole) == value
assert model.data(index, role=Qt.CheckStateRole) == qtbool
assert model.data(index, role=DATAFRAME_ROLE) == value
assert isinstance(model.data(index, role=DATAFRAME_ROLE), numpy.bool_)
def test_date(self, model, index):
numpyDate = numpy.datetime64("1990-10-08T10:15:45+0100")
dataFrame = pandas.DataFrame([numpyDate], columns=['A'])
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
newDate = pandas.Timestamp("2000-12-08T10:15:45")
newQDate = QtCore.QDateTime.fromString(str(newDate), Qt.ISODate)
model.enableEditing(True)
assert model.setData(index, newQDate)
assert model.data(index, role=Qt.DisplayRole) == newQDate
assert model.data(index, role=Qt.EditRole) == newQDate
assert model.data(index, role=Qt.CheckStateRole) == None
assert model.data(index, role=DATAFRAME_ROLE) == newDate
assert isinstance(model.data(index, role=DATAFRAME_ROLE), pandas.Timestamp)
assert model.setData(index, 'foobar') == False
@pytest.mark.parametrize(
"value, dtype, precision", [
(1, numpy.int8, None),
(1, numpy.int16, None),
(1, numpy.int32, None),
(1, numpy.int64, None),
(1, numpy.uint8, None),
(1, numpy.uint16, None),
(1, numpy.uint32, None),
(1, numpy.uint64, None),
(1.11111, numpy.float16, DataFrameModel._float_precisions[str('float16')]),
(1.11111111, numpy.float32, DataFrameModel._float_precisions[str('float32')]),
(1.11111111111111111, numpy.float64, DataFrameModel._float_precisions[str('float64')])
]
)
def test_numericalValues(self, model, index, value, dtype, precision):
dataFrame = pandas.DataFrame([value], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
newValue = value + 1
model.enableEditing(True)
assert model.setData(index, newValue)
if precision:
modelValue = model.data(index, role=Qt.DisplayRole)
#assert abs(decimal.Decimal(str(modelValue)).as_tuple().exponent) == precision
assert model.data(index) == round(newValue, precision)
assert model.data(index, role=Qt.DisplayRole) == round(newValue, precision)
assert model.data(index, role=Qt.EditRole) == round(newValue, precision)
else:
assert model.data(index) == newValue
assert model.data(index, role=Qt.DisplayRole) == newValue
assert model.data(index, role=Qt.EditRole) == newValue
assert model.data(index, role=Qt.CheckStateRole) == None
assert isinstance(model.data(index, role=DATAFRAME_ROLE), dtype)
assert model.data(index, role=DATAFRAME_ROLE).dtype == dtype
@pytest.mark.parametrize(
"border, modifier, dtype", [
("min", -1, numpy.uint8),
("max", +1, numpy.uint8),
("min", -1, numpy.uint16),
("max", +1, numpy.uint16),
("min", -1, numpy.uint32),
("max", +1, numpy.uint32),
("min", -1, numpy.uint64),
("max", +1, numpy.uint64),
("min", -1, numpy.int8),
("max", +1, numpy.int8),
("min", -1, numpy.int16),
("max", +1, numpy.int16),
("min", -1, numpy.int32),
("max", +1, numpy.int32),
("min", -1, numpy.int64),
("max", +1, numpy.int64),
]
)
def test_integerBorderValues(self, model, index, border, modifier, dtype):
ii = numpy.iinfo(dtype)
value = getattr(ii, border) + modifier
dataFrame = pandas.DataFrame([getattr(ii, border)], columns=['A'])
dataFrame['A'] = dataFrame['A'].astype(dtype)
model.setDataFrame(dataFrame)
assert not model.dataFrame().empty
assert model.dataFrame() is dataFrame
assert index.isValid()
model.enableEditing(True)
assert model.setData(index, value)
assert model.data(index) == getattr(ii, border)
class TestFilter(object):
@pytest.fixture
def dataFrame(self):
data = [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]
]
columns = ['Foo', 'Bar', 'Spam', 'Eggs', 'Baz']
dataFrame = pandas.DataFrame(data, columns=columns)
return dataFrame
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def index(self, model):
return model.index(0, 0)
def test_filter_single_column(self, model, index):
filterString = 'Foo < 10'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows > postFilterRows
assert preFilterRows == (postFilterRows + 1)
def test_filter_freeSearch(self, model, index):
filterString = 'freeSearch("10")'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows > postFilterRows
assert preFilterRows == (postFilterRows + 2)
def test_filter_multiColumn(self, model, index):
filterString = '(Foo < 10) & (Bar > 1)'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows > postFilterRows
assert preFilterRows == (postFilterRows + 2)
def test_filter_unknown_keyword(self, model, index):
filterString = '(Foo < 10) and (Bar > 1)'
search = DataSearch("Test", filterString)
preFilterRows = model.rowCount()
model.setFilter(search)
postFilterRows = model.rowCount()
assert preFilterRows == postFilterRows
class TestEditMode(object):
@pytest.fixture
def dataFrame(self):
data = [
[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]
]
columns = ['Foo', 'Bar', 'Spam', 'Eggs', 'Baz']
dataFrame = pandas.DataFrame(data, columns=columns)
return dataFrame
@pytest.fixture
def model(self, dataFrame):
return DataFrameModel(dataFrame)
@pytest.fixture
def newColumns(self):
columns = []
for dtype, description in SupportedDtypes._all:
columns.append((description, dtype))
for _type in [int, float, bool, object]:
desc = 'default_%s' % (str(_type),)
columns.append((desc, _type))
return columns
def test_edit_data(self, model):
index = model.index(0, 0)
currentData = index.data()
assert not model.setData(index, 42)
assert index.data() == currentData
model.enableEditing(True)
assert model.setData(index, 42)
assert index.data() != currentData
assert index.data() == 42
def test_add_column(self, model, newColumns):
model.enableEditing(True)
columnCount = model.columnCount()
rowCount = model.rowCount()
for index, data in enumerate(newColumns):
desc, _type = data
if isinstance(_type, numpy.dtype):
defaultVal = _type.type()
if _type.type == numpy.datetime64:
defaultVal = pandas.Timestamp('')
else:
defaultVal = _type()
assert model.addDataFrameColumn(desc, _type, defaultVal)
for row in xrange(rowCount):
idx = model.index(row, columnCount + index)
newVal = idx.data(DATAFRAME_ROLE)
assert newVal == defaultVal
def test_remove_columns(self, model):
model.enableEditing(True)
df = model.dataFrame().copy()
columnNames = model.dataFrame().columns.tolist()
#remove a column which doesn't exist
assert not model.removeDataFrameColumns([(3, 'monty')])
assert model.columnCount() == len(columnNames)
#remove one column at a time
for index, column in enumerate(columnNames):
assert model.removeDataFrameColumns([(index, column)])
assert model.columnCount() == 0
model.setDataFrame(df, copyDataFrame=True)
assert model.columnCount() == len(columnNames)
# remove all columns
columnNames = [(i, n) for i, n in enumerate(columnNames)]
assert model.removeDataFrameColumns(columnNames)
assert model.columnCount() == 0
def test_remove_columns_random(self, dataFrame):
columnNames = dataFrame.columns.tolist()
columnNames = [(i, n) for i, n in enumerate(columnNames)]
for cycle in xrange(1000):
elements = random.randint(1, len(columnNames))
names = random.sample(columnNames, elements)
df = dataFrame.copy()
model = DataFrameModel(df)
assert not model.removeDataFrameColumns(names)
model.enableEditing(True)
model.removeDataFrameColumns(names)
_columnSet = set(columnNames)
_removedSet = set(names)
remainingColumns = _columnSet - _removedSet
for idx, col in remainingColumns:
assert col in model.dataFrame().columns.tolist()
def test_add_rows(self, model):
assert not model.addDataFrameRows()
model.enableEditing(True)
rows = model.rowCount()
assert not model.addDataFrameRows(count=0)
assert model.rowCount() == rows
assert model.addDataFrameRows()
assert model.rowCount() == rows + 1
assert model.addDataFrameRows(count=5)
assert model.rowCount() == rows + 1 + 5
idx = model.index(rows+4, 0)
assert idx.data() == 0
def test_remove_rows(self, model):
assert not model.removeDataFrameRows([0])
model.enableEditing(True)
df = model.dataFrame().copy()
rows = model.rowCount()
model.removeDataFrameRows([0])
assert model.rowCount() < rows
assert model.rowCount() == rows - 1
assert numpy.all(df.loc[1:].values == model.dataFrame().values)
model.removeDataFrameRows([0, 1])
assert model.dataFrame().empty
model.setDataFrame(df, copyDataFrame=True)
assert not model.removeDataFrameRows([5, 6, 7])
rows = model.rowCount()
assert model.removeDataFrameRows([0, 1, 7, 10])
assert model.rowCount() < rows
assert model.rowCount() == 1
if __name__ == '__main__':
pytest.main()
|
justacec/bokeh
|
refs/heads/master
|
bokeh/charts/builder.py
|
2
|
"""This is the Bokeh charts interface. It gives you a high level API to build
complex plot is a simple way.
This is the Builder class, a minimal prototype class to build more chart
types on top of it.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import warnings
from six import string_types
from .attributes import AttrSpec, ColorAttr, CatAttr
from .chart import Chart
from .data_source import ChartDataSource
from .models import CompositeGlyph
from .properties import Dimension, ColumnLabel
from .utils import collect_attribute_columns, label_from_index_dict, build_hover_tooltips
from .data_source import OrderedAssigner
from ..models.ranges import Range, Range1d, FactorRange
from ..models.sources import ColumnDataSource
from ..core.properties import (HasProps, Instance, List, String, Dict,
Color, Bool, Tuple, Either, Enum)
from ..core.enums import SortDirection
from ..io import curdoc, curstate
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def create_and_build(builder_class, *data, **kws):
"""A factory function for handling Chart and Builder generation.
Returns:
:class:`Chart`
"""
if getattr(builder_class, 'dimensions') is None:
raise NotImplementedError('Each builder must specify its dimensions, %s does not.' % builder_class.__name__)
if getattr(builder_class, 'default_attributes') is None:
raise NotImplementedError('Each builder must specify its default_attributes, %s does not.' % builder_class.__name__)
builder_props = set(builder_class.properties()) | \
set(getattr(builder_class, "__deprecated_attributes__", []))
# append dimensions to the builder props
for dim in builder_class.dimensions:
builder_props.add(dim)
# append attributes to the builder props
for attr_name in builder_class.default_attributes.keys():
builder_props.add(attr_name)
# create the new builder
builder_kws = {k: v for k, v in kws.items() if k in builder_props}
builder = builder_class(*data, **builder_kws)
# create a chart to return, since there isn't one already
chart_kws = {k: v for k, v in kws.items() if k not in builder_props}
chart = Chart(**chart_kws)
chart.add_builder(builder)
chart.start_plot()
return chart
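# A minimal sketch (hypothetical; ExampleBuilder is not defined in this
# module) of how chart entry points are typically implemented on top of
# this factory:
#
#     def Example(data, **kws):
#         """ Create an Example chart. """
#         return create_and_build(ExampleBuilder, data, **kws)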
class Builder(HasProps):
""" A prototype class to inherit each new chart Builder type.
It provides useful methods to be used by the inherited builder classes,
in order to automate most of the charts creation tasks and leave the
core customization to specialized builder classes. In that pattern
inherited builders just need to provide the following methods:
Required:
* :meth:`~bokeh.charts.builder.Builder.yield_renderers`: yields the glyphs to be
rendered into the plot. Here you should call the
:meth:`~bokeh.charts.builder.Builder.add_glyph` method so that the builder can
setup the legend for you.
* :meth:`~bokeh.charts.builder.Builder.set_ranges`: setup the ranges for the
glyphs. This is called after glyph creation, so you are able to inspect the
comp_glyphs for their minimum and maximum values. See the
:meth:`~bokeh.charts.builder.Builder.create` method for more information on
when this is called and how the builder provides the ranges to the containing
:class:`Chart` using the :meth:`Chart.add_ranges` method.
Optional:
* :meth:`~bokeh.charts.builder.Builder.setup`: provides an area
where subclasses of builder can introspect properties, setup attributes, or change
property values. This is called before
:meth:`~bokeh.charts.builder.Builder.process_data`.
* :meth:`~bokeh.charts.builder.Builder.process_data`: provides an area
where subclasses of builder can manipulate the source data before renderers are
created.
"""
# Optional Inputs
x_range = Instance(Range)
y_range = Instance(Range)
xlabel = String()
ylabel = String()
xscale = String()
yscale = String()
palette = List(Color, help="""Optional input to override the default palette used
by any color attribute.
""")
# Dimension Configuration
"""
The dimension labels that drive the position of the
glyphs. Subclasses should implement this so that the Builder
base class knows which dimensions it needs to operate on.
An example for a builder working with cartesian x and y
coordinates would be dimensions = ['x', 'y']. You should
then instantiate the x and y dimensions as attributes of the
subclass of builder using the :class:`Dimension
<bokeh.charts.properties.Dimension>` class. One for x, as x
= Dimension(...), and one as y = Dimension(...).
"""
dimensions = None # None because it MUST be overridden
"""
The dimension labels that must exist to produce the
glyphs. This specifies what are the valid configurations for
the chart, with the option of specifying the type of the
columns. The
:class:`~bokeh.charts.data_source.ChartDataSource` will
inspect this property of your subclass of Builder and use
this to fill in any required dimensions if no keyword
arguments are used.
"""
req_dimensions = []
# Attribute Configuration
attributes = Dict(String, Instance(AttrSpec), help="""
The attribute specs used to group data. This is a mapping between the role of
the attribute spec (e.g. 'color') and the
:class:`~bokeh.charts.attributes.AttrSpec` class (e.g.,
:class:`~bokeh.charts.attributes.ColorAttr`). The Builder will use this
attributes property during runtime, which will consist of any attribute specs
that are passed into the chart creation function (e.g.,
:class:`~bokeh.charts.Bar`), ones that are created for the user from simple
input types (e.g. `Bar(..., color='red')` or `Bar(..., color=<column_name>)`),
or lastly, the attribute spec found in the default_attributes configured for
the subclass of :class:`~bokeh.charts.builder.Builder`.
""")
"""
The default attribute specs used to group data. This is
where the subclass of Builder should specify what the
default attributes are that will yield attribute values to
each group of data, and any specific configuration. For
example, the :class:`ColorAttr` utilizes a default palette
for assigning color based on groups of data. If the user
doesn't assign a column of the data to the associated
attribute spec, then the default attrspec is used, which
will yield a constant color value for each group of
data. This is by default the first color in the default
palette, but can be customized by setting the default color
in the ColorAttr.
"""
default_attributes = None # None because it MUST be overridden
# Derived properties (created by Builder at runtime)
attribute_columns = List(ColumnLabel, help="""
All columns used for specifying attributes for the Chart. The Builder will set
this value on creation so that the subclasses can know the distinct set of columns
that are being used to assign attributes.
""")
comp_glyphs = List(Instance(CompositeGlyph), help="""
A list of composite glyphs, where each represents a unique subset of data. The
composite glyph is a helper class that encapsulates all low level
:class:`~bokeh.models.glyphs.Glyph`, that represent a higher level group of
data. For example, the :class:`BoxGlyph` is a single class that yields
each :class:`GlyphRenderer` needed to produce a Box on a :class:`BoxPlot`. The
single Box represents a full array of values that are aggregated, and is made
up of multiple :class:`~bokeh.models.glyphs.Rect` and
:class:`~bokeh.models.glyphs.Segment` glyphs.
""")
labels = List(String, help="""Represents the unique labels to be used for legends.""")
"""List of attributes to use for legends."""
label_attributes = []
"""
Used to assign columns to dimensions when no selections have been provided. The
default behavior is provided by the :class:`OrderedAssigner`, which assigns
a single column to each dimension available in the `Builder`'s `dims` property.
"""
column_selector = OrderedAssigner
comp_glyph_types = List(Instance(CompositeGlyph))
sort_dim = Dict(String, Bool, default={})
sort_legend = List(Tuple(String, Bool), help="""
List of tuples to use for sorting the legend, in order that they should be
used for sorting. This sorting can be different than the sorting used for the
rest of the chart. For example, you might want to sort only on the column
assigned to the color attribute, or sort it descending. The order of each tuple
is (Column, Ascending).
""")
legend_sort_field = String(help="""
Attribute that should be used to sort the legend, for example: color,
    dash, marker, etc. Valid values for this property depend on the type
of chart.
""")
legend_sort_direction = Enum(SortDirection, help="""
Sort direction to apply to :attr:`~bokeh.charts.builder.Builder.sort_legend`.
Valid values are: `ascending` or `descending`.
""")
source = Instance(ColumnDataSource)
tooltips = Either(List(Tuple(String, String)), List(String), Bool, default=None,
help="""
Tells the builder to add tooltips to the chart by either using the columns
specified to the chart attributes (True), or by generating tooltips for each
column specified (list(str)), or by explicit specification of the tooltips
using the valid input for the `HoverTool` tooltips kwarg.
""")
__deprecated_attributes__ = ('sort_legend',)
def __init__(self, *args, **kws):
"""Common arguments to be used by all the inherited classes.
Args:
data (:ref:`userguide_charts_data_types`): source data for the chart
legend (str, bool): the legend of your plot. The legend content is
                inferred from the incoming input. It can be ``top_left``,
                ``top_right``, ``bottom_left``, or ``bottom_right``.
                It is ``top_right`` if you set it to True.
Attributes:
source (obj): datasource object for your plot,
initialized as a dummy None.
            x_range (obj): x-associated datarange object for your plot,
                initialized as a dummy None.
            y_range (obj): y-associated datarange object for your plot,
                initialized as a dummy None.
groups (list): to be filled with the incoming groups of data.
Useful for legend construction.
data (dict): to be filled with the incoming data and be passed
to the ChartDataSource for each Builder class.
attr (list(AttrSpec)): to be filled with the new attributes created after
loading the data dict.
"""
data = None
if len(args) != 0 or len(kws) != 0:
# chart dimensions can be literal dimensions or attributes
attrs = list(self.default_attributes.keys())
dims = self.dimensions + attrs
# pop the dimension inputs from kwargs
data_args = {}
for dim in dims:
if dim in kws.keys():
data_args[dim] = kws[dim]
# build chart data source from inputs, given the dimension configuration
data_args['dims'] = tuple(dims)
data_args['required_dims'] = tuple(self.req_dimensions)
data_args['attrs'] = attrs
data_args['column_assigner'] = self.column_selector
data = ChartDataSource.from_data(*args, **data_args)
# make sure that the builder dimensions have access to the chart data source
for dim in self.dimensions:
getattr(getattr(self, dim), 'set_data')(data)
# handle input attrs and ensure attrs have access to data
attributes = self._setup_attrs(data, kws)
# remove inputs handled by dimensions and chart attributes
for dim in dims:
kws.pop(dim, None)
else:
attributes = dict()
kws['attributes'] = attributes
super(Builder, self).__init__(**kws)
# collect unique columns used for attributes
self.attribute_columns = collect_attribute_columns(**self.attributes)
for k in self.__deprecated_attributes__:
if k in kws:
setattr(self, k, kws[k])
self._data = data
self._legends = []
def _setup_attrs(self, data, kws):
"""Handle overridden attributes and initialize them with data.
Makes sure that all attributes have access to the data
source, which is used for mapping attributes to groups
of data.
        Returns:
            dict: mapping of attribute name to its initialized AttrSpec
        """
source = ColumnDataSource(data.df)
attr_names = self.default_attributes.keys()
custom_palette = kws.get('palette')
attributes = dict()
for attr_name in attr_names:
attr = kws.pop(attr_name, None)
# if given an attribute use it
if isinstance(attr, AttrSpec):
attributes[attr_name] = attr
# if we are given columns, use those
            elif isinstance(attr, (str, list)):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# override palette if available
if isinstance(attributes[attr_name], ColorAttr):
if custom_palette is not None:
attributes[attr_name].iterable = custom_palette
attributes[attr_name].setup(data=source, columns=attr)
else:
# override palette if available
if (isinstance(self.default_attributes[attr_name], ColorAttr) and
custom_palette is not None):
attributes[attr_name] = self.default_attributes[attr_name]._clone()
attributes[attr_name].iterable = custom_palette
else:
attributes[attr_name] = self.default_attributes[attr_name]._clone()
# make sure all have access to data source
for attr_name in attr_names:
attributes[attr_name].update_data(data=source)
return attributes
def setup(self):
"""Perform any initial pre-processing, attribute config.
Returns:
None
"""
pass
def process_data(self):
"""Make any global data manipulations before grouping.
        It has to be implemented by each inherited class
        representing a different chart type. It is the place
where we make specific calculations for each chart.
Returns:
None
"""
pass
def yield_renderers(self):
""" Generator that yields the glyphs to be draw on the plot
It has to be implemented by any of the inherited class
representing each different chart type.
Yields:
:class:`GlyphRenderer`
"""
        raise NotImplementedError('Subclasses of %s must implement yield_renderers.' %
self.__class__.__name__)
def set_ranges(self):
"""Calculate and set the x and y ranges.
        It has to be implemented by each subclass of Builder
        representing a different chart type, and is called after
        :meth:`yield_renderers`.
Returns:
None
"""
        raise NotImplementedError('Subclasses of %s must implement set_ranges.' %
self.__class__.__name__)
def get_dim_extents(self):
"""Helper method to retrieve maximum extents of all the renderers.
Returns:
a dict mapping between dimension and value for x_max, y_max, x_min, y_min
"""
return {'x_max': max([renderer.x_max for renderer in self.comp_glyphs]),
'y_max': max([renderer.y_max for renderer in self.comp_glyphs]),
'x_min': min([renderer.x_min for renderer in self.comp_glyphs]),
'y_min': min([renderer.y_min for renderer in self.comp_glyphs])
}
def add_glyph(self, group, glyph):
"""Add a composite glyph.
Manages the legend, since the builder might not want all attribute types
used for the legend.
Args:
group (:class:`DataGroup`): the data the `glyph` is associated with
glyph (:class:`CompositeGlyph`): the glyph associated with the `group`
Returns:
None
"""
if isinstance(glyph, list):
for sub_glyph in glyph:
self.comp_glyphs.append(sub_glyph)
else:
self.comp_glyphs.append(glyph)
# handle cases where builders have specified which attributes to use for labels
label = None
if len(self.label_attributes) > 0:
for attr in self.label_attributes:
# this will get the last attribute group label for now
if self.attributes[attr].columns is not None:
label = self._get_group_label(group, attr=attr)
# if no special case for labeling, just use the group label
if label is None:
label = self._get_group_label(group, attr='label')
# add to legend if new and unique label
if str(label) not in self.labels and label is not None:
self._legends.append((label, glyph.renderers))
self.labels.append(label)
def _get_group_label(self, group, attr='label'):
"""Get the label of the group by the attribute name.
Args:
            group (:class:`DataGroup`): the group of data
attr (str, optional): the attribute name containing the label, defaults to
'label'.
Returns:
str: the label for the group
"""
        if attr == 'label':
label = group.label
else:
label = group[attr]
if isinstance(label, dict):
label = tuple(label.values())
return self._get_label(label)
@staticmethod
def _get_label(raw_label):
"""Converts a label by string or tuple to a string representation.
Args:
raw_label (str or tuple(any, any)): a unique identifier for the data group
Returns:
str: a label that is usable in charts
"""
# don't convert None type to string so we can test for it later
if raw_label is None:
return None
if (isinstance(raw_label, tuple) or isinstance(raw_label, list)) and \
len(raw_label) == 1:
raw_label = raw_label[0]
elif isinstance(raw_label, dict):
raw_label = label_from_index_dict(raw_label)
return str(raw_label)
def collect_attr_kwargs(self):
if hasattr(super(self.__class__, self), 'default_attributes'):
attrs = set(self.default_attributes.keys()) - set(
(super(self.__class__, self).default_attributes or {}).keys())
else:
attrs = set()
return attrs
def get_group_kwargs(self, group, attrs):
return {attr: group[attr] for attr in attrs}
def create(self, chart=None):
"""Builds the renderers, adding them and other components to the chart.
Args:
chart (:class:`Chart`, optional): the chart that will contain the glyph
renderers that the `Builder` produces.
Returns:
:class:`Chart`
"""
# call methods that allow customized setup by subclasses
self.setup()
self.process_data()
# create and add renderers to chart
renderers = self.yield_renderers()
if chart is None:
chart = Chart()
chart.add_renderers(self, renderers)
# handle ranges after renders, since ranges depend on aggregations
# ToDo: should reconsider where this occurs
self.set_ranges()
chart.add_ranges('x', self.x_range)
chart.add_ranges('y', self.y_range)
# sort the legend if we are told to
self._legends = self._sort_legend(
self.legend_sort_field, self.legend_sort_direction,
self._legends, self.attributes)
# always contribute legends, let Chart sort it out
chart.add_legend(self._legends)
chart.add_labels('x', self.xlabel)
chart.add_labels('y', self.ylabel)
chart.add_scales('x', self.xscale)
chart.add_scales('y', self.yscale)
if self.tooltips is not None:
tooltips = build_hover_tooltips(hover_spec=self.tooltips,
chart_cols=self.attribute_columns)
chart.add_tooltips(tooltips)
return chart
@classmethod
def generate_help(cls):
help_str = ''
for comp_glyph in cls.comp_glyph_types:
help_str += str(comp_glyph.glyph_properties())
return help_str
@staticmethod
def _sort_legend(legend_sort_field, legend_sort_direction, legends, attributes):
"""Sort legends sorted by looping though sort_legend items (
see :attr:`Builder.sort_legend` for more details)
"""
if legend_sort_field:
if len(attributes[legend_sort_field].columns) > 0:
# TODO(fpliger): attributes should be consistent and not
# need any type checking but for
# the moment it is not, specially when going
# though a process like binning or when data
# is built for HeatMap, Scatter, etc...
item_order = [x[0] if isinstance(x, tuple) else x
for x in attributes[legend_sort_field].items]
item_order = [str(x) if not isinstance(x, string_types)
else x for x in item_order]
                def sort_key(leg):
                    return item_order.index(leg[0])
                reverse = legend_sort_direction == 'descending'
                return list(sorted(legends, key=sort_key, reverse=reverse))
return legends
@property
def sort_legend(self):
warnings.warn("Chart property `sort_legend` was deprecated in 0.12 \
and will be removed in the future. Use `legend_sort_field` and \
`legend_sort_direction` instead.")
return [(self.legend_sort_field, self.legend_sort_direction)]
@sort_legend.setter
def sort_legend(self, value):
warnings.warn("Chart property 'sort_legend' was deprecated in 0.12 \
and will be removed in the future. Use `legend_sort_field` and \
`legend_sort_direction` instead.")
self.legend_sort_field, direction = value[0]
if direction:
self.legend_sort_direction = "ascending"
else:
self.legend_sort_direction = "descending"
class XYBuilder(Builder):
"""Implements common functionality for XY Builders."""
x = Dimension('x')
y = Dimension('y')
dimensions = ['x', 'y']
req_dimensions = [['x'],
['y'],
['x', 'y']]
default_attributes = {'color': ColorAttr()}
def set_ranges(self):
"""Calculate and set the x and y ranges."""
# ToDo: handle when only single dimension is provided
extents = self.get_dim_extents()
endx = extents['x_max']
startx = extents['x_min']
self.x_range = self._get_range('x', startx, endx)
endy = extents['y_max']
starty = extents['y_min']
self.y_range = self._get_range('y', starty, endy)
if self.xlabel is None:
if self.x.selection is not None:
select = self.x.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.xlabel = ', '.join(select)
if self.ylabel is None:
if self.y.selection is not None:
select = self.y.selection
if not isinstance(select, list):
select = [select]
else:
select = ['']
self.ylabel = ', '.join(select)
def _get_range(self, dim, start, end):
"""Create a :class:`Range` for the :class:`Chart`.
Args:
dim (str): the name of the dimension, which is an attribute of the builder
start: the starting value of the range
end: the ending value of the range
Returns:
:class:`Range`
"""
dim_ref = getattr(self, dim)
values = dim_ref.data
dtype = dim_ref.dtype.name
sort = self.sort_dim.get(dim)
# object data or single value
if dtype == 'object':
factors = values.drop_duplicates()
if sort:
# TODO (fpliger): this handles pandas API change so users do not experience
# the related annoying deprecation warning. This is probably worth
# removing when pandas deprecated version (0.16) is "old" enough
try:
factors.sort_values(inplace=True)
except AttributeError:
factors.sort(inplace=True)
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=factors.tolist())
elif 'datetime' in dtype:
setattr(self, dim + 'scale', 'datetime')
return Range1d(start=start, end=end)
else:
if end == 'None' or (end - start) == 0:
setattr(self, dim + 'scale', 'categorical')
return FactorRange(factors=['None'])
else:
diff = end - start
setattr(self, dim + 'scale', 'linear')
return Range1d(start=start - 0.1 * diff, end=end + 0.1 * diff)
class AggregateBuilder(Builder):
"""A base class for deriving specific builders performing aggregation with stats.
The typical AggregateBuilder takes a single dimension of values.
"""
values = Dimension('values')
default_attributes = {'label': CatAttr(),
'color': ColorAttr()}
|
dpgeorge/micropython
|
refs/heads/master
|
tests/extmod/ujson_load.py
|
15
|
try:
from uio import StringIO
import ujson as json
except:
try:
from io import StringIO
import json
except ImportError:
print("SKIP")
raise SystemExit
print(json.load(StringIO("null")))
print(json.load(StringIO('"abc\\u0064e"')))
print(json.load(StringIO("[false, true, 1, -2]")))
print(json.load(StringIO('{"a":true}')))
|
dimroc/tensorflow-mnist-tutorial
|
refs/heads/master
|
lib/python3.6/site-packages/tensorflow/contrib/learn/python/learn/dataframe/transforms/boolean_mask.py
|
94
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Masks one `Series` based on the content of another `Series`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import series
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_py
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
"""Boolean mask for `SparseTensor`s.
Args:
sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
of `sparse_tensor`.
name: optional name for this operation.
Returns:
A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
`True`.
"""
# TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
with ops.name_scope(name, values=[sparse_tensor, mask]):
mask = ops.convert_to_tensor(mask)
mask_rows = array_ops.where(mask)
first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
[0, 0], [-1, 1]))
# Identify indices corresponding to the rows identified by mask_rows.
sparse_entry_matches = functional_ops.map_fn(
lambda x: math_ops.equal(first_indices, x),
mask_rows,
dtype=dtypes.bool)
# Combine the rows of index_matches to form a mask for the sparse indices
# and values.
to_retain = array_ops.reshape(
functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])
return sparse_ops.sparse_retain(sparse_tensor, to_retain)
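def _example_sparse_boolean_mask():
  """A minimal usage sketch (assumes TF 1.x graph construction); this helper
  is illustrative and not part of the original module. It builds a 3x2
  `SparseTensor` and keeps only rows 0 and 2."""
  st = sparse_tensor_py.SparseTensor(
      indices=[[0, 0], [1, 1], [2, 0]], values=[1, 2, 3], dense_shape=[3, 2])
  mask = ops.convert_to_tensor([True, False, True])
  # the result contains only the entries from rows 0 and 2 of `st`
  return sparse_boolean_mask(st, mask)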
@series.Series.register_binary_op("select_rows")
class BooleanMask(transform.TensorFlowTransform):
"""Apply a boolean mask to a `Series`."""
@property
def name(self):
return "BooleanMask"
@property
def input_valency(self):
return 2
@property
def _output_names(self):
return "output",
def _apply_transform(self, input_tensors, **kwargs):
"""Applies the transformation to the `transform_input`.
Args:
input_tensors: a list of Tensors representing the input to
the Transform.
**kwargs: Additional keyword arguments, unused here.
Returns:
A namedtuple of Tensors representing the transformed output.
"""
input_tensor = input_tensors[0]
mask = input_tensors[1]
if mask.get_shape().ndims > 1:
mask = array_ops.squeeze(mask)
if isinstance(input_tensor, sparse_tensor_py.SparseTensor):
mask_fn = sparse_boolean_mask
else:
mask_fn = array_ops.boolean_mask
# pylint: disable=not-callable
return self.return_type(mask_fn(input_tensor, mask))
|
hcsturix74/django
|
refs/heads/master
|
tests/signals/models.py
|
445
|
"""
Testing signals before/after saving and deleting.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Person(models.Model):
first_name = models.CharField(max_length=20)
last_name = models.CharField(max_length=20)
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Car(models.Model):
make = models.CharField(max_length=20)
model = models.CharField(max_length=20)
def __str__(self):
return "%s %s" % (self.make, self.model)
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=20)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Book(models.Model):
name = models.CharField(max_length=20)
authors = models.ManyToManyField(Author)
def __str__(self):
return self.name
|
hmoco/waterbutler
|
refs/heads/develop
|
waterbutler/providers/__init__.py
|
321
|
__import__("pkg_resources").declare_namespace(__name__)
|
hdinsight/hue
|
refs/heads/master
|
apps/useradmin/src/useradmin/management/commands/import_ldap_user.py
|
34
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.utils.translation import ugettext_lazy as _t, ugettext as _
from desktop.conf import LDAP
from useradmin import ldap_access
from useradmin.views import import_ldap_users
class Command(BaseCommand):
"""
Handler for importing LDAP users into the Hue database.
If a user has been previously imported, this will sync their user information.
"""
option_list = BaseCommand.option_list + (
make_option("--dn", help=_t("Whether or not the user should be imported by "
"distinguished name."),
action="store_true",
default=False),
make_option("--sync-groups", help=_t("Sync groups of the users."),
action="store_true",
default=False),
make_option("--server", help=_t("Server to connect to."),
action="store",
default=None),
)
args = "username"
def handle(self, user=None, **options):
if user is None:
raise CommandError(_("A username must be provided."))
import_by_dn = options['dn']
sync_groups = options['sync_groups']
server = options['server']
connection = ldap_access.get_connection_from_server(server)
import_ldap_users(connection, user, sync_groups, import_by_dn)
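# A usage sketch (assumes a configured Hue/Django environment); besides the
# command line, the command can be run programmatically:
#
#     from django.core.management import call_command
#     call_command('import_ldap_user', 'jsmith', sync_groups=True)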
|
zahodi/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_sys_db.py
|
32
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: bigip_sys_db
short_description: Manage BIG-IP system database variables
description:
- Manage BIG-IP system database variables
version_added: "2.2"
options:
key:
description:
- The database variable to manipulate.
required: true
state:
description:
      - The state of the variable on the system. When C(present), guarantees
        that an existing variable is set to C(value). When C(reset), sets the
        variable back to the default value. At least one of value and state
        C(reset) is required.
required: false
default: present
choices:
- present
- reset
value:
description:
      - The value to set the key to. At least one of value and state C(reset)
        is required.
required: false
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires BIG-IP version 12.0.0 or greater
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set the boot.quiet DB variable on the BIG-IP
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "boot.quiet"
value: "disable"
delegate_to: localhost
- name: Disable the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
value: "false"
delegate_to: localhost
- name: Reset the initial setup screen
bigip_sys_db:
user: "admin"
password: "secret"
server: "lb.mydomain.com"
key: "setup.run"
state: "reset"
delegate_to: localhost
'''
RETURN = '''
name:
description: The key in the system database that was specified
returned: changed and success
type: string
sample: "setup.run"
default_value:
description: The default value of the key
returned: changed and success
type: string
sample: "true"
value:
description: The value that you set the key to
returned: changed and success
type: string
sample: "false"
'''
try:
from f5.bigip import ManagementRoot
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
class BigIpSysDb(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def flush(self):
result = dict()
state = self.params['state']
value = self.params['value']
        if state != 'reset' and not value:
raise F5ModuleError(
"When setting a key, a value must be supplied"
)
current = self.read()
if self.params['check_mode']:
if value == current:
changed = False
else:
changed = True
else:
if state == "present":
changed = self.present()
elif state == "reset":
changed = self.reset()
current = self.read()
result.update(
name=current.name,
default_value=current.defaultValue,
value=current.value
)
result.update(dict(changed=changed))
return result
def read(self):
dbs = self.api.tm.sys.dbs.db.load(
name=self.params['key']
)
return dbs
def present(self):
current = self.read()
if current.value == self.params['value']:
return False
current.update(value=self.params['value'])
current.refresh()
if current.value != self.params['value']:
raise F5ModuleError(
"Failed to set the DB variable"
)
return True
def reset(self):
current = self.read()
default = current.defaultValue
if current.value == default:
return False
current.update(value=default)
current.refresh()
if current.value != current.defaultValue:
raise F5ModuleError(
"Failed to reset the DB variable"
)
return True
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
key=dict(required=True),
state=dict(default='present', choices=['present', 'reset']),
value=dict(required=False, default=None)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
try:
obj = BigIpSysDb(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
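# imports intentionally at the bottom, per the Ansible 2.x module convention;
# the f5 module_utils are expected to provide the F5ModuleError and
# f5_argument_spec names used above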
from ansible.module_utils.basic import *
from ansible.module_utils.f5 import *
if __name__ == '__main__':
main()
|
KasperPRasmussen/bokeh
|
refs/heads/master
|
examples/models/custom.py
|
3
|
from __future__ import print_function
from bokeh.core.properties import String
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.callbacks import Callback
from bokeh.models.glyphs import Circle
from bokeh.models import Plot, DataRange1d, LinearAxis, ColumnDataSource, PanTool, WheelZoomTool, TapTool
from bokeh.models.layouts import HBox
from bokeh.resources import INLINE
from bokeh.util.browser import view
class Popup(Callback):
__implementation__ = """
_ = require "underscore"
Util = require "util/util"
Model = require "model"
p = require "core/properties"
class Popup extends Model
type: "Popup"
execute: (data_source) ->
for i in Util.get_indices(data_source)
message = Util.replace_placeholders(@get("message"), data_source, i)
window.alert(message)
null
@define {
message: [ p.String, "" ]
}
module.exports =
Model: Popup
"""
message = String("", help="""
Message to display in a popup window. This can be a template string,
which will be formatted with data from the data source.
""")
class MyHBox(HBox):
__implementation__ = """
HBox = require "models/layouts/hbox"
class MyHBoxView extends HBox.View
render: () ->
super()
@$el.css({border: "5px solid black"})
class MyHBox extends HBox.Model
type: "MyHBox"
default_view: MyHBoxView
module.exports = {
Model: MyHBox
View: MyHBoxView
}
"""
source = ColumnDataSource(
data = dict(
x = [1, 2, 3, 4, 4, 5, 5],
y = [5, 4, 3, 2, 2.1, 1, 1.1],
color = ["rgb(0, 100, 120)", "green", "blue", "#2c7fb8", "#2c7fb8", "rgba(120, 230, 150, 0.5)", "rgba(120, 230, 150, 0.5)"]
)
)
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
circle = Circle(x="x", y="y", radius=0.2, fill_color="color", line_color="black")
circle_renderer = plot.add_glyph(source, circle)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
tap = TapTool(renderers=[circle_renderer], callback=Popup(message="Selected color: @color"))
plot.add_tools(PanTool(), WheelZoomTool(), tap)
doc = Document()
doc.add_root(MyHBox(children=[plot]))
if __name__ == "__main__":
filename = "custom.html"
with open(filename, "w") as f:
f.write(file_html(doc, INLINE, "Demonstration of user-defined models"))
print("Wrote %s" % filename)
view(filename)
|
QBI-Software/Tracking
|
refs/heads/master
|
trackerplots/trackerSPT.py
|
2
|
#!/usr/bin/python3
"""
QBI Meunier Tracker APP: Custom Tracker plots
*******************************************************************************
Copyright (C) 2015 QBI Software, The University of Queensland
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
"""
import numpy as np
import scipy.io
import os
from tracking import Tracker
class TrackerSPT:
def __init__(self):
# Initialize fields (default vbSPT)
self.CylinderL = 40 # nm (length of cylindrical part only)
self.Radius = 20 # nm (spherical end caps, and cylinder radius)
# initiate options
self.timestep = 0.02 # [20ms]
self.stepSize = 5 #[nm]
self.locAccuracy = 0 #[nm]
self.transMat = []#[0.958 0.0421;0.084 0.9158] # [/timestep]
self.transRate = []#[-15 15;30 -30] # [/s]
self.occProb = 0
self.Dapp = 0
self.trajLengths = []
self.runs = 1
self.do_steadystate = False
self.do_parallel = False
self.do_single = False
self.do_transRate = False
self.do_transMat = False
self.finalTraj = {}
self.tracker = Tracker()
self.fields = dict()
self.numTraj = 0
def loadtest(self):
mydata = scipy.io.loadmat('D:\\Projects\\Meunier_Tracking\\Tracking\\testdata_vbSPT.mat',
struct_as_record=False,squeeze_me=True)
self.CylinderL = mydata['cylL']
self.Radius = mydata['cylRadius']
# initiate options
self.timestep = mydata['timestep']
self.stepSize = mydata['stepSize']
self.trajLengths = mydata['trajLengths']
self.runs = mydata['stepSize']
self.finalTraj = mydata['finalTraj']
self.numTraj = mydata['numTraj']
#'finalTraj': array([ array([[-247.62617404, 86.06416631, 7.26799504, 1. ],
"""Output to matlab file for use with vbSPT
"""
def save_mat(self, fullfilename):
# List the parameters
if (len(self.trajLengths) > 0):
self.fields['finalTraj']=self.createNDarray(self.finalTraj)
self.fields['trajLengths']=self.trajLengths
self.fields['runs']=self.runs
self.fields['do_steadystate']=self.do_steadystate
self.fields['do_parallel']=self.do_parallel
self.fields['do_single']=self.do_single
self.fields['cylL']=self.CylinderL
self.fields['cylRadius']=self.Radius
self.fields['timestep']=self.timestep
self.fields['stepSize']=self.stepSize
self.fields['locAccuracy']=self.locAccuracy
self.fields['numTraj']=self.numTraj #len(self.trajLengths)
self.fields['avTrajLength']=np.mean(self.trajLengths)
self.fields['shortestTraj']=min(self.trajLengths)
self.fields['longestTraj']=max(self.trajLengths)
self.fields['Dapp']=self.Dapp
self.fields['occProb']=self.occProb
self.fields['transMat']=self.transMat
self.fields['transRate']=self.transRate
scipy.io.savemat(fullfilename,appendmat=True,mdict=self.fields)
else:
print('Error: No trajectory data to save')
def createNDarray(self,trajitems):
dtype = object
#self.loadtest()
nd = np.asanyarray(trajitems)
return nd
""" Load data from Tracker obj
"""
def load_data(self, tracker):
self.finalTraj = []
self.trajLengths = []
#Load from tracker data
plotlist = list(tracker.plotter.items())
self.numTraj = len(plotlist)
# for each plot
tracknum = 0
for track in plotlist:
tracklist = track[1]
trackrow = []
for co in tracklist:
trackrow.append(np.array([co.x, co.y, co.frame]))
self.finalTraj.append(trackrow)
self.trajLengths.append(len(tracklist))
tracknum = tracknum + 1 #not actual track num track[0]
if __name__ == "__main__":
import sys
spt = TrackerSPT()
spt.loadtest()
spt.save_mat('D:\\Projects\\Meunier_Tracking\\Tracking\\testrun.mat')
|
rsunder10/PopularityBased-SearchEngine
|
refs/heads/master
|
lib/python3.4/site-packages/django/views/decorators/debug.py
|
712
|
import functools
from django.http import HttpRequest
def sensitive_variables(*variables):
"""
Indicates which variables used in the decorated function are sensitive, so
that those variables can later be treated in a special way, for example
by hiding them when logging unhandled exceptions.
Two forms are accepted:
* with specified variable names:
@sensitive_variables('user', 'password', 'credit_card')
def my_function(user):
password = user.pass_word
credit_card = user.credit_card_number
...
* without any specified variable names, in which case it is assumed that
all variables are considered sensitive:
@sensitive_variables()
def my_function()
...
"""
def decorator(func):
@functools.wraps(func)
def sensitive_variables_wrapper(*func_args, **func_kwargs):
if variables:
sensitive_variables_wrapper.sensitive_variables = variables
else:
sensitive_variables_wrapper.sensitive_variables = '__ALL__'
return func(*func_args, **func_kwargs)
return sensitive_variables_wrapper
return decorator
def sensitive_post_parameters(*parameters):
"""
Indicates which POST parameters used in the decorated view are sensitive,
so that those parameters can later be treated in a special way, for example
by hiding them when logging unhandled exceptions.
Two forms are accepted:
* with specified parameters:
@sensitive_post_parameters('password', 'credit_card')
def my_view(request):
pw = request.POST['password']
cc = request.POST['credit_card']
...
* without any specified parameters, in which case it is assumed that
all parameters are considered sensitive:
@sensitive_post_parameters()
def my_view(request)
...
"""
def decorator(view):
@functools.wraps(view)
def sensitive_post_parameters_wrapper(request, *args, **kwargs):
assert isinstance(request, HttpRequest), (
"sensitive_post_parameters didn't receive an HttpRequest. "
"If you are decorating a classmethod, be sure to use "
"@method_decorator."
)
if parameters:
request.sensitive_post_parameters = parameters
else:
request.sensitive_post_parameters = '__ALL__'
return view(request, *args, **kwargs)
return sensitive_post_parameters_wrapper
return decorator
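# Note: these decorators only annotate the wrapper function / request object;
# the actual redaction is performed by django.views.debug's
# SafeExceptionReporterFilter when it builds the error report.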
|
rotofly/odoo
|
refs/heads/master
|
addons/mrp/wizard/change_production_qty.py
|
245
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
class change_production_qty(osv.osv_memory):
_name = 'change.production.qty'
_description = 'Change Quantity of Products'
_columns = {
'product_qty': fields.float('Product Qty', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
}
def default_get(self, cr, uid, fields, context=None):
""" To get default values for the object.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param fields: List of fields for which we want default values
@param context: A standard dictionary
        @return: A dictionary of fields with their default values.
"""
if context is None:
context = {}
res = super(change_production_qty, self).default_get(cr, uid, fields, context=context)
prod_obj = self.pool.get('mrp.production')
prod = prod_obj.browse(cr, uid, context.get('active_id'), context=context)
if 'product_qty' in fields:
res.update({'product_qty': prod.product_qty})
return res
def _update_product_to_produce(self, cr, uid, prod, qty, context=None):
move_lines_obj = self.pool.get('stock.move')
for m in prod.move_created_ids:
move_lines_obj.write(cr, uid, [m.id], {'product_uom_qty': qty})
def change_prod_qty(self, cr, uid, ids, context=None):
"""
Changes the Quantity of Product.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: List of IDs selected
@param context: A standard dictionary
        @return: an empty dictionary
"""
record_id = context and context.get('active_id',False)
assert record_id, _('Active Id not found')
prod_obj = self.pool.get('mrp.production')
bom_obj = self.pool.get('mrp.bom')
move_obj = self.pool.get('stock.move')
for wiz_qty in self.browse(cr, uid, ids, context=context):
prod = prod_obj.browse(cr, uid, record_id, context=context)
prod_obj.write(cr, uid, [prod.id], {'product_qty': wiz_qty.product_qty})
prod_obj.action_compute(cr, uid, [prod.id])
for move in prod.move_lines:
bom_point = prod.bom_id
bom_id = prod.bom_id.id
if not bom_point:
bom_id = bom_obj._bom_find(cr, uid, product_id=prod.product_id.id, context=context)
if not bom_id:
raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
prod_obj.write(cr, uid, [prod.id], {'bom_id': bom_id})
bom_point = bom_obj.browse(cr, uid, [bom_id])[0]
if not bom_id:
raise osv.except_osv(_('Error!'), _("Cannot find bill of material for this product."))
factor = prod.product_qty * prod.product_uom.factor / bom_point.product_uom.factor
product_details, workcenter_details = \
bom_obj._bom_explode(cr, uid, bom_point, prod.product_id, factor / bom_point.product_qty, [], context=context)
for r in product_details:
if r['product_id'] == move.product_id.id:
move_obj.write(cr, uid, [move.id], {'product_uom_qty': r['product_qty']})
if prod.move_prod_id:
move_obj.write(cr, uid, [prod.move_prod_id.id], {'product_uom_qty' : wiz_qty.product_qty})
self._update_product_to_produce(cr, uid, prod, wiz_qty.product_qty, context=context)
return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
molotof/infernal-twin
|
refs/heads/master
|
build/reportlab/src/reportlab/lib/rl_accel.py
|
34
|
#this is the interface module that imports all from the C extension _rl_accel
_c_funcs = {}
_py_funcs = {}
### NOTE! FP_STR SHOULD PROBABLY ALWAYS DO A PYTHON STR() CONVERSION ON ARGS
### IN CASE THEY ARE "LAZY OBJECTS". ACCELERATOR DOESN'T DO THIS (YET)
__all__ = list(filter(None,'''
fp_str
unicode2T1
instanceStringWidthT1
instanceStringWidthTTF
asciiBase85Encode
asciiBase85Decode
escapePDF
sameFrag
calcChecksum
add32
hex32
'''.split()))
import reportlab
testing = getattr(reportlab,'_rl_testing',False)
del reportlab
for fn in __all__:
try:
exec('from reportlab.lib._rl_accel import %s as f' % fn)
_c_funcs[fn] = f
if testing: _py_funcs[fn] = None
except ImportError:
_py_funcs[fn] = None
if _py_funcs:
from reportlab.lib.utils import isBytes, isUnicode, isSeq, isPy3, rawBytes, asNative, asUnicode, asBytes
from math import log
from struct import unpack
if 'fp_str' in _py_funcs:
_log_10 = lambda x,log=log,_log_e_10=log(10.0): log(x)/_log_e_10
_fp_fmts = "%.0f", "%.1f", "%.2f", "%.3f", "%.4f", "%.5f", "%.6f"
def fp_str(*a):
'''convert separate arguments (or single sequence arg) into space separated numeric strings'''
if len(a)==1 and isSeq(a[0]): a = a[0]
s = []
A = s.append
for i in a:
sa =abs(i)
if sa<=1e-7: A('0')
else:
l = sa<=1 and 6 or min(max(0,(6-int(_log_10(sa)))),6)
n = _fp_fmts[l]%i
if l:
j = len(n)
while j:
j -= 1
if n[j]!='0':
if n[j]!='.': j += 1
break
n = n[:j]
A((n[0]!='0' or len(n)==1) and n or n[1:])
return ' '.join(s)
#hack test for comma users
if ',' in fp_str(0.25):
        _FP_STR = fp_str
        def fp_str(*a):
            return _FP_STR(*a).replace(',','.')
_py_funcs['fp_str'] = fp_str
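        # example: trailing zeros are trimmed and values are space separated,
        # e.g. fp_str(1.0, 2.25, 1e-9) -> '1 2.25 0'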
if 'unicode2T1' in _py_funcs:
def unicode2T1(utext,fonts):
'''return a list of (font,string) pairs representing the unicode text'''
R = []
font, fonts = fonts[0], fonts[1:]
enc = font.encName
if 'UCS-2' in enc:
enc = 'UTF16'
while utext:
try:
if isUnicode(utext):
s = utext.encode(enc)
else:
s = utext
R.append((font,s))
break
except UnicodeEncodeError as e:
i0, il = e.args[2:4]
if i0:
R.append((font,utext[:i0].encode(enc)))
if fonts:
R.extend(unicode2T1(utext[i0:il],fonts))
else:
R.append((font._notdefFont,font._notdefChar*(il-i0)))
utext = utext[il:]
return R
_py_funcs['unicode2T1'] = unicode2T1
if 'instanceStringWidthT1' in _py_funcs:
if isPy3:
def instanceStringWidthT1(self, text, size, encoding='utf8'):
"""This is the "purist" approach to width"""
if not isUnicode(text): text = text.decode(encoding)
return sum([sum(map(f.widths.__getitem__,t)) for f, t in unicode2T1(text,[self]+self.substitutionFonts)])*0.001*size
else:
def instanceStringWidthT1(self, text, size, encoding='utf8'):
"""This is the "purist" approach to width"""
if not isUnicode(text): text = text.decode(encoding)
return sum([sum(map(f.widths.__getitem__,list(map(ord,t)))) for f, t in unicode2T1(text,[self]+self.substitutionFonts)])*0.001*size
_py_funcs['instanceStringWidthT1'] = instanceStringWidthT1
if 'instanceStringWidthTTF' in _py_funcs:
def instanceStringWidthTTF(self, text, size, encoding='utf-8'):
"Calculate text width"
if not isUnicode(text):
text = text.decode(encoding or 'utf-8')
g = self.face.charWidths.get
dw = self.face.defaultWidth
return 0.001*size*sum([g(ord(u),dw) for u in text])
_py_funcs['instanceStringWidthTTF'] = instanceStringWidthTTF
if 'hex32' in _py_funcs:
def hex32(i):
return '0X%8.8X' % (int(i)&0xFFFFFFFF)
_py_funcs['hex32'] = hex32
if 'add32' in _py_funcs:
def add32(x, y):
"Calculate (x + y) modulo 2**32"
return (x+y) & 0xFFFFFFFF
_py_funcs['add32'] = add32
if 'calcChecksum' in _py_funcs:
def calcChecksum(data):
"""Calculates TTF-style checksums"""
data = rawBytes(data)
if len(data)&3: data = data + (4-(len(data)&3))*b"\0"
return sum(unpack(">%dl" % (len(data)>>2), data)) & 0xFFFFFFFF
_py_funcs['calcChecksum'] = calcChecksum
if 'escapePDF' in _py_funcs:
_ESCAPEDICT={}
for c in range(256):
if c<32 or c>=127:
_ESCAPEDICT[c]= '\\%03o' % c
elif c in (ord('\\'),ord('('),ord(')')):
_ESCAPEDICT[c] = '\\'+chr(c)
else:
_ESCAPEDICT[c] = chr(c)
del c
#Michael Hudson donated this
def escapePDF(s):
r = []
for c in s:
if not type(c) is int:
c = ord(c)
r.append(_ESCAPEDICT[c])
return ''.join(r)
_py_funcs['escapePDF'] = escapePDF
if 'asciiBase85Encode' in _py_funcs:
def asciiBase85Encode(input):
"""Encodes input using ASCII-Base85 coding.
This is a compact encoding used for binary data within
a PDF file. Four bytes of binary data become five bytes of
ASCII. This is the default method used for encoding images."""
doOrd = not isPy3 or isUnicode(input)
# special rules apply if not a multiple of four bytes.
whole_word_count, remainder_size = divmod(len(input), 4)
cut = 4 * whole_word_count
body, lastbit = input[0:cut], input[cut:]
out = [].append
for i in range(whole_word_count):
offset = i*4
b1 = body[offset]
b2 = body[offset+1]
b3 = body[offset+2]
b4 = body[offset+3]
if doOrd:
b1 = ord(b1)
b2 = ord(b2)
b3 = ord(b3)
b4 = ord(b4)
if b1<128:
num = (((((b1<<8)|b2)<<8)|b3)<<8)|b4
else:
num = 16777216 * b1 + 65536 * b2 + 256 * b3 + b4
if num == 0:
#special case
out('z')
else:
#solve for five base-85 numbers
temp, c5 = divmod(num, 85)
temp, c4 = divmod(temp, 85)
temp, c3 = divmod(temp, 85)
c1, c2 = divmod(temp, 85)
assert ((85**4) * c1) + ((85**3) * c2) + ((85**2) * c3) + (85*c4) + c5 == num, 'dodgy code!'
out(chr(c1+33))
out(chr(c2+33))
out(chr(c3+33))
out(chr(c4+33))
out(chr(c5+33))
# now we do the final bit at the end. I repeated this separately as
# the loop above is the time-critical part of a script, whereas this
# happens only once at the end.
#encode however many bytes we have as usual
if remainder_size > 0:
lastbit += (4-len(lastbit))*('\0' if doOrd else b'\000')
b1 = lastbit[0]
b2 = lastbit[1]
b3 = lastbit[2]
b4 = lastbit[3]
if doOrd:
b1 = ord(b1)
b2 = ord(b2)
b3 = ord(b3)
b4 = ord(b4)
num = 16777216 * b1 + 65536 * b2 + 256 * b3 + b4
#solve for c1..c5
temp, c5 = divmod(num, 85)
temp, c4 = divmod(temp, 85)
temp, c3 = divmod(temp, 85)
c1, c2 = divmod(temp, 85)
#print 'encoding: %d %d %d %d -> %d -> %d %d %d %d %d' % (
# b1,b2,b3,b4,num,c1,c2,c3,c4,c5)
lastword = chr(c1+33) + chr(c2+33) + chr(c3+33) + chr(c4+33) + chr(c5+33)
#write out most of the bytes.
out(lastword[0:remainder_size + 1])
#terminator code for ascii 85
out('~>')
return ''.join(out.__self__)
_py_funcs['asciiBase85Encode'] = asciiBase85Encode
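        # example: asciiBase85Encode('Man ') -> '9jqo^~>' (four input bytes
        # become five base-85 characters, plus the '~>' terminator)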
if 'asciiBase85Decode' in _py_funcs:
def asciiBase85Decode(input):
"""Decodes input using ASCII-Base85 coding.
This is not normally used - Acrobat Reader decodes for you
- but a round trip is essential for testing."""
#strip all whitespace
stripped = ''.join(asNative(input).split())
#check end
assert stripped[-2:] == '~>', 'Invalid terminator for Ascii Base 85 Stream'
stripped = stripped[:-2] #chop off terminator
#may have 'z' in it which complicates matters - expand them
stripped = stripped.replace('z','!!!!!')
# special rules apply if not a multiple of five bytes.
whole_word_count, remainder_size = divmod(len(stripped), 5)
#print '%d words, %d leftover' % (whole_word_count, remainder_size)
#assert remainder_size != 1, 'invalid Ascii 85 stream!'
cut = 5 * whole_word_count
body, lastbit = stripped[0:cut], stripped[cut:]
out = [].append
for i in range(whole_word_count):
offset = i*5
c1 = ord(body[offset]) - 33
c2 = ord(body[offset+1]) - 33
c3 = ord(body[offset+2]) - 33
c4 = ord(body[offset+3]) - 33
c5 = ord(body[offset+4]) - 33
num = ((85**4) * c1) + ((85**3) * c2) + ((85**2) * c3) + (85*c4) + c5
temp, b4 = divmod(num,256)
temp, b3 = divmod(temp,256)
b1, b2 = divmod(temp, 256)
assert num == 16777216 * b1 + 65536 * b2 + 256 * b3 + b4, 'dodgy code!'
out(chr(b1))
out(chr(b2))
out(chr(b3))
out(chr(b4))
#decode however many bytes we have as usual
if remainder_size > 0:
while len(lastbit) < 5:
lastbit = lastbit + '!'
c1 = ord(lastbit[0]) - 33
c2 = ord(lastbit[1]) - 33
c3 = ord(lastbit[2]) - 33
c4 = ord(lastbit[3]) - 33
c5 = ord(lastbit[4]) - 33
num = (((85*c1+c2)*85+c3)*85+c4)*85 + (c5
+(0,0,0xFFFFFF,0xFFFF,0xFF)[remainder_size])
temp, b4 = divmod(num,256)
temp, b3 = divmod(temp,256)
b1, b2 = divmod(temp, 256)
assert num == 16777216 * b1 + 65536 * b2 + 256 * b3 + b4, 'dodgy code!'
#print 'decoding: %d %d %d %d %d -> %d -> %d %d %d %d' % (
# c1,c2,c3,c4,c5,num,b1,b2,b3,b4)
#the last character needs 1 adding; the encoding loses
#data by rounding the number to x bytes, and when
#divided repeatedly we get one less
if remainder_size == 2:
lastword = chr(b1)
elif remainder_size == 3:
lastword = chr(b1) + chr(b2)
elif remainder_size == 4:
lastword = chr(b1) + chr(b2) + chr(b3)
else:
lastword = ''
out(lastword)
r = ''.join(out.__self__)
return asBytes(r,enc='latin1')
_py_funcs['asciiBase85Decode'] = asciiBase85Decode
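        # round trip: asciiBase85Decode(asciiBase85Encode('Man ')) == b'Man '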
if 'sameFrag' in _py_funcs:
def sameFrag(f,g):
'returns 1 if two ParaFrags map out the same'
if (hasattr(f,'cbDefn') or hasattr(g,'cbDefn')
or hasattr(f,'lineBreak') or hasattr(g,'lineBreak')): return 0
for a in ('fontName', 'fontSize', 'textColor', 'rise', 'underline', 'strike', 'link', "backColor"):
if getattr(f,a,None)!=getattr(g,a,None): return 0
return 1
_py_funcs['sameFrag'] = sameFrag
G=globals()
for fn in __all__:
f = _c_funcs[fn] if fn in _c_funcs else _py_funcs[fn]
if not f:
raise RuntimeError('function %s is not properly defined' % fn)
G[fn] = f
del fn, f, G
if __name__=='__main__':
import sys, os
for modname in 'reportlab.lib.rl_accel','reportlab.lib._rl_accel':
for cmd in (
#"unicode2T1('abcde fghi . jkl ; mno',fonts)",
#"unicode2T1(u'abcde fghi . jkl ; mno',fonts)",
"_instanceStringWidthU(font,'abcde fghi . jkl ; mno',10)",
"_instanceStringWidthU(font,u'abcde fghi . jkl ; mno',10)",
):
print('%s %s' % (modname,cmd))
s=';'.join((
"from reportlab.pdfbase.pdfmetrics import getFont",
"from %s import unicode2T1,_instanceStringWidthU" % modname,
"fonts=[getFont('Helvetica')]+getFont('Helvetica').substitutionFonts""",
"font=fonts[0]",
))
os.system('%s -m timeit -s"%s" "%s"' % (sys.executable,s,cmd))
|
Ivoz/pip
|
refs/heads/develop
|
pip/_vendor/colorama/__init__.py
|
450
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
from .initialise import init, deinit, reinit
from .ansi import Fore, Back, Style
from .ansitowin32 import AnsiToWin32
VERSION = '0.2.7'
|
YuxuanLing/trunk
|
refs/heads/master
|
trunk/code/study/python/Fluent-Python-example-code/attic/control/guido/guido0.py
|
1
|
"""
Exemplo adaptado da mensagem do Guido van Rossum em:
https://groups.google.com/forum/#!msg/python-tulip/bmphRrryuFk/aB45sEJUomYJ
http://bit.ly/yieldfrom
>>> principal(ger1())
OK
42
Visualização no PythonTutor: http://goo.gl/FQWq2F
"""
def ger1():
val = yield 'OK'
print(val)
    yield  # to avoid StopIteration
def principal(g):
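    # next(g) primes the generator, running ger1 up to `yield 'OK'` and
    # returning 'OK'; send(42) then resumes it, so val becomes 42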
print(next(g))
g.send(42)
# self-test
import doctest
doctest.testmod()
|
madslonnberg/blog
|
refs/heads/master
|
node_modules/pygmentize-bundled/vendor/pygments/scripts/find_error.py
|
117
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Lexing error finder
~~~~~~~~~~~~~~~~~~~
For the source files given on the command line, display
the text where Error tokens are being generated, along
with some context.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os
# always prefer Pygments from source if exists
srcpath = os.path.join(os.path.dirname(__file__), '..')
if os.path.isdir(os.path.join(srcpath, 'pygments')):
sys.path.insert(0, srcpath)
from pygments.lexer import RegexLexer
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
from pygments.token import Error, Text, _TokenType
from pygments.cmdline import _parse_options
class DebuggingRegexLexer(RegexLexer):
"""Make the state stack, position and current match instance attributes."""
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the initial stack (default: ``['root']``)
"""
self.pos = 0
tokendefs = self._tokens
self.statestack = list(stack)
statetokens = tokendefs[self.statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
self.m = m = rexmatch(text, self.pos)
if m:
if type(action) is _TokenType:
yield self.pos, action, m.group()
else:
for item in action(self, m):
yield item
self.pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
self.statestack.pop()
elif state == '#push':
self.statestack.append(self.statestack[-1])
else:
self.statestack.append(state)
elif isinstance(new_state, int):
# pop
del self.statestack[new_state:]
elif new_state == '#push':
self.statestack.append(self.statestack[-1])
else:
assert False, 'wrong state def: %r' % new_state
statetokens = tokendefs[self.statestack[-1]]
break
else:
try:
if text[self.pos] == '\n':
# at EOL, reset state to 'root'
self.pos += 1
self.statestack = ['root']
statetokens = tokendefs['root']
yield self.pos, Text, u'\n'
continue
yield self.pos, Error, text[self.pos]
self.pos += 1
except IndexError:
break
def main(fn, lexer=None, options={}):
if lexer is not None:
lx = get_lexer_by_name(lexer)
else:
try:
lx = get_lexer_for_filename(os.path.basename(fn), **options)
except ValueError:
try:
name, rest = fn.split('_', 1)
lx = get_lexer_by_name(name, **options)
except ValueError:
raise AssertionError('no lexer found for file %r' % fn)
debug_lexer = False
# does not work for e.g. ExtendedRegexLexers
if lx.__class__.__bases__ == (RegexLexer,):
lx.__class__.__bases__ = (DebuggingRegexLexer,)
debug_lexer = True
elif lx.__class__.__bases__ == (DebuggingRegexLexer,):
# already debugged before
debug_lexer = True
lno = 1
text = file(fn, 'U').read()
text = text.strip('\n') + '\n'
tokens = []
states = []
def show_token(tok, state):
reprs = map(repr, tok)
print ' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
if debug_lexer:
print ' ' + ' ' * (29-len(reprs[0])) + repr(state),
print
for type, val in lx.get_tokens(text):
lno += val.count('\n')
if type == Error:
print 'Error parsing', fn, 'on line', lno
print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
if showall:
for tok, state in map(None, tokens, states):
show_token(tok, state)
else:
for i in range(max(len(tokens) - num, 0), len(tokens)):
show_token(tokens[i], states[i])
print 'Error token:'
l = len(repr(val))
print ' ' + repr(val),
if debug_lexer and hasattr(lx, 'statestack'):
print ' ' * (60-l) + repr(lx.statestack),
print
print
return 1
tokens.append((type, val))
if debug_lexer:
if hasattr(lx, 'statestack'):
states.append(lx.statestack[:])
else:
states.append(None)
if showall:
for tok, state in map(None, tokens, states):
show_token(tok, state)
return 0
num = 10
showall = False
lexer = None
options = {}
if __name__ == '__main__':
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'n:l:aO:')
for opt, val in opts:
if opt == '-n':
num = int(val)
elif opt == '-a':
showall = True
elif opt == '-l':
lexer = val
elif opt == '-O':
options = _parse_options([val])
ret = 0
for f in args:
ret += main(f, lexer, options)
sys.exit(bool(ret))
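# Example invocation (the file name is illustrative):
#   python find_error.py -l python -n 5 some_module.py
# shows the last five tokens (and lexer states, when available) before the
# first Error token.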
|
tedi3231/openerp
|
refs/heads/master
|
openerp/pooler.py
|
61
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" Functions kept for backward compatibility.
They are simple wrappers around the global RegistryManager methods.
"""
from openerp.modules.registry import RegistryManager
def get_db_and_pool(db_name, force_demo=False, status=None, update_module=False):
"""Create and return a database connection and a newly initialized registry."""
registry = RegistryManager.get(db_name, force_demo, status, update_module)
return registry.db, registry
def restart_pool(db_name, force_demo=False, status=None, update_module=False):
"""Delete an existing registry and return a database connection and a newly initialized registry."""
registry = RegistryManager.new(db_name, force_demo, status, update_module)
return registry.db, registry
def get_db(db_name):
"""Return a database connection. The corresponding registry is initialized."""
return get_db_and_pool(db_name)[0]
def get_pool(db_name, force_demo=False, status=None, update_module=False):
"""Return a model registry."""
return get_db_and_pool(db_name, force_demo, status, update_module)[1]
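# A minimal usage sketch (the database name 'mydb' is hypothetical); every
# wrapper above just delegates to the global RegistryManager:
#
# db, pool = get_db_and_pool('mydb')
# assert pool is RegistryManager.get('mydb')
# assert get_db('mydb') is db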
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kosgroup/odoo
|
refs/heads/10.0
|
addons/calendar/models/__init__.py
|
23
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ir_attachment
import ir_http
import res_partner
import mail_message
import calendar
|
anntzer/scikit-learn
|
refs/heads/main
|
sklearn/covariance/tests/test_robust_covariance.py
|
16
|
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import itertools
import numpy as np
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_raise_message
from sklearn.utils._testing import assert_warns_message
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet
from sklearn.covariance import fast_mcd
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def test_fast_mcd_on_invalid_input():
X = np.arange(100)
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
fast_mcd, X)
def test_mcd_class_on_invalid_input():
X = np.arange(100)
mcd = MinCovDet()
assert_raise_message(ValueError, 'Expected 2D array, got 1D array instead',
mcd.fit, X)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
    assert error_location < tol_loc
    error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
    assert error_cov < tol_cov
    assert np.sum(H) >= tol_support
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_mcd_issue3367():
# Check that MCD completes when the covariance matrix is singular
# i.e. one of the rows and columns are all zeros
rand_gen = np.random.RandomState(0)
# Think of these as the values for X and Y -> 10 values between -5 and 5
data_values = np.linspace(-5, 5, 10).tolist()
# Get the cartesian product of all possible coordinate pairs from above set
data = np.array(list(itertools.product(data_values, data_values)))
    # Add a third column that's all zeros to make our data a set of points
    # within a plane, which means that the covariance matrix will be singular
data = np.hstack((data, np.zeros((data.shape[0], 1))))
# The below line of code should raise an exception if the covariance matrix
# is singular. As a further test, since we have points in XYZ, the
    # principal components (Eigenvectors) of these directly relate to the
# geometry of the points. Since it's a plane, we should be able to test
# that the Eigenvector that corresponds to the smallest Eigenvalue is the
# plane normal, specifically [0, 0, 1], since everything is in the XY plane
# (as I've set it up above). To do this one would start by:
#
# evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
# normal = evecs[:, np.argmin(evals)]
#
# After which we need to assert that our `normal` is equal to [0, 0, 1].
# Do note that there is floating point error associated with this, so it's
# best to subtract the two and then compare some small tolerance (e.g.
# 1e-12).
MinCovDet(random_state=rand_gen).fit(data)
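    # A sketch of the eigenvector check described above (not part of the
    # original test; the sign normalization and 1e-12 tolerance are
    # assumptions):
    #
    # mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
    # evals, evecs = np.linalg.eigh(mcd_fit.covariance_)
    # normal = evecs[:, np.argmin(evals)]
    # assert np.allclose(np.abs(normal), [0, 0, 1], atol=1e-12)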
def test_mcd_support_covariance_is_zero():
# Check that MCD returns a ValueError with informative message when the
# covariance of the support data is equal to 0.
X_1 = np.array([0.5, 0.1, 0.1, 0.1, 0.957, 0.1, 0.1, 0.1, 0.4285, 0.1])
X_1 = X_1.reshape(-1, 1)
X_2 = np.array([0.5, 0.3, 0.3, 0.3, 0.957, 0.3, 0.3, 0.3, 0.4285, 0.3])
X_2 = X_2.reshape(-1, 1)
msg = ('The covariance matrix of the support data is equal to 0, try to '
'increase support_fraction')
for X in [X_1, X_2]:
assert_raise_message(ValueError, msg, MinCovDet().fit, X)
def test_mcd_increasing_det_warning():
# Check that a warning is raised if we observe increasing determinants
# during the c_step. In theory the sequence of determinants should be
# decreasing. Increasing determinants are likely due to ill-conditioned
# covariance matrices that result in poor precision matrices.
X = [[5.1, 3.5, 1.4, 0.2],
[4.9, 3.0, 1.4, 0.2],
[4.7, 3.2, 1.3, 0.2],
[4.6, 3.1, 1.5, 0.2],
[5.0, 3.6, 1.4, 0.2],
[4.6, 3.4, 1.4, 0.3],
[5.0, 3.4, 1.5, 0.2],
[4.4, 2.9, 1.4, 0.2],
[4.9, 3.1, 1.5, 0.1],
[5.4, 3.7, 1.5, 0.2],
[4.8, 3.4, 1.6, 0.2],
[4.8, 3.0, 1.4, 0.1],
[4.3, 3.0, 1.1, 0.1],
[5.1, 3.5, 1.4, 0.3],
[5.7, 3.8, 1.7, 0.3],
[5.4, 3.4, 1.7, 0.2],
[4.6, 3.6, 1.0, 0.2],
[5.0, 3.0, 1.6, 0.2],
[5.2, 3.5, 1.5, 0.2]]
mcd = MinCovDet(random_state=1)
assert_warns_message(RuntimeWarning,
"Determinant has increased",
mcd.fit, X)
|
ric2b/Vivaldi-browser
|
refs/heads/master
|
chromium/chrome/PRESUBMIT.py
|
4
|
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Presubmit script for changes affecting chrome/
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
import re
INCLUDE_CPP_FILES_ONLY = (
r'.*\.(cc|h)$',
)
INCLUDE_SOURCE_FILES_ONLY = (
r'.*\.(c|cc|cpp|h|m|mm)$',
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages.*\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# Header trickery
r'.*-inl\.h$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
)
def _CheckChangeLintsClean(input_api, output_api):
"""Makes sure that the chrome/ code is cpplint clean."""
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
return input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources)
def _CheckNoContentUnitTestsInChrome(input_api, output_api):
"""Makes sure that no unit tests from content/ are included in unit_tests."""
problems = []
for f in input_api.AffectedFiles():
if not f.LocalPath().endswith('BUILD.gn'):
continue
for line_num, line in f.ChangedContents():
m = re.search(r"'(.*\/content\/.*unittest.*)'", line)
if m:
problems.append(m.group(1))
if not problems:
return []
return [output_api.PresubmitPromptWarning(
'Unit tests located in content/ should be added to the ' +
'content_unittests target.',
items=problems)]
def _CheckNoOSIOSMacrosInChromeFile(input_api, f):
"""Check for OS_IOS in a given file in chrome/."""
preprocessor_statement = input_api.re.compile(r'^\s*#')
ios_macro = input_api.re.compile(r'defined\(OS_IOS\)')
results = []
for lnum, line in f.ChangedContents():
if preprocessor_statement.search(line) and ios_macro.search(line):
results.append(' %s:%d' % (f.LocalPath(), lnum))
return results
def _CheckNoOSIOSMacrosInChrome(input_api, output_api):
"""Check for OS_IOS which isn't used in chrome/."""
ios_macros = []
def SourceFilter(affected_file):
return input_api.FilterSourceFile(affected_file, INCLUDE_SOURCE_FILES_ONLY,
input_api.DEFAULT_BLACK_LIST)
for f in input_api.AffectedSourceFiles(SourceFilter):
ios_macros.extend(_CheckNoOSIOSMacrosInChromeFile(input_api, f))
if not ios_macros:
return []
return [output_api.PresubmitError(
'OS_IOS is not used in chrome/ but found in:\n', ios_macros)]
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckNoContentUnitTestsInChrome(input_api, output_api))
results.extend(_CheckNoOSIOSMacrosInChrome(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
results.extend(_CheckChangeLintsClean(input_api, output_api))
return results
def CheckChangeOnCommit(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
return results
|
jamesfolberth/jupyterhub_AWS_deployment
|
refs/heads/master
|
notebooks/data8_notebooks/project1/tests/q22.py
|
4
|
test = {
'name': 'Question',
'points': 1,
'suites': [
{
'cases': [
{
'code': r"""
>>> income_raw.labels == income_by_zipcode.labels
True
""",
'hidden': False,
'locked': False
},
],
'scored': True,
'setup': '',
'teardown': '',
'type': 'doctest'
}
]
}
|
tkanemoto/kombu
|
refs/heads/master
|
examples/complete_send.py
|
31
|
"""
Example producer that sends a single message and exits.
You can use `complete_receive.py` to receive the message sent.
"""
from kombu import Connection, Producer, Exchange, Queue
#: By default messages sent to exchanges are persistent (delivery_mode=2),
#: and queues and exchanges are durable.
exchange = Exchange('kombu_demo', type='direct')
queue = Queue('kombu_demo', exchange, routing_key='kombu_demo')
with Connection('amqp://guest:guest@localhost:5672//') as connection:
#: Producers are used to publish messages.
    #: A default exchange and routing key can also be specified
    #: as arguments to the Producer, but we'd rather specify this explicitly
    #: at the publish call.
producer = Producer(connection)
#: Publish the message using the json serializer (which is the default),
#: and zlib compression. The kombu consumer will automatically detect
#: encoding, serialization and compression used and decode accordingly.
producer.publish({'hello': 'world'},
exchange=exchange,
routing_key='kombu_demo',
serializer='json', compression='zlib')
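#: A minimal matching consumer sketch (the full example lives in
#: complete_receive.py; broker URL assumed identical to the one above):
#
# from kombu import Consumer
# def on_message(body, message):
#     print('received: %r' % (body,))
#     message.ack()
# with Connection('amqp://guest:guest@localhost:5672//') as conn:
#     with Consumer(conn, [queue], callbacks=[on_message]):
#         conn.drain_events(timeout=5)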
|
farseerri/git_code
|
refs/heads/master
|
tests/system/suite_tools/tst_codepasting/test.py
|
2
|
#############################################################################
##
## Copyright (C) 2014 Digia Plc and/or its subsidiary(-ies).
## Contact: http://www.qt-project.org/legal
##
## This file is part of Qt Creator.
##
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and Digia. For licensing terms and
## conditions see http://www.qt.io/licensing. For further information
## use the contact form at http://www.qt.io/contact-us.
##
## GNU Lesser General Public License Usage
## Alternatively, this file may be used under the terms of the GNU Lesser
## General Public License version 2.1 or version 3 as published by the Free
## Software Foundation and appearing in the file LICENSE.LGPLv21 and
## LICENSE.LGPLv3 included in the packaging of this file. Please review the
## following information to ensure the GNU Lesser General Public License
## requirements will be met: https://www.gnu.org/licenses/lgpl.html and
## http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html.
##
## In addition, as a special exception, Digia gives you certain additional
## rights. These rights are described in the Digia Qt LGPL Exception
## version 1.1, included in the file LGPL_EXCEPTION.txt in this package.
##
#############################################################################
source("../../shared/qtcreator.py")
import random
def invalidPasteId(protocol):
if protocol == 'Paste.KDE.Org':
return None
else:
return -1
def main():
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
protocolsToTest = ["Paste.KDE.Org"]#, "Pastebin.Ca"]
    # Be careful with Pastebin.Com, there are only 10 pastes per 24h
    # for all machines sharing your IP address.
# protocolsToTest += ["Pastebin.Com"]
sourceFile = os.path.join(os.getcwd(), "testdata", "main.cpp")
aut = currentApplicationContext()
# make sure General Messages is open
openGeneralMessages()
for protocol in protocolsToTest:
invokeMenuItem("File", "Open File or Project...")
selectFromFileDialog(sourceFile)
editor = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
type(editor, "<Up>")
typeLines(editor, "// tst_codepasting %s" % datetime.utcnow())
sourceText = editor.plainText
invokeMenuItem("Tools", "Code Pasting", "Paste Snippet...")
selectFromCombo(":Send to Codepaster.protocolBox_QComboBox", protocol)
pasteEditor = waitForObject(":stackedWidget.plainTextEdit_QPlainTextEdit")
test.compare(pasteEditor.plainText, sourceText, "Verify that dialog shows text from the editor")
description = "Description %s" % datetime.utcnow()
type(waitForObject(":uiDescription_QLineEdit"), description)
typeLines(pasteEditor, "// tst_codepasting %s" % datetime.utcnow())
pastedText = pasteEditor.plainText
expiry = waitForObject(":Send to Codepaster.qt_spinbox_lineedit_QLineEdit")
expiryDays = random.randint(1, 10)
replaceEditorContent(expiry, "%d" % expiryDays)
test.log("Using expiry of %d days." % expiryDays)
# make sure to read all former errors (they won't get read twice)
aut.readStderr()
clickButton(waitForObject(":Send to Codepaster.Paste_QPushButton"))
outputWindow = waitForObject(":Qt Creator_Core::OutputWindow")
waitFor("'http://' in str(outputWindow.plainText)", 20000)
try:
output = str(outputWindow.plainText).splitlines()[-1]
except:
output = ""
stdErrOut = aut.readStderr()
match = re.search("^%s protocol error: (.*)$" % protocol, stdErrOut, re.MULTILINE)
if match:
pasteId = invalidPasteId(protocol)
if "Internal Server Error" in match.group(1):
test.warning("Server Error - trying to continue...")
else:
test.fail("%s protocol error: %s" % (protocol, match.group(1)))
elif output.strip() == "":
pasteId = invalidPasteId(protocol)
elif "Post limit, maximum pastes per 24h reached" in output:
test.warning("Maximum pastes per day exceeded.")
pasteId = None
else:
pasteId = output.rsplit("/", 1)[1]
clickButton(waitForObject(":*Qt Creator.Clear_QToolButton"))
invokeMenuItem('File', 'Revert "main.cpp" to Saved')
clickButton(waitForObject(":Revert to Saved.Proceed_QPushButton"))
snooze(1) # "Close All" might be disabled
invokeMenuItem("File", "Close All")
if not pasteId:
test.fatal("Could not get id of paste to %s" % protocol)
continue
invokeMenuItem("Tools", "Code Pasting", "Fetch Snippet...")
selectFromCombo(":CodePaster__Internal__PasteSelectDialog.protocolBox_QComboBox", protocol)
pasteModel = waitForObject(":CodePaster__Internal__PasteSelectDialog.listWidget_QListWidget").model()
waitFor("pasteModel.rowCount() > 1", 20000)
if protocol == 'Pastebin.Ca':
description = description[:32]
if pasteId == -1:
try:
pasteLine = filter(lambda str: description in str, dumpItems(pasteModel))[0]
pasteId = pasteLine.split(" ", 1)[0]
except:
test.fail("Could not find description line in list of pastes from %s" % protocol)
clickButton(waitForObject(":CodePaster__Internal__PasteSelectDialog.Cancel_QPushButton"))
continue
else:
try:
pasteLine = filter(lambda str: pasteId in str, dumpItems(pasteModel))[0]
except:
test.fail("Could not find id '%s' in list of pastes from %s" % (pasteId, protocol))
clickButton(waitForObject(":CodePaster__Internal__PasteSelectDialog.Cancel_QPushButton"))
continue
if protocol.startswith("Pastebin."):
test.verify(description in pasteLine, "Verify that line in list of pastes contains the description")
pasteLine = pasteLine.replace(".", "\\.")
waitForObjectItem(":CodePaster__Internal__PasteSelectDialog.listWidget_QListWidget", pasteLine)
clickItem(":CodePaster__Internal__PasteSelectDialog.listWidget_QListWidget", pasteLine, 5, 5, 0, Qt.LeftButton)
clickButton(waitForObject(":CodePaster__Internal__PasteSelectDialog.OK_QPushButton"))
filenameCombo = waitForObject(":Qt Creator_FilenameQComboBox")
waitFor("not filenameCombo.currentText.isEmpty()", 20000)
editor = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
test.compare(filenameCombo.currentText, "%s: %s" % (protocol, pasteId), "Verify title of editor")
test.compare(editor.plainText, pastedText, "Verify that pasted and fetched texts are the same")
invokeMenuItem("File", "Close All")
invokeMenuItem("File", "Open File or Project...")
selectFromFileDialog(sourceFile)
editor = waitForObject(":Qt Creator_CppEditor::Internal::CPPEditorWidget")
markText(editor, "Down", 7)
# QString QTextCursor::selectedText () const:
# "Note: If the selection obtained from an editor spans a line break, the text will contain a
# Unicode U+2029 paragraph separator character instead of a newline \n character."
selectedText = str(editor.textCursor().selectedText()).replace(unichr(0x2029), "\n")
invokeMenuItem("Tools", "Code Pasting", "Paste Snippet...")
test.compare(waitForObject(":stackedWidget.plainTextEdit_QPlainTextEdit").plainText,
selectedText, "Verify that dialog shows selected text from the editor")
clickButton(waitForObject(":Send to Codepaster.Cancel_QPushButton"))
invokeMenuItem("File", "Exit")
|
neerajvashistha/pa-dude
|
refs/heads/master
|
lib/python2.7/site-packages/sphinx/pycode/pgen2/grammar.py
|
7
|
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""This module defines the data structures used to represent a grammar.
These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.
There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.
"""
from __future__ import print_function
# Python imports
import pickle
# Local imports
from sphinx.pycode.pgen2 import token
class Grammar(object):
"""Pgen parsing tables tables conversion class.
Once initialized, this class supplies the grammar tables for the
parsing engine implemented by parse.py. The parsing engine
accesses the instance variables directly. The class here does not
provide initialization of the tables; several subclasses exist to
do this (see the conv and pgen modules).
The load() method reads the tables from a pickle file, which is
much faster than the other ways offered by subclasses. The pickle
file is written by calling dump() (after loading the grammar
tables using a subclass). The report() method prints a readable
representation of the tables to stdout, for debugging.
The instance variables are as follows:
symbol2number -- a dict mapping symbol names to numbers. Symbol
numbers are always 256 or higher, to distinguish
them from token numbers, which are between 0 and
255 (inclusive).
number2symbol -- a dict mapping numbers to symbol names;
these two are each other's inverse.
states -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is an (i, j) pair where i is a label and j is
a state number. The DFA number is the index into
this list. (This name is slightly confusing.)
Final states are represented by a special arc of
the form (0, j) where j is its own state number.
dfas -- a dict mapping symbol numbers to (DFA, first)
pairs, where DFA is an item from the states list
above, and first is a set of tokens that can
begin this grammar rule (represented by a dict
whose values are always 1).
labels -- a list of (x, y) pairs where x is either a token
number or a symbol number, and y is either None
or a string; the strings are keywords. The label
number is the index in this list; label numbers
are used to mark state transitions (arcs) in the
DFAs.
start -- the number of the grammar's start symbol.
keywords -- a dict mapping keyword strings to arc labels.
tokens -- a dict mapping token numbers to arc labels.
"""
def __init__(self):
self.symbol2number = {}
self.number2symbol = {}
self.states = []
self.dfas = {}
self.labels = [(0, "EMPTY")]
self.keywords = {}
self.tokens = {}
self.symbol2label = {}
self.start = 256
def dump(self, filename):
"""Dump the grammar tables to a pickle file."""
f = open(filename, "wb")
pickle.dump(self.__dict__, f, 2)
f.close()
def load(self, filename):
"""Load the grammar tables from a pickle file."""
f = open(filename, "rb")
d = pickle.load(f)
f.close()
self.__dict__.update(d)
def report(self):
"""Dump the grammar tables to standard output, for debugging."""
from pprint import pprint
print("s2n")
pprint(self.symbol2number)
print("n2s")
pprint(self.number2symbol)
print("states")
pprint(self.states)
print("dfas")
pprint(self.dfas)
print("labels")
pprint(self.labels)
print("start", self.start)
# Map from operator to number (since tokenize doesn't do this)
opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
... ELLIPSIS
"""
opmap = {}
for line in opmap_raw.splitlines():
if line:
op, name = line.split()
opmap[op] = getattr(token, name)
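# A minimal round-trip sketch (the pickle filename is illustrative):
#
# g = Grammar()
# g.dump('grammar.pickle')   # pickles g.__dict__ with protocol 2
# h = Grammar()
# h.load('grammar.pickle')   # restores the tables into h.__dict__
# assert h.labels == g.labels and opmap['('] == token.LPAR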
|
camptocamp/ngo-addons-backport
|
refs/heads/master
|
openerp/tools/sql.py
|
455
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def drop_view_if_exists(cr, viewname):
cr.execute("DROP view IF EXISTS %s CASCADE" % (viewname,))
cr.commit()
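# Usage sketch (`cr` is a database cursor; the view name is illustrative).
# Note that the name is interpolated directly into the statement, so it must
# come from trusted code, never from user input:
#
# drop_view_if_exists(cr, 'report_sales_view')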
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
WangDequan/kaggle-ndsb
|
refs/heads/master
|
configurations/bagging_03_convroll4_big_wd_maxout512.py
|
6
|
import numpy as np
import theano
import theano.tensor as T
import lasagne as nn
import data
import load
import nn_plankton
import dihedral
import dihedral_fast
import tmp_dnn
import tta
validation_split_path = "splits/bagging_split_3.pkl"
patch_size = (95, 95)
augmentation_params = {
'zoom_range': (1 / 1.6, 1.6),
'rotation_range': (0, 360),
'shear_range': (-20, 20),
'translation_range': (-10, 10),
'do_flip': True,
'allow_stretch': 1.3,
}
batch_size = 128 // 4
chunk_size = 32768 // 4
num_chunks_train = 840
momentum = 0.9
learning_rate_schedule = {
0: 0.003,
700: 0.0003,
800: 0.00003,
}
validate_every = 20
save_every = 20
def estimate_scale(img):
return np.maximum(img.shape[0], img.shape[1]) / 85.0
# augmentation_transforms_test = []
# for flip in [True, False]:
# for zoom in [1/1.3, 1/1.2, 1/1.1, 1.0, 1.1, 1.2, 1.3]:
# for rot in np.linspace(0.0, 360.0, 5, endpoint=False):
# tf = data.build_augmentation_transform(zoom=(zoom, zoom), rotation=rot, flip=flip)
# augmentation_transforms_test.append(tf)
augmentation_transforms_test = tta.build_quasirandom_transforms(70, **{
'zoom_range': (1 / 1.4, 1.4),
'rotation_range': (0, 360),
'shear_range': (-10, 10),
'translation_range': (-8, 8),
'do_flip': True,
'allow_stretch': 1.2,
})
data_loader = load.ZmuvRescaledDataLoader(estimate_scale=estimate_scale, num_chunks_train=num_chunks_train,
patch_size=patch_size, chunk_size=chunk_size, augmentation_params=augmentation_params,
augmentation_transforms_test=augmentation_transforms_test, validation_split_path=validation_split_path)
# Conv2DLayer = nn.layers.cuda_convnet.Conv2DCCLayer
# MaxPool2DLayer = nn.layers.cuda_convnet.MaxPool2DCCLayer
Conv2DLayer = tmp_dnn.Conv2DDNNLayer
MaxPool2DLayer = tmp_dnn.MaxPool2DDNNLayer
def build_model():
l0 = nn.layers.InputLayer((batch_size, 1, patch_size[0], patch_size[1]))
l0c = dihedral.CyclicSliceLayer(l0)
l1a = Conv2DLayer(l0c, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1b = Conv2DLayer(l1a, num_filters=16, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l1 = MaxPool2DLayer(l1b, ds=(3, 3), strides=(2, 2))
l1r = dihedral_fast.CyclicConvRollLayer(l1)
l2a = Conv2DLayer(l1r, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2b = Conv2DLayer(l2a, num_filters=32, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l2 = MaxPool2DLayer(l2b, ds=(3, 3), strides=(2, 2))
l2r = dihedral_fast.CyclicConvRollLayer(l2)
l3a = Conv2DLayer(l2r, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3b = Conv2DLayer(l3a, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3c = Conv2DLayer(l3b, num_filters=64, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l3 = MaxPool2DLayer(l3c, ds=(3, 3), strides=(2, 2))
l3r = dihedral_fast.CyclicConvRollLayer(l3)
l4a = Conv2DLayer(l3r, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4b = Conv2DLayer(l4a, num_filters=256, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4c = Conv2DLayer(l4b, num_filters=128, filter_size=(3, 3), border_mode="same", W=nn_plankton.Conv2DOrthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=nn_plankton.leaky_relu, untie_biases=True)
l4 = MaxPool2DLayer(l4c, ds=(3, 3), strides=(2, 2))
l4r = dihedral_fast.CyclicConvRollLayer(l4)
l4f = nn.layers.flatten(l4r)
l5 = nn.layers.DenseLayer(nn.layers.dropout(l4f, p=0.5), num_units=1024, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=None)
l5fp = nn.layers.FeaturePoolLayer(l5, ds=2)
l5r = dihedral_fast.CyclicRollLayer(l5fp)
l6 = nn.layers.DenseLayer(nn.layers.dropout(l5r, p=0.5), num_units=1024, W=nn_plankton.Orthogonal(1.0), b=nn.init.Constant(0.1), nonlinearity=None)
l6fp = nn.layers.FeaturePoolLayer(l6, ds=2)
l6m = dihedral.CyclicPoolLayer(l6fp, pool_function=nn_plankton.rms)
l7 = nn.layers.DenseLayer(nn.layers.dropout(l6m, p=0.5), num_units=data.num_classes, nonlinearity=T.nnet.softmax, W=nn_plankton.Orthogonal(1.0))
return [l0], l7
def build_objective(l_ins, l_out):
lambda_reg = 0.0005
params = nn.layers.get_all_non_bias_params(l_out)
reg_term = sum(T.sum(p**2) for p in params)
def loss(y, t):
return nn_plankton.log_loss(y, t) + lambda_reg * reg_term
return nn.objectives.Objective(l_out, loss_function=loss)
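# The objective above is multiclass log-loss plus L2 weight decay over all
# non-bias parameters: loss = log_loss(y, t) + lambda_reg * sum_p ||p||^2.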
|
nccgroup/umap2
|
refs/heads/master
|
umap2/apps/base.py
|
1
|
'''
Umap2 applications should subclass the Umap2App.
'''
import sys
import os
import importlib
import logging
import docopt
from serial import Serial, PARITY_NONE
from umap2.phy.facedancer.max342x_phy import Max342xPhy
from umap2.phy.gadgetfs.gadgetfs_phy import GadgetFsPhy
from umap2.utils.ulogger import set_default_handler_level
class Umap2App(object):
def __init__(self, docstring=None):
if docstring is not None:
self.options = docopt.docopt(docstring)
else:
self.options = {}
self.umap_class_dict = {
'audio': ('audio', 'Headset'),
'billboard': ('billboard', 'A billboard, requires USB 2.1 and higher'),
'cdc_acm': ('cdc_acm', 'Abstract Control Model device (like serial modem)'),
'cdc_dl': ('cdc_dl', 'Direct Line Control device (like modem)'),
'ftdi': ('ftdi', 'USB<->RS232 FTDI chip'),
'hub': ('hub', 'USB hub'),
'keyboard': ('keyboard', 'Keyboard'),
'mass_storage': ('mass_storage', 'Disk on key'),
'mtp': ('mtp', 'Android phone'),
'printer': ('printer', 'Printer'),
'smartcard': ('smartcard', 'USB<->smart card interface'),
}
self.umap_classes = sorted(self.umap_class_dict.keys())
self.logger = self.get_logger()
self.num_processed = 0
self.fuzzer = None
self.setup_packet_received = False
def get_logger(self):
levels = {
0: logging.INFO,
1: logging.DEBUG,
# verbose is added by umap2.__init__ module
2: logging.VERBOSE,
}
verbose = self.options.get('--verbose', 0)
logger = logging.getLogger('umap2')
if verbose in levels:
set_default_handler_level(levels[verbose])
else:
set_default_handler_level(logging.VERBOSE)
if self.options.get('--quiet', False):
set_default_handler_level(logging.WARNING)
return logger
def load_phy(self, phy_string):
self.logger.info('Loading physical interface: %s' % phy_string)
phy_arr = phy_string.split(':')
phy_type = phy_arr[0]
if phy_type == 'fd':
self.logger.debug('Physical interface is facedancer')
dev_name = phy_arr[1]
s = Serial(dev_name, 115200, parity=PARITY_NONE, timeout=2)
# fd = Facedancer(s)
phy = Max342xPhy(self, s)
return phy
elif phy_type == 'rd':
try:
from umap2.phy.raspdancer.raspdancer_phy import RaspdancerPhy
self.logger.debug('Physical interface is raspdancer')
phy = RaspdancerPhy(self)
return phy
except ImportError:
                raise Exception('Raspdancer support requires the spi and/or gpio modules.')
elif phy_type == 'gadgetfs':
self.logger.debug('Physical interface is GadgetFs')
phy = GadgetFsPhy(self)
return phy
raise Exception('Phy type not supported: %s' % phy_type)
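        # Example phy strings accepted above (the serial device path is
        # hypothetical):
        #   'fd:/dev/ttyUSB0' -> facedancer (MAX342x) over a serial port
        #   'rd'              -> raspdancer over SPI/GPIO
        #   'gadgetfs'        -> gadgetfs kernel interface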
def load_device(self, dev_name, phy):
if dev_name in self.umap_classes:
self.logger.info('Loading USB device %s' % dev_name)
module_name = self.umap_class_dict[dev_name][0]
module = importlib.import_module('umap2.dev.%s' % module_name)
else:
self.logger.info('Loading custom USB device from file: %s' % dev_name)
dirpath, filename = os.path.split(dev_name)
modulename = filename[:-3]
if dirpath in sys.path:
sys.path.remove(dirpath)
sys.path.insert(0, dirpath)
module = __import__(modulename, globals(), locals(), [], -1)
usb_device = module.usb_device
kwargs = self.get_user_device_kwargs()
dev = usb_device(self, phy, **kwargs)
return dev
def get_user_device_kwargs(self):
'''
        If the user provides values for the device, get them here
'''
kwargs = {}
self.update_from_user_param('--vid', 'vid', kwargs, 'int')
self.update_from_user_param('--pid', 'pid', kwargs, 'int')
return kwargs
def update_from_user_param(self, flag, arg_name, kwargs, type):
val = self.options.get(flag, None)
if val is not None:
if type == 'int':
kwargs[arg_name] = int(val, 0)
self.logger.info('Setting user-supplied %s: %#x' % (arg_name, kwargs[arg_name]))
else:
raise Exception('arg type not supported!!')
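        # e.g. passing '--vid 0x1234' on the command line yields
        # kwargs['vid'] == 0x1234, since int(val, 0) accepts hex, octal and
        # decimal literals alike.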
def signal_setup_packet_received(self):
'''
Signal that we received a setup packet from the host (host is alive)
'''
self.setup_packet_received = True
def should_stop_phy(self):
'''
:return: whether phy should stop serving.
'''
return False
def usb_function_supported(self, reason=None):
'''
Callback from a USB device, notifying that the current USB device
is supported by the host.
By default, do nothing with this information
:param reason: reason why we decided it is supported (default: None)
'''
pass
def get_mutation(self, stage, data=None):
'''
mutation is only needed when fuzzing
'''
return None
|
sstocker46/pyrobotlab
|
refs/heads/master
|
toSort/inMoovTalkMovement.py
|
5
|
from time import sleep
inMoov = Runtime.createAndStart("inMoov", "InMoov")
inMoov.initialize("left","atmega1280","COM7")
inMoov.initialize("right","uno","COM8")
inMoov.initializeHead("left")
inMoov.eye.setCameraIndex(1)
inMoov.tracking.calibrate()
inMoov.systemCheck()
def heard():
data = msg_ear_recognized.data[0]
print "heard ", data
#mouth.setLanguage("fr")
mouth.setLanguage("en")
mouth.speak("you said " + data)
if (data == "rest"):
rest()
elif (data == "one"):
takeball()
elif (data == "one ball"):
ball()
elif (data == "two"):
keepball()
elif (data == "three"):
goestotake1()
elif (data == "four"):
goestotake2()
elif (data == "five"):
take()
elif (data == "six"):
takefinal1()
elif (data == "seven"):
takefinal2()
elif (data == "eight"):
takefinal3()
elif (data == "nine"):
takefinal4()
elif (data == "ten"):
davinciarm1()
elif (data == "look one"):
lookatthing2()
elif (data == "down one"):
putdown1()
elif (data == "down two"):
putdown2()
elif (data == "point"):
pointfinger()
elif (data == "scared"):
scared()
elif (data == "ballet"):
ballet()
elif (data == "surrender"):
surrender()
elif (data == "surrender two"):
surrender2()
elif (data == "what"):
what()
elif (data == "welcome"):
welcome()
elif (data == "protect"):
protectface()
elif (data == "start tracking"):
inMoov.startTracking()
elif (data == "stop tracking"):
inMoov.stopTracking()
inMoov.startListening("rest | one ball | one | two | three | four | five | six | seven | eight | nine | ten | look one | down one | down two | point | scared | ballet | surrender | surrender two | what | welcome | protect | start tracking | stop tracking")
def rest():
inMoov.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
inMoov.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
inMoov.setArmSpeed("left", 0.75, 0.75, 0.75, 0.75)
inMoov.setArmSpeed("right", 0.75, 0.75, 0.75, 0.75)
inMoov.setHeadSpeed( 0.75, 0.75)
inMoov.moveHead(90,90)
inMoov.moveArm("left",0,84,16,15)
inMoov.moveArm("right",0,73,29,15)
inMoov.moveHand("left",50,28,30,10,10,90)
inMoov.moveHand("right",10,10,10,10,10,90)
inMoov.broadcastState()
sleep(5)
def ball():
inMoov.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)
inMoov.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
inMoov.moveHead(52,81)
inMoov.moveArm("left",0,84,16,15)
inMoov.moveArm("right",0,85,58,15)
inMoov.moveHand("left",50,28,30,10,10,90)
inMoov.moveHand("right",10,111,103,19,11,90)
def takeball():
inMoov.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)
inMoov.setArmSpeed("right", 0.85, 0.85, 0.85, 0.85)
inMoov.moveHead(52,81)
inMoov.moveArm("left",0,84,16,15)
inMoov.moveArm("right",6,73,65,16)
inMoov.moveHand("left",50,28,30,0,0,90)
inMoov.moveHand("right",85,131,104,106,139,129)
sleep(5)
def keepball():
inMoov.moveHead(0,80)
inMoov.moveArm("left",0,84,16,15)
inMoov.moveArm("right",70,62,62,16)
inMoov.moveHand("left",50,28,30,10,10,90)
inMoov.moveHand("right",85,131,104,106,139,75)
sleep(4)
def goestotake1():
inMoov.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.65)
inMoov.setArmSpeed("left", 0.75, 0.75, 0.75, 0.95)
inMoov.setArmSpeed("right", 0.95, 0.95, 0.95, 0.85)
inMoov.moveHead(15,84)
inMoov.moveArm("left",90,91,37,15)
inMoov.moveArm("right",63,50,45,15)
inMoov.moveHand("left",50,28,30,10,10,0)
inMoov.moveHand("right",85,85,75,72,81,22)
sleep(1)
def goestotake2():
inMoov.setArmSpeed("left", 0.75, 0.75, 0.75, 0.95)
inMoov.setArmSpeed("right", 0.95, 0.95, 0.95, 0.85)
inMoov.moveHead(12,80)
inMoov.moveArm("left",71,51,37,15)
inMoov.moveArm("right",63,50,45,15)
inMoov.moveHand("left",50,28,30,10,10,0)
inMoov.moveHand("right",77,85,75,72,81,22)
sleep(4)
def take():
inMoov.setHandSpeed("left", 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)
inMoov.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 1.0)
inMoov.moveHead(10,74)
inMoov.moveArm("left",71,51,37,15)
inMoov.moveArm("right",63,50,45,15)
inMoov.moveHand("left",50,28,30,10,10,0)
inMoov.moveHand("right",60,85,75,72,81,22)
sleep(2)
def takefinal1():
inMoov.setHandSpeed("right", 0.65, 0.65, 0.65, 0.65, 0.65, 0.65)
inMoov.moveHead(5,74)
inMoov.moveArm("left",71,51,37,15)
inMoov.moveArm("right",63,50,45,15)
inMoov.moveHand("left",50,28,30,10,10,0)
inMoov.moveHand("right",20,75,74,72,81,22)
sleep(1)
def takefinal2():
inMoov.setHandSpeed("left", 0.75, 0.65, 0.65, 0.65, 0.65, 0.65)
inMoov.setHandSpeed("right", 0.75, 0.65, 0.65, 0.65, 0.65, 0.65)
inMoov.moveHead(10,74)
inMoov.moveArm("left",68,51,37,15)
inMoov.moveArm("right",63,50,45,15)
inMoov.moveHand("left",155,110,118,10,10,0)
inMoov.moveHand("right",20,64,72,72,81,22)
sleep(4)
def takefinal3():
inMoov.setHandSpeed("left", 0.75, 0.65, 0.65, 0.65, 0.65, 0.65)
inMoov.setHandSpeed("right", 0.65, 0.65, 0.65, 0.65, 0.65, 0.65)
inMoov.moveHead(10,74)
inMoov.moveArm("left",68,51,37,15)
inMoov.moveArm("right",63,50,45,15)
inMoov.moveHand("left",170,110,118,10,10,0)
inMoov.moveHand("right",20,30,40,30,30,22)
sleep(3)
def takefinal4():
inMoov.setHandSpeed("left", 1.0, 0.65, 0.65, 0.65, 0.65, 0.65)
inMoov.setHandSpeed("right", 0.75, 0.75, 0.75, 0.75, 0.75, 0.75)
inMoov.setArmSpeed("right", 0.75, 0.85, 0.95, 0.85)
inMoov.moveHead(10,74)
inMoov.moveArm("left",71,51,37,15)
inMoov.setArmSpeed("right", 0.65, 0.65, 0.75, 0.85)
inMoov.moveArm("right",0,82,33,15)
inMoov.moveHand("left",140,125,125,34,34,0)
inMoov.moveHand("right",20,20,40,30,30,20)
sleep(2)
def davinciarm1():
inMoov.setHandSpeed("left", 1.0, 1.0, 1.0, 1.0, 1.0, 0.65)
inMoov.setHandSpeed("right", 1.0, 1.0, 1.0, 1.0, 1.0, 0.65)
inMoov.setArmSpeed("left", 0.75, 0.75, 0.75, 0.75)
inMoov.setArmSpeed("right", 0.75, 0.75, 0.75, 0.75)
inMoov.setHeadSpeed( 0.75, 0.75)
inMoov.moveHead(80,90)
inMoov.moveArm("left",0,118,13,74)
inMoov.moveArm("right",0,118,29,74)
inMoov.moveHand("left",50,28,30,10,10,47)
inMoov.moveHand("right",10,10,10,10,10,137)
sleep(4)
def lookatthing2():
inMoov.setHeadSpeed(0.65, 0.75)
inMoov.moveHead(73,74)
inMoov.moveArm("left",70,64,83,15)
inMoov.moveArm("right",0,82,33,15)
inMoov.moveHand("left",147,130,140,34,34,164)
inMoov.moveHand("right",20,40,40,30,30,10)
sleep(2)
def putdown1():
inMoov.moveHead(0,99)
inMoov.moveArm("left",1,45,57,31)
inMoov.moveArm("right",0,82,33,15)
inMoov.moveHand("left",147,130,135,34,34,35)
inMoov.moveHand("right",20,40,40,30,30,22)
sleep(2)
def putdown2():
inMoov.moveHead(0,99)
inMoov.moveArm("left",1,45,53,31)
inMoov.moveArm("right",0,82,33,15)
sleep(3)
inMoov.moveHand("left",147,61,67,34,34,35)
inMoov.moveHand("right",20,40,40,30,30,22)
inMoov.broadcastState()
sleep(2)
def pointfinger():
inMoov.moveHead(90,90)
inMoov.moveArm("left",0,84,16,15)
inMoov.moveArm("right",26,73,88,15)
inMoov.moveHand("left",50,28,30,10,10,90)
inMoov.moveHand("right",10,10,142,156,148,180)
def scared():
inMoov.moveHead(90,90)
inMoov.moveArm("left",90,40,24,15)
inMoov.moveArm("right",90,40,139,10)
inMoov.moveHand("left",68,85,56,27,26,52)
inMoov.moveHand("right",10,10,20,34,19,156)
def ballet():
inMoov.moveHead(90,90)
inMoov.moveArm("left",0,40,95,29)
inMoov.moveArm("right",50,40,164,10)
inMoov.moveHand("left",68,0,56,27,26,52)
inMoov.moveHand("right",10,10,20,34,19,156)
def surrender():
inMoov.moveHead(90,90)
inMoov.moveArm("left",90,139,15,80)
inMoov.moveArm("right",90,145,37,80)
inMoov.moveHand("left",50,28,30,10,10,76)
inMoov.moveHand("right",10,10,10,10,10,139)
def surrender2():
inMoov.moveHead(90,112)
inMoov.moveArm("left",90,139,48,80)
inMoov.moveArm("right",90,145,77,80)
inMoov.moveHand("left",50,28,30,10,10,76)
inMoov.moveHand("right",10,10,10,10,10,139)
def what():
inMoov.moveHead(38,90)
inMoov.moveArm("left",0,140,0,15)
inMoov.moveArm("right",0,140,2,15)
inMoov.moveHand("left",50,28,30,10,10,158)
inMoov.moveHand("right",10,10,10,10,10,90)
def welcome():
inMoov.moveHead(38,90)
inMoov.moveArm("left",0,140,0,49)
inMoov.moveArm("right",0,140,2,40)
inMoov.moveHand("left",50,28,30,10,10,158)
inMoov.moveHand("right",10,10,10,10,10,90)
def protectface():
inMoov.moveHead(90,90)
inMoov.moveArm("left",90,64,128,43)
inMoov.moveArm("right",0,73,29,15)
inMoov.moveHand("left",50,28,30,10,10,90)
inMoov.moveHand("right",10,10,10,10,10,90)
|
oudalab/phyllo
|
refs/heads/master
|
phyllo/extractors/mayDB.py
|
1
|
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup, NavigableString
from phyllo.phyllo_logger import logger
import nltk
from itertools import cycle
nltk.download('punkt')
from nltk import sent_tokenize
s1=[]
def parseRes2(soup, title, url, cur, author, date, collectiontitle):
chapter = '-'
j = 1
[e.extract() for e in soup.find_all('br')]
[e.extract() for e in soup.find_all('font')]
[e.extract() for e in soup.find_all('table')]
getp = soup.find_all('p')
for p in getp:
# make sure it's not a paragraph without the main text
try:
if p['class'][0].lower() in ['border', 'shortborder', 'smallboarder', 'margin',
                                         'internal_navigation', 'pagehead']:  # these are not part of the main text
continue
except:
pass
if p.text!='':
sen = p.text
sen = sen.strip()
i = 1
s1 = sen.split('\n')
l = 0
s2 = []
if len(s1) % 5 > 0:
while l < (len(s1) - (len(s1) % 5)):
s = s1[l] + ' ' + s1[l + 1] + ' ' + s1[l + 2] + ' ' + s1[l + 3] + ' ' + s1[l + 4]
s2.append(s)
l += 5
s = ''
for i in range(len(s1) - (len(s1) % 5), len(s1)):
s = s + s1[i] + ' '
s2.append(s)
l = 0
elif len(s1) % 5 == 0:
while l < len(s1):
s = s1[l] + ' ' + s1[l + 1] + ' ' + s1[l + 2] + ' ' + s1[l + 3] + ' ' + s1[l + 4]
s2.append(s)
l += 5
if s2[0]!=' ':
for x in s2:
sentn = x.strip()
num = i
chapter = str(j)
cur.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
(None, collectiontitle, title, 'Latin', author, date, chapter,
num, sentn, url, 'prose'))
i += 1
j += 1
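# Grouping sketch for the loops above: with s1 = ['a','b','c','d','e','f','g'],
# s2 becomes ['a b c d e', 'f g '] -- passages of five lines each, with the
# remainder joined into a trailing passage.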
def main():
# get proper URLs
siteURL = 'http://www.thelatinlibrary.com'
biggsURL = 'http://www.thelatinlibrary.com/may.html'
biggsOPEN = urllib.request.urlopen(biggsURL)
biggsSOUP = BeautifulSoup(biggsOPEN, 'html5lib')
textsURL = []
for a in biggsSOUP.find_all('a', href=True):
link = a['href']
textsURL.append("{}/{}".format(siteURL, link))
# remove some unnecessary urls
while ("http://www.thelatinlibrary.com/index.html" in textsURL):
textsURL.remove("http://www.thelatinlibrary.com/index.html")
textsURL.remove("http://www.thelatinlibrary.com/classics.html")
textsURL.remove("http://www.thelatinlibrary.com/neo.html")
textsURL.remove("http://www.thelatinlibrary.com/may/maytitle.shtml")
textsURL.remove("http://www.thelatinlibrary.com/http://ourworld.cs.com/latintexts/index.htm")
logger.info("\n".join(textsURL))
author = biggsSOUP.title.string
author = author.strip()
collectiontitle = biggsSOUP.td.contents[0].strip()
date = biggsSOUP.span.contents[0].strip().replace('(', '').replace(')', '').replace(u"\u2013", '-')
date=date.strip()
title = []
for link in biggsSOUP.findAll('a'):
if (link.get('href') and link.get('href') != 'index.html' and link.get('href') != 'neo.html' and link.get(
'href') != 'classics.html') and link.get('href') != 'http://ourworld.cs.com/latintexts/index.htm' and link.get('href') != 'may/maytitle.shtml':
title.append(link.string)
i = 0
with sqlite3.connect('texts.db') as db:
c = db.cursor()
c.execute(
'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
' link TEXT, documentType TEXT)')
c.execute("DELETE FROM texts WHERE author = 'Thomas May'")
for u in textsURL:
uOpen = urllib.request.urlopen(u)
gestSoup = BeautifulSoup(uOpen, 'html5lib')
parseRes2(gestSoup, title[i], u, c, author, date, collectiontitle)
i = i + 1
if __name__ == '__main__':
main()
|
izonder/intellij-community
|
refs/heads/master
|
python/testData/inspections/importFromModule/foo/bar_after.py
|
83
|
from importFromModule.foo import baz
baz.quux()
|
Jamlum/pytomo
|
refs/heads/master
|
pytomo/cdfplot_new.py
|
2
|
#!/usr/bin/env python
"Module to plot cdf from data or file. Can be called directly."
from __future__ import division, print_function
#from optparse import OptionParser
import sys
# AO 201221010 (due to error in win) =>
try:
import numpy as np
except ImportError:
np = None
# in case of non-interactive usage
#import matplotlib
#matplotlib.use('PDF')
try:
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
from matplotlib.colors import colorConverter
except ImportError:
plt = None
# <= AO 201221010 (due to error in win)
from itertools import cycle
_VERSION = '2.0'
# possibility to place legend outside graph:
#pylab.subfigure(111)
#pylab.subplots_adjust(right=0.8) or (top=0.8)
#pylab.legend(loc=(1.1, 0.5)
class CdfFigure(object):
"Hold the figure and its default properties"
def __init__(self, xlabel='x', ylabel=r'P(X$\leq$x)',
title='Empirical Distribution', fontsize='xx-large',
legend_fontsize='large', legend_ncol=1, subplot_top=None):
self._figure = plt.figure()
if subplot_top:
self._figure.subplotpars.top = subplot_top
self._axis = self._figure.add_subplot(111)
self._lines = {}
self.xlabel = xlabel
self.ylabel = ylabel
self.title = title
self.fontsize = fontsize
self.legend_fontsize = legend_fontsize
self.legend_ncol = legend_ncol
def savefig(self, *args, **kwargs):
"Saves the figure: interface to plt.Figure.savefig"
self._figure.savefig(*args, **kwargs)
def bar(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.bar"
self._axis.bar(*args, **kwargs)
def plot(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.plot"
self._axis.plot(*args, **kwargs)
def get_xlim(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.get_xlim()"
return self._axis.get_xlim(*args, **kwargs)
def set_xlim(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.set_xlim()"
self._axis.set_xlim(*args, **kwargs)
def set_ylim(self, *args, **kwargs):
"Plot in the axis: interface to plt.Axes.set_ylim()"
self._axis.set_ylim(*args, **kwargs)
def cdfplot(self, data_in, name='Data', finalize=False):
"""Plot the cdf of a data array
Wrapper to call the plot method of axes
"""
data = sorted(filter(lambda x: x is not None, data_in))
data_len = len(data)
if data_len == 0:
print("no data to plot", file=sys.stderr)
return
cdf = np.arange(data_len + 1) / data_len
# to have cdf up to 1
data.append(data[-1])
line = self._axis.plot(data, cdf, drawstyle='steps',
label=name + ': %d' % len(data))
self._lines[name] = line[0]
if finalize:
self.adjust_plot()
def ccdfplot(self, data_in, name='Data', finalize=False):
"""Plot the cdf of a data array
Wrapper to call the plot method of axes
"""
data = sorted(filter(lambda x: x is not None, data_in))
data_len = len(data)
if data_len == 0:
print("no data to plot", file=sys.stderr)
return
ccdf = 1 - np.arange(data_len + 1) / data_len
        # to have ccdf down to 0
data.append(data[-1])
line = self._axis.plot(data, ccdf, drawstyle='steps',
label=name + ': %d' % len(data))
self._lines[name] = line[0]
if finalize:
self.adjust_plot()
def show(self):
"Show the figure, and hold to do interactive drawing"
self._figure.show()
self._figure.hold(True)
@staticmethod
def generate_line_properties():
"Cycle through the lines properties"
colors = cycle('mgcb')
line_width = 2.5
dashes = cycle([(1, 0), (8, 5)]) #self.dash_generator()
linestyles = cycle(['-'])
#alphas = cycle([.3, 1.])
markers = cycle(' oxv*d')
while True:
dash = dashes.next()
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
dash = dashes.next()
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
yield (colors.next(), line_width, dash, linestyles.next(),
markers.next())
def adjust_lines(self, dashes=True, leg_loc='best'):
"""Put correct styles in the axes lines
        Should be launched once all lines are plotted
Optimised for up to 8 lines in the plot
"""
generator = self.generate_line_properties()
for key in sorted(self._lines):
(color, line_width, dash, linestyle, marker) = generator.next()
line = self._lines[key]
line.set_color(color)
line.set_lw(line_width)
line.set_linestyle(linestyle)
if dashes:
line.set_dashes(dash)
line.set_marker(marker)
line.set_markersize(12)
line.set_markeredgewidth(1.5)
line.set_markerfacecolor('1.')
line.set_markeredgecolor(color)
# we want at most 15 markers per line
markevery = 1 + len(line.get_xdata()) // 15
line.set_markevery(markevery)
self.adjust_plot(leg_loc=leg_loc)
def adjust_plot(self, leg_loc='best'):
"Adjust main plot properties (grid, ticks, legend)"
self.put_labels()
self.adjust_ticks()
self._axis.grid(True)
self._axis.legend(loc=leg_loc, ncol=self.legend_ncol)
def put_labels(self):
"Put labels for axes and title"
self._axis.set_xlabel(self.xlabel, size=self.fontsize)
self._axis.set_ylabel(self.ylabel, size=self.fontsize)
self._axis.set_title(self.title, size=self.fontsize)
def legend(self, loc='best'):
"Plot legend with correct font size"
font = FontProperties(size=self.legend_fontsize)
self._axis.legend(loc=loc, prop=font)
def adjust_ticks(self):
"""Adjusts ticks sizes
To call after a rescale (log...)
"""
self._axis.minorticks_on()
for tick in self._axis.xaxis.get_major_ticks():
tick.label1.set_fontsize(self.fontsize)
for tick in self._axis.yaxis.get_major_ticks():
tick.label1.set_fontsize(self.fontsize)
def setgraph_logx(self):
"Set graph in xlogscale and adjusts plot (grid, ticks, legend)"
self._axis.semilogx(nonposy='clip', nonposx='clip')
def setgraph_logy(self):
"Set graph in xlogscale and adjusts plot (grid, ticks, legend)"
self._axis.semilogy(nonposy='clip', nonposx='clip')
def setgraph_loglog(self):
"Set graph in xlogscale and adjusts plot (grid, ticks, legend)"
self._axis.loglog(nonposy='clip', nonposx='clip')
def cdfplotdata(self, list_data_name, **kwargs):
"Method to be able to append data to the figure"
cdfplotdata(list_data_name, figure=self, **kwargs)
def ccdfplotdata(self, list_data_name, **kwargs):
"Method to be able to append data to the figure"
        cdfplotdata(list_data_name, figure=self, cdf=False, **kwargs)
def cdfplotdata(list_data_name, figure=None, xlabel='x', loc='best',
fs_legend='large', title = 'Empirical Distribution', logx=True,
logy=False, cdf=True, dashes=True, legend_ncol=1):
"Plot the cdf of a list of names and data arrays"
if not figure:
figure = CdfFigure(xlabel=xlabel, title=title,
legend_fontsize=fs_legend, legend_ncol=legend_ncol)
else:
figure.title = title
figure.xlabel = xlabel
figure.legend_fontsize = fs_legend
figure.legend_ncol = legend_ncol
if not list_data_name:
print("no data to plot", file=sys.stderr)
return figure
for name, data in list_data_name:
if cdf:
figure.cdfplot(data, name=name)
else:
figure.ccdfplot(data, name=name)
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_lines(dashes=dashes, leg_loc=loc)
return figure
def cdfplot(in_file, col=0):
"Plot the cdf of a column in file"
    data = np.loadtxt(in_file, usecols=[col])
    cdfplotdata([('Data', data)])
def scatter_plot(data, title='Scatterplot', xlabel='X', ylabel='Y',
logx=False, logy=False):
"Plot a scatter plot of data"
figure = CdfFigure(title=title, xlabel=xlabel, ylabel=ylabel)
x, y = zip(*data)
figure.plot(x, y, linestyle='', marker='^', markersize=8,
markeredgecolor='b', markerfacecolor='w')
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_plot()
return figure
def scatter_plot_multi(datas, title='Scatterplot', xlabel='X', ylabel='Y',
logx=False, logy=False):
"Plot a scatter plot of dictionary"
figure = CdfFigure(title=title, xlabel=xlabel, ylabel=ylabel)
markers = cycle('^xo')
colors = cycle('brm')
transparent = colorConverter.to_rgba('w', alpha=1)
total_nb = len([x for y in datas.values() for x in y])
for label, data in sorted(datas.items()):
x, y = zip(*data)
figure.plot(x, y,
label=(r'%s: %d (\textbf{%d\%%})'
% (label, len(data), 100 *len(data) // total_nb)),
linestyle='', marker=markers.next(), markersize=8,
markeredgecolor=colors.next(), markerfacecolor=transparent)
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_plot()
return figure
def bin_plot(datas, title='Bin Plot', xlabel='X', ylabel='Y',
logx=False, logy=False):
"Plot a bin plot of dictionary"
figure = CdfFigure(title=title, xlabel=xlabel, ylabel=ylabel)
# linestyles = cycle(('-', '--'))
# markers = cycle('^xo')
# colors = cycle('brm')
# for label, data in datas:
left, width, height, yerr = zip(*datas)
figure.bar(left, height, width, linewidth=0) #, yerr=yerr)
# linestyle=linestyles.next(), marker=markers.next(),
# markersize=6, markeredgecolor=colors.next(),
# markerfacecolor='w')
if logx and logy:
figure.setgraph_loglog()
elif logy:
figure.setgraph_logy()
elif logx:
figure.setgraph_logx()
figure.adjust_plot()
return figure
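# A hedged sketch of the input shape bin_plot expects: an iterable of
# (left_edge, width, height, yerr) tuples, matching the zip(*datas)
# unpacking above; the numbers are illustrative only.
def _example_bin_plot():
    bins = [(0.0, 1.0, 5.0, 0.4), (1.0, 1.0, 8.0, 0.6), (2.0, 1.0, 3.0, 0.2)]
    return bin_plot(bins, xlabel='bin', ylabel='count')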
|
miguelpalacio/python-for-android
|
refs/heads/master
|
python-build/python-libs/gdata/build/lib/atom/http.py
|
136
|
#!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""HttpClients in this module use httplib to make HTTP requests.
This module makes HTTP requests based on httplib, but there are environments
in which an httplib based approach will not work (if running in Google App
Engine for example). In those cases, higher level classes (like AtomService
and GDataService) can swap out the HttpClient to transparently use a
different mechanism for making HTTP requests.
HttpClient: Contains a request method which performs an HTTP call to the
server.
ProxiedHttpClient: Contains a request method which connects to a proxy using
settings stored in operating system environment variables then
performs an HTTP call to the endpoint server.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import types
import os
import httplib
import atom.url
import atom.http_interface
import socket
import base64
import atom.http_core
class ProxyError(atom.http_interface.Error):
pass
class TestConfigurationError(Exception):
pass
DEFAULT_CONTENT_TYPE = 'application/atom+xml'
class HttpClient(atom.http_interface.GenericHttpClient):
# Added to allow old v1 HttpClient objects to use the new
# http_code.HttpClient. Used in unit tests to inject a mock client.
v2_http_client = None
def __init__(self, headers=None):
self.debug = False
self.headers = headers or {}
def request(self, operation, url, data=None, headers=None):
"""Performs an HTTP call to the server, supports GET, POST, PUT, and
DELETE.
Usage example, perform an HTTP GET on http://www.google.com/:
import atom.http
client = atom.http.HttpClient()
http_response = client.request('GET', 'http://www.google.com/')
Args:
operation: str The HTTP operation to be performed. This is usually one
of 'GET', 'POST', 'PUT', or 'DELETE'
data: filestream, list of parts, or other object which can be converted
to a string. Should be set to None when performing a GET or DELETE.
If data is a file-like object which can be read, this method will
read a chunk of 100K bytes at a time and send them.
If the data is a list of parts to be sent, each part will be
evaluated and sent.
url: The full URL to which the request should be sent. Can be a string
or atom.url.Url.
headers: dict of strings. HTTP headers which should be sent
in the request.
"""
all_headers = self.headers.copy()
if headers:
all_headers.update(headers)
# If the list of headers does not include a Content-Length, attempt to
# calculate it based on the data object.
if data and 'Content-Length' not in all_headers:
if isinstance(data, types.StringTypes):
all_headers['Content-Length'] = str(len(data))
else:
raise atom.http_interface.ContentLengthRequired('Unable to calculate '
'the length of the data parameter. Specify a value for '
'Content-Length')
# Set the content type to the default value if none was set.
if 'Content-Type' not in all_headers:
all_headers['Content-Type'] = DEFAULT_CONTENT_TYPE
if self.v2_http_client is not None:
http_request = atom.http_core.HttpRequest(method=operation)
atom.http_core.Uri.parse_uri(str(url)).modify_request(http_request)
http_request.headers = all_headers
if data:
http_request._body_parts.append(data)
return self.v2_http_client.request(http_request=http_request)
if not isinstance(url, atom.url.Url):
if isinstance(url, types.StringTypes):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
connection = self._prepare_connection(url, all_headers)
if self.debug:
connection.debuglevel = 1
connection.putrequest(operation, self._get_access_url(url),
skip_host=True)
if url.port is not None:
connection.putheader('Host', '%s:%s' % (url.host, url.port))
else:
connection.putheader('Host', url.host)
# Work around a bug in Python 2.4 and 2.5 where
# httplib.HTTPConnection.putrequest adds the
# HTTP request header 'Host: www.google.com:443' instead of
# 'Host: www.google.com', thus resulting in the error message
# 'Token invalid - AuthSub token has wrong scope' in the HTTP response.
if (url.protocol == 'https' and int(url.port or 443) == 443 and
hasattr(connection, '_buffer') and
isinstance(connection._buffer, list)):
header_line = 'Host: %s:443' % url.host
replacement_header_line = 'Host: %s' % url.host
try:
connection._buffer[connection._buffer.index(header_line)] = (
replacement_header_line)
except ValueError: # header_line missing from connection._buffer
pass
# Send the HTTP headers.
for header_name in all_headers:
connection.putheader(header_name, all_headers[header_name])
connection.endheaders()
# If there is data, send it in the request.
if data:
if isinstance(data, list):
for data_part in data:
_send_data_part(data_part, connection)
else:
_send_data_part(data, connection)
# Return the HTTP Response from the server.
return connection.getresponse()
def _prepare_connection(self, url, headers):
if not isinstance(url, atom.url.Url):
if isinstance(url, types.StringTypes):
url = atom.url.parse_url(url)
else:
raise atom.http_interface.UnparsableUrlObject('Unable to parse url '
'parameter because it was not a string or atom.url.Url')
if url.protocol == 'https':
if not url.port:
return httplib.HTTPSConnection(url.host)
return httplib.HTTPSConnection(url.host, int(url.port))
else:
if not url.port:
return httplib.HTTPConnection(url.host)
return httplib.HTTPConnection(url.host, int(url.port))
def _get_access_url(self, url):
return url.to_string()
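# A hedged usage sketch for HttpClient.request: per its docstring, data may be
# a string, a readable file-like object, or a list of parts. For non-string
# data a Content-Length header must be supplied, as shown here; the URL and
# payload are illustrative only.
def _example_post():
    client = HttpClient()
    parts = ['<entry>', 'payload', '</entry>']
    length = str(sum(len(part) for part in parts))
    return client.request('POST', 'http://www.example.com/feed', data=parts,
                          headers={'Content-Length': length})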
class ProxiedHttpClient(HttpClient):
"""Performs an HTTP request through a proxy.
The proxy settings are obtained from environment variables. The URL of the
proxy server is assumed to be stored in the environment variables
'https_proxy' and 'http_proxy' respectively. If the proxy server requires
a Basic Auth authorization header, the username and password are expected to
be in the 'proxy-username' or 'proxy_username' variable and the
'proxy-password' or 'proxy_password' variable.
After connecting to the proxy server, the request is completed as in
HttpClient.request.
"""
def _prepare_connection(self, url, headers):
proxy_auth = _get_proxy_auth()
if url.protocol == 'https':
# destination is https
proxy = os.environ.get('https_proxy')
if proxy:
# Set any proxy auth headers
if proxy_auth:
proxy_auth = 'Proxy-authorization: %s' % proxy_auth
# Construct the proxy connect command.
port = url.port
if not port:
port = '443'
proxy_connect = 'CONNECT %s:%s HTTP/1.0\r\n' % (url.host, port)
# Set the user agent to send to the proxy
if headers and 'User-Agent' in headers:
user_agent = 'User-Agent: %s\r\n' % (headers['User-Agent'])
else:
user_agent = ''
proxy_pieces = '%s%s%s\r\n' % (proxy_connect, proxy_auth, user_agent)
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy)
if not proxy_url.port:
proxy_url.port = '80'
# Connect to the proxy server, with very simple recv and error checking.
p_sock = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
p_sock.connect((proxy_url.host, int(proxy_url.port)))
p_sock.sendall(proxy_pieces)
response = ''
# Wait for the full response.
while response.find("\r\n\r\n") == -1:
response += p_sock.recv(8192)
p_status = response.split()[1]
if p_status != str(200):
raise ProxyError('Error status=%s' % str(p_status))
# Trivial setup for ssl socket.
ssl = socket.ssl(p_sock, None, None)
fake_sock = httplib.FakeSocket(p_sock, ssl)
# Initialize httplib and replace its socket with the proxy socket.
connection = httplib.HTTPConnection(proxy_url.host)
connection.sock=fake_sock
return connection
else:
# The request was HTTPS, but there was no https_proxy set.
return HttpClient._prepare_connection(self, url, headers)
else:
proxy = os.environ.get('http_proxy')
if proxy:
# Find the proxy host and port.
proxy_url = atom.url.parse_url(proxy)
if not proxy_url.port:
proxy_url.port = '80'
if proxy_auth:
headers['Proxy-Authorization'] = proxy_auth.strip()
return httplib.HTTPConnection(proxy_url.host, int(proxy_url.port))
else:
# The request was HTTP, but there was no http_proxy set.
return HttpClient._prepare_connection(self, url, headers)
def _get_access_url(self, url):
return url.to_string()
def _get_proxy_auth():
proxy_username = os.environ.get('proxy-username')
if not proxy_username:
proxy_username = os.environ.get('proxy_username')
proxy_password = os.environ.get('proxy-password')
if not proxy_password:
proxy_password = os.environ.get('proxy_password')
if proxy_username:
user_auth = base64.encodestring('%s:%s' % (proxy_username,
proxy_password))
return 'Basic %s\r\n' % (user_auth.strip())
else:
return ''
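# A hedged configuration sketch for ProxiedHttpClient: per its docstring, the
# proxy URL and credentials come from environment variables. The values below
# are placeholders, not working endpoints.
def _example_proxied_request():
    os.environ['https_proxy'] = 'http://proxy.example.com:3128'
    os.environ['proxy_username'] = 'user'
    os.environ['proxy_password'] = 'secret'
    return ProxiedHttpClient().request('GET', 'https://www.example.com/')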
def _send_data_part(data, connection):
if isinstance(data, types.StringTypes):
connection.send(data)
return
# Check to see if data is a file-like object that has a read method.
elif hasattr(data, 'read'):
# Read the file and send it a chunk at a time.
while 1:
binarydata = data.read(100000)
if binarydata == '': break
connection.send(binarydata)
return
else:
# The data object was not a file.
# Try to convert to a string and send the data.
connection.send(str(data))
return
|
xiangel/hue
|
refs/heads/master
|
apps/zookeeper/src/zookeeper/__init__.py
|
1198
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
k-j-m/Pyxon
|
refs/heads/master
|
pyxon/decode2.py
|
1
|
# Dict of the form:
# {cls: {name:(fn, inv_fn)}}
# cls: class that has been written with @cprop annotations
# name: class attribute name
# fn: function to turn json data into the corresponding attribute type
# inv_fn: inverse of fn
class_props = {}
# Dict of the form:
# {AbstractClass:specifier_property}
# AbstractClass: the class that we're trying to (de)serialize
# specifier_property: the name of the json property that
# will indicate the concrete class name
specifier_properties = {}
# Dict of the form {AbstractClass: {label: ConcreteClass}}
# Used to retrieve the concrete implementation of an
# abstract class based on a string label.
class_specifiers = {}
# {ConcreteClass: (AbstractClass, concrete_label)}
conc_to_abstract = {}
def _add_type_property(specifier_properties, conc_to_abstract):
"""
Returns a function that returns type specifier properties.
I have done it like this to let me test the returned function
independent @subtyped(using='$type')
class AbstractClass(object):
def __init__(self, a,b,c, **kwargs):
self.a = a
self.b = b
self.c = c
@extending(AbstractClass, named='concrete_label')
class ConcreteClass(AbstractClass):
def __init__(self, x,y,z,**kwargs):
super(ConcreteClass,self).__init__(**kwargs)
self.x = x
self.y = y
self.z = z
data = {'a':1, 'b':2, 'c':3, 'x':101, 'y':102, 'z':103, '$type':'concrete_label'}
obj = utils.objectify(data, AbstractClass)
ly of any module-level variables.
"""
def fun(data,cls):
"""
Given some JSON data and the class from which it was produced,
this function returns the JSON data with any required type
annotations added to it.
"""
if not cls in conc_to_abstract:
return data
abstract_cls, concrete_label = conc_to_abstract[cls]
prop_name = specifier_properties[abstract_cls]
data[prop_name] = concrete_label
return data
return fun
add_type_property = _add_type_property(specifier_properties, conc_to_abstract)
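# A testability sketch implied by the docstring above: build an annotator
# backed by local dicts instead of the module-level registries.
def _example_isolated_add_type_property():
    local_specifier_properties = {}  # {AbstractClass: specifier_property}
    local_conc_to_abstract = {}      # {ConcreteClass: (AbstractClass, label)}
    return _add_type_property(local_specifier_properties, local_conc_to_abstract)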
def _metaprop(class_props):
"""
Wrapped MetaProp as a closure so that class_props can be passed
in during testing, rather than using the module-level variable.
"""
class MetaProp(type):
"""
Metaclass for the prop calculated property decorator.
This class contains all of the decorator logic. The reason
for using a metaclass rather than something simpler is
to allow us to use dot notation when adding calculated
properties.
"""
def __getattr__(prop_cls,key):
def prop2((f1, f2)):
def prop3(cls):
props = class_props.get(cls,{})
props[key]=(f1,f2)
class_props[cls]=props
return cls
return prop3
return prop2
return MetaProp
MetaProp = _metaprop(class_props)
class prop:
"""
Decorator for adding calculated properties to a class.
A calculated property is needed when the json data can't
be added to the class directly, for example when creating
some other user classes from the data before adding as
properties.
The decorator needs to be given 2 functions as arguments:
fun1: a function that takes JSON data and converts to some
other data type
fun2: the inverse of fun1, which takes some data type and
converts it into JSON data
Note: ideally the following will hold for any value of x
>>> fun2(fun1(x)) == x
Example:
class Foo(object): pass
@prop.y(obj(Foo))
class Bar(object):
def __init__(self, y):
self.y = y
"""
__metaclass__ = MetaProp
# Decorator annotations
def _subtyped(specifier_properties):
"""
Coded like this to allow the specifier_properties param to be passed
in during testing, rather than having a hard-wired reference to a
module-level variable.
"""
def subtyped(using):
"""
Decorator used to indicate that a class will be subtyped.
The using= parameter is used to indicate which JSON
property will contain the name of the subclass. A sensible
value for this would be '@type', but this will all depend on
how you have set up the rest of the system.
Example:
@subtyped(using='@type')
class Foo(object): pass
"""
# Because this is a parameterised decorator that we call, we
# now need to create and return the decorator proper.
def subtyped2(cls):
specifier_properties[cls]=using
return cls
return subtyped2
return subtyped
subtyped = _subtyped(specifier_properties)
def _extending(conc_to_abstract, class_specifiers):
def extending(super_cls, named):
"""
This decorator is used to indicate which superclass a class
extends. This could potentially be inferred from the class's
MRO, but that starts to get tricky and we would still need to
add extra info to say what the class will be named in the data.
This label is needed because we can't necessarily rely on the
class name and the class label in the data being the same.
Example:
@extending(Foo, named='Bar')
class Baz(Foo): pass
"""
def extending2(cls):
conc_to_abstract[cls]=super_cls,named
clsmap = class_specifiers.get(super_cls,{})
clsmap[named]=cls
class_specifiers[super_cls]=clsmap
return cls
return extending2
return extending
extending = _extending(conc_to_abstract, class_specifiers)
def _conc2(class_specifiers, specifier_properties):
def conc2(data, cls):
"""
Returns the appropriate concrete class of a subtyped class
based on the content of some JSON data.
If the class is not subtyped then it gets returned.
"""
s1 = set(specifier_properties.keys())
s2 = set(class_specifiers.keys())
assert s1==s2, "You need to use @subtyped and @extending as a pair!:\n%s\n%s" % (str(s1), str(s2))
if not cls in specifier_properties:
return cls
prop_name = specifier_properties[cls]
cls_label = data[prop_name]
concrete_cls = class_specifiers[cls][cls_label]
return concrete_cls
return conc2
conc2 = _conc2(class_specifiers, specifier_properties)
def _get_class_props(class_props):
def get_class_props(cls):
"""
Returns a dict containing the deserialise/serialise functions for
all class properties that need transformations applied to them.
{propname: (deserialiser, serialiser)}
We return a protective copy because it would be a BAD thing for
this to get changed in the field.
"""
return class_props.get(cls,{}).copy()
return get_class_props
get_class_props = _get_class_props(class_props)
def _get_parent_class(conc_to_abstract):
def get_parent_class(cls):
"""
Returns the parent inherited class if possible, otherwise
returns None.
"""
if cls in conc_to_abstract:
return conc_to_abstract[cls][0]
else:
return None
return get_parent_class
get_parent_class = _get_parent_class(conc_to_abstract)
def _get_type_annotation(conc_to_abstract, specifier_properties):
def get_type_annotation(cls):
if not cls in conc_to_abstract:
return {}
else:
parent_class, type_label = conc_to_abstract[cls]
type_propname = specifier_properties[parent_class]
return {type_propname: type_label}
return get_type_annotation
get_type_annotation = _get_type_annotation(conc_to_abstract, specifier_properties)
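# A hedged end-to-end sketch tying the decorators together; the class names
# and the '$type' property mirror the docstring example in _add_type_property
# and are illustrative only.
def _example_subtyping_round_trip():
    @subtyped(using='$type')
    class Base(object):
        pass

    @extending(Base, named='concrete_label')
    class Concrete(Base):
        pass

    data = {'$type': 'concrete_label'}
    cls = conc2(data, Base)  # resolves Concrete from the '$type' value
    annotated = add_type_property(dict(data), Concrete)  # re-adds '$type'
    return cls, annotated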
|
blitzmann/Pyfa
|
refs/heads/master
|
gui/builtinViewColumns/capacitorUse.py
|
2
|
# =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
# noinspection PyPackageRequirements
import wx
from eos.saveddata.mode import Mode
from service.attribute import Attribute
from gui.utils.numberFormatter import formatAmount
from gui.viewColumn import ViewColumn
from gui.bitmap_loader import BitmapLoader
class CapacitorUse(ViewColumn):
name = "Capacitor Usage"
def __init__(self, fittingView, params):
ViewColumn.__init__(self, fittingView)
self.mask = wx.LIST_MASK_IMAGE
Attribute.getInstance().getAttributeInfo("capacitorNeed")
self.imageId = fittingView.imageList.GetImageIndex("capacitorRecharge_small", "gui")
self.bitmap = BitmapLoader.getBitmap("capacitorRecharge_small", "gui")
def getText(self, mod):
if isinstance(mod, Mode):
return ""
capUse = mod.capUse
if capUse:
# A negative capUse presumably means the module adds capacitor, so the
# amount is sign-flipped for display and cap gain is prefixed with '+'.
return "%s%s" % ("+" if capUse < 0 else "", formatAmount(-capUse, 3, 0, 3))
else:
return ""
def getImageId(self, mod):
return -1
def getToolTip(self, mod):
return self.name
CapacitorUse.register()
|
eXcomm/cjdns
|
refs/heads/master
|
node_build/dependencies/libuv/build/gyp/test/win/gyptest-link-large-pdb.py
|
218
|
#!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_large_pdb works correctly.
"""
import TestGyp
import struct
import sys
CHDIR = 'large-pdb'
def CheckImageAndPdb(test, image_basename, expected_page_size,
pdb_basename=None):
if not pdb_basename:
pdb_basename = image_basename + '.pdb'
test.built_file_must_exist(image_basename, chdir=CHDIR)
test.built_file_must_exist(pdb_basename, chdir=CHDIR)
# We expect the PDB to have the given page size. For full details of the
# header look here: https://code.google.com/p/pdbparser/wiki/MSF_Format
# We read the little-endian 4-byte unsigned integer at position 32 of the
# file.
pdb_path = test.built_file_path(pdb_basename, chdir=CHDIR)
pdb_file = open(pdb_path, 'rb')
pdb_file.seek(32, 0)
page_size = struct.unpack('<I', pdb_file.read(4))[0]
if page_size != expected_page_size:
print "Expected page size of %d, got %d for PDB file `%s'." % (
expected_page_size, page_size, pdb_path)
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
test.run_gyp('large-pdb.gyp', chdir=CHDIR)
test.build('large-pdb.gyp', 'large_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_exe.exe', 4096)
test.build('large-pdb.gyp', 'small_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_exe.exe', 1024)
test.build('large-pdb.gyp', 'large_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_dll.dll', 4096)
test.build('large-pdb.gyp', 'small_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_dll.dll', 1024)
test.build('large-pdb.gyp', 'large_pdb_implicit_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_implicit_exe.exe', 4096)
# This target has a different PDB name because it uses an
# 'msvs_large_pdb_path' variable.
test.build('large-pdb.gyp', 'large_pdb_variable_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_variable_exe.exe', 4096,
pdb_basename='foo.pdb')
# This target has a different output name because it uses 'product_name'.
test.build('large-pdb.gyp', 'large_pdb_product_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'bar.exe', 4096)
test.pass_test()
|
goliveirab/odoo
|
refs/heads/8.0
|
addons/l10n_ar/__openerp__.py
|
260
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 Cubic ERP - Teradata SAC (<http://cubicerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Argentina Localization Chart Account',
'version': '1.0',
'description': """
Argentinian accounting chart and tax localization.
==================================================
Argentinian chart of accounts and taxes according to current regulations
""",
'author': ['Cubic ERP'],
'website': 'http://cubicERP.com',
'category': 'Localization/Account Charts',
'depends': ['account_chart'],
'data':[
'account_tax_code.xml',
'l10n_ar_chart.xml',
'account_tax.xml',
'l10n_ar_wizard.xml',
],
'demo': [],
'active': False,
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
christophlsa/odoo
|
refs/heads/8.0
|
openerp/addons/test_access_rights/models.py
|
299
|
from openerp import fields, models
class SomeObj(models.Model):
_name = 'test_access_right.some_obj'
val = fields.Integer()
|
skidzen/grit-i18n
|
refs/heads/master
|
grit/gather/policy_json_unittest.py
|
60
|
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Unit tests for grit.gather.policy_json'''
import os
import re
import sys
if __name__ == '__main__':
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
import unittest
import StringIO
from grit.gather import policy_json
class PolicyJsonUnittest(unittest.TestCase):
def GetExpectedOutput(self, original):
expected = eval(original)
for key, message in expected['messages'].iteritems():
del message['desc']
return expected
def testEmpty(self):
original = "{'policy_definitions': [], 'messages': {}}"
gatherer = policy_json.PolicyJson(StringIO.StringIO(original))
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 0)
self.failUnless(eval(original) == eval(gatherer.Translate('en')))
def testGeneralPolicy(self):
original = (
"{"
" 'policy_definitions': ["
" {"
" 'name': 'HomepageLocation',"
" 'type': 'string',"
" 'supported_on': ['chrome.*:8-'],"
" 'features': {'dynamic_refresh': 1},"
" 'example_value': 'http://chromium.org',"
" 'caption': 'nothing special 1',"
" 'desc': 'nothing special 2',"
" 'label': 'nothing special 3',"
" },"
" ],"
" 'messages': {"
" 'msg_identifier': {"
" 'text': 'nothing special 3',"
" 'desc': 'nothing special descr 3',"
" }"
" }"
"}")
gatherer = policy_json.PolicyJson(StringIO.StringIO(original))
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 4)
expected = self.GetExpectedOutput(original)
self.failUnless(expected == eval(gatherer.Translate('en')))
def testEnum(self):
original = (
"{"
" 'policy_definitions': ["
" {"
" 'name': 'Policy1',"
" 'items': ["
" {"
" 'name': 'Item1',"
" 'caption': 'nothing special',"
" }"
" ]"
" },"
" ],"
" 'messages': {}"
"}")
gatherer = policy_json.PolicyJson(StringIO.StringIO(original))
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 1)
expected = self.GetExpectedOutput(original)
self.failUnless(expected == eval(gatherer.Translate('en')))
def testSubPolicy(self):
original = (
"{"
" 'policy_definitions': ["
" {"
" 'policies': ["
" {"
" 'name': 'Policy1',"
" 'caption': 'nothing special',"
" }"
" ]"
" },"
" ],"
" 'messages': {}"
"}")
gatherer = policy_json.PolicyJson(StringIO.StringIO(original))
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 1)
expected = self.GetExpectedOutput(original)
self.failUnless(expected == eval(gatherer.Translate('en')))
def testEscapingAndLineBreaks(self):
original = """{
'policy_definitions': [],
'messages': {
'msg1': {
# The following line will contain two backslash characters when it
# ends up in eval().
'text': '''backslashes, Sir? \\\\''',
'desc': '',
},
'msg2': {
'text': '''quotes, Madam? "''',
'desc': '',
},
'msg3': {
# The following line will contain two backslash characters when it
# ends up in eval().
'text': 'backslashes, Sir? \\\\',
'desc': '',
},
'msg4': {
'text': "quotes, Madam? '",
'desc': '',
},
'msg5': {
'text': '''what happens
with a newline?''',
'desc': ''
},
'msg6': {
# The following line will contain a backslash+n when it ends up in
# eval().
'text': 'what happens\\nwith a newline? (Episode 1)',
'desc': ''
}
}
}"""
gatherer = policy_json.PolicyJson(StringIO.StringIO(original))
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 6)
expected = self.GetExpectedOutput(original)
self.failUnless(expected == eval(gatherer.Translate('en')))
def testPlaceholders(self):
original = """{
'policy_definitions': [
{
'name': 'Policy1',
'caption': '''Please install
<ph name="PRODUCT_NAME">$1<ex>Google Chrome</ex></ph>.''',
},
],
'messages': {}
}"""
gatherer = policy_json.PolicyJson(StringIO.StringIO(original))
gatherer.Parse()
self.failUnless(len(gatherer.GetCliques()) == 1)
expected = eval(re.sub('<ph.*ph>', '$1', original))
self.failUnless(expected == eval(gatherer.Translate('en')))
self.failUnless(gatherer.GetCliques()[0].translateable)
msg = gatherer.GetCliques()[0].GetMessage()
self.failUnless(len(msg.GetPlaceholders()) == 1)
ph = msg.GetPlaceholders()[0]
self.failUnless(ph.GetOriginal() == '$1')
self.failUnless(ph.GetPresentation() == 'PRODUCT_NAME')
self.failUnless(ph.GetExample() == 'Google Chrome')
def testGetDescription(self):
gatherer = policy_json.PolicyJson({})
self.assertEquals(
gatherer._GetDescription({'name': 'Policy1'}, 'policy', None, 'desc'),
'Description of the policy named Policy1')
self.assertEquals(
gatherer._GetDescription({'name': 'Plcy2'}, 'policy', None, 'caption'),
'Caption of the policy named Plcy2')
self.assertEquals(
gatherer._GetDescription({'name': 'Plcy3'}, 'policy', None, 'label'),
'Label of the policy named Plcy3')
self.assertEquals(
gatherer._GetDescription({'name': 'Item'}, 'enum_item',
{'name': 'Policy'}, 'caption'),
'Caption of the option named Item in policy Policy')
if __name__ == '__main__':
unittest.main()
|
dario61081/koalixcrm
|
refs/heads/master
|
koalixcrm/crm/migrations/0049_auto_20181014_2258.py
|
2
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-10-12 20:56
from __future__ import unicode_literals
from django.db import migrations
def reverse_func(apps, schema_editor):
# No-op reverse: nothing needs undoing, but providing a reverse function
# keeps this migration reversible for migrations.RunPython below.
return 1
def backup_identifiers(apps, schema_editor):
Position = apps.get_model("crm", "Position")
CustomerGroupTransform = apps.get_model("crm", "CustomerGroupTransform")
Price = apps.get_model("crm", "Price")
UnitTransform = apps.get_model("crm", "UnitTransform")
db_alias = schema_editor.connection.alias
all_positions = Position.objects.using(db_alias).all()
for position in all_positions:
position.product_backup = position.product.id
position.save()
all_customer_group_transforms = CustomerGroupTransform.objects.using(db_alias).all()
for customer_group_transform in all_customer_group_transforms:
customer_group_transform.product_backup = customer_group_transform.product.id
customer_group_transform.save()
all_prices = Price.objects.using(db_alias).all()
for price in all_prices:
price.product_backup = price.product.id
price.save()
all_unit_transforms = UnitTransform.objects.using(db_alias).all()
for unit_transform in all_unit_transforms:
unit_transform.product_backup = unit_transform.product.id
unit_transform.save()
class Migration(migrations.Migration):
dependencies = [
('crm', '0048_auto_20181012_2056'),
]
operations = [
migrations.RunPython(backup_identifiers, reverse_func),
]
|
aequitas/CouchPotato
|
refs/heads/master
|
library/mako/template.py
|
13
|
# template.py
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Michael Bayer
# mike_mp@zzzcomputing.com
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Provides the Template class, a facade for parsing, generating and executing
template strings, as well as template runtime operations."""
from mako.lexer import Lexer
from mako import runtime, util, exceptions, codegen
import imp, os, re, shutil, stat, sys, tempfile, time, types, weakref
class Template(object):
"""a compiled template"""
def __init__(self,
text=None,
filename=None,
uri=None,
format_exceptions=False,
error_handler=None,
lookup=None,
output_encoding=None,
encoding_errors='strict',
module_directory=None,
cache_type=None,
cache_dir=None,
cache_url=None,
module_filename=None,
input_encoding=None,
disable_unicode=False,
default_filters=None,
buffer_filters=(),
imports=None,
preprocessor=None,
cache_enabled=True):
"""Construct a new Template instance using either literal template
text, or a previously loaded template module
:param text: textual template source, or None if a module is to be
provided
:param uri: the uri of this template, or some identifying string.
defaults to the full filename given, or "memory:(hex id of this
Template)" if no filename
:param filename: filename of the source template, if any
:param format_exceptions: catch exceptions and format them into an
error display template
"""
if uri:
self.module_id = re.sub(r'\W', "_", uri)
self.uri = uri
elif filename:
self.module_id = re.sub(r'\W', "_", filename)
drive, path = os.path.splitdrive(filename)
path = os.path.normpath(path).replace(os.path.sep, "/")
self.uri = path
else:
self.module_id = "memory:" + hex(id(self))
self.uri = self.module_id
self.input_encoding = input_encoding
self.output_encoding = output_encoding
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
if util.py3k and disable_unicode:
raise exceptions.UnsupportedError(
"Mako for Python 3 does not "
"support disabling Unicode")
if default_filters is None:
if util.py3k or self.disable_unicode:
self.default_filters = ['str']
else:
self.default_filters = ['unicode']
else:
self.default_filters = default_filters
self.buffer_filters = buffer_filters
self.imports = imports
self.preprocessor = preprocessor
# if plain text, compile code in memory only
if text is not None:
(code, module) = _compile_text(self, text, filename)
self._code = code
self._source = text
ModuleInfo(module, None, self, filename, code, text)
elif filename is not None:
# if template filename and a module directory, load
# a filesystem-based module file, generating if needed
if module_filename is not None:
path = module_filename
elif module_directory is not None:
u = self.uri
if u[0] == '/':
u = u[1:]
path = os.path.abspath(
os.path.join(
os.path.normpath(module_directory),
os.path.normpath(u) + ".py"
)
)
else:
path = None
module = self._compile_from_file(path, filename)
else:
raise exceptions.RuntimeException(
"Template requires text or filename")
self.module = module
self.filename = filename
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.lookup = lookup
self.cache_type = cache_type
self.cache_dir = cache_dir
self.cache_url = cache_url
self.cache_enabled = cache_enabled
def _compile_from_file(self, path, filename):
if path is not None:
util.verify_directory(os.path.dirname(path))
filemtime = os.stat(filename)[stat.ST_MTIME]
if not os.path.exists(path) or \
os.stat(path)[stat.ST_MTIME] < filemtime:
_compile_module_file(
self,
open(filename, 'rb').read(),
filename,
path)
module = imp.load_source(self.module_id, path, open(path, 'rb'))
del sys.modules[self.module_id]
if module._magic_number != codegen.MAGIC_NUMBER:
_compile_module_file(
self,
open(filename, 'rb').read(),
filename,
path)
module = imp.load_source(self.module_id, path, open(path, 'rb'))
del sys.modules[self.module_id]
ModuleInfo(module, path, self, filename, None, None)
else:
# template filename and no module directory, compile code
# in memory
code, module = _compile_text(
self,
open(filename, 'rb').read(),
filename)
self._source = None
self._code = code
ModuleInfo(module, None, self, filename, code, None)
return module
@property
def source(self):
"""return the template source code for this Template."""
return _get_module_info_from_callable(self.callable_).source
@property
def code(self):
"""return the module source code for this Template"""
return _get_module_info_from_callable(self.callable_).code
@property
def cache(self):
return self.module._template_cache
def render(self, *args, **data):
"""Render the output of this template as a string.
if the template specifies an output encoding, the string will be
encoded accordingly, else the output is raw (raw output uses cStringIO
and can't handle multibyte characters). A Context object is created
corresponding to the given data. Arguments that are explicitly declared
by this template's internal rendering method are also pulled from the
given \*args, \**data members.
"""
return runtime._render(self, self.callable_, args, data)
def render_unicode(self, *args, **data):
"""render the output of this template as a unicode object."""
return runtime._render(self,
self.callable_,
args,
data,
as_unicode=True)
def render_context(self, context, *args, **kwargs):
"""Render this Template with the given context.
the data is written to the context's buffer.
"""
if getattr(context, '_with_template', None) is None:
context._with_template = self
runtime._render_context(self,
self.callable_,
context,
*args,
**kwargs)
def has_def(self, name):
return hasattr(self.module, "render_%s" % name)
def get_def(self, name):
"""Return a def of this template as a DefTemplate."""
return DefTemplate(self, getattr(self.module, "render_%s" % name))
def _get_def_callable(self, name):
return getattr(self.module, "render_%s" % name)
@property
def last_modified(self):
return self.module._modified_time
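# A minimal usage sketch, assuming nothing beyond this module: render a
# literal template with an explicit output encoding.
def _example_render():
    t = Template(u"hello, ${name}!", output_encoding='utf-8')
    return t.render(name=u'world')  # a utf-8 encoded string, per render()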
class ModuleTemplate(Template):
"""A Template which is constructed given an existing Python module.
e.g.::
t = Template("this is a template")
f = file("mymodule.py", "w")
f.write(t.code)
f.close()
import mymodule
t = ModuleTemplate(mymodule)
print t.render()
"""
def __init__(self, module,
module_filename=None,
template=None,
template_filename=None,
module_source=None,
template_source=None,
output_encoding=None,
encoding_errors='strict',
disable_unicode=False,
format_exceptions=False,
error_handler=None,
lookup=None,
cache_type=None,
cache_dir=None,
cache_url=None,
cache_enabled=True
):
self.module_id = re.sub(r'\W', "_", module._template_uri)
self.uri = module._template_uri
self.input_encoding = module._source_encoding
self.output_encoding = output_encoding
self.encoding_errors = encoding_errors
self.disable_unicode = disable_unicode
self.module = module
self.filename = template_filename
ModuleInfo(module,
module_filename,
self,
template_filename,
module_source,
template_source)
self.callable_ = self.module.render_body
self.format_exceptions = format_exceptions
self.error_handler = error_handler
self.lookup = lookup
self.cache_type = cache_type
self.cache_dir = cache_dir
self.cache_url = cache_url
self.cache_enabled = cache_enabled
class DefTemplate(Template):
"""a Template which represents a callable def in a parent template."""
def __init__(self, parent, callable_):
self.parent = parent
self.callable_ = callable_
self.output_encoding = parent.output_encoding
self.module = parent.module
self.encoding_errors = parent.encoding_errors
self.format_exceptions = parent.format_exceptions
self.error_handler = parent.error_handler
self.lookup = parent.lookup
def get_def(self, name):
return self.parent.get_def(name)
class ModuleInfo(object):
"""Stores information about a module currently loaded into memory,
and provides reverse lookups of template source and module source code based on
a module's identifier.
"""
_modules = weakref.WeakValueDictionary()
def __init__(self,
module,
module_filename,
template,
template_filename,
module_source,
template_source):
self.module = module
self.module_filename = module_filename
self.template_filename = template_filename
self.module_source = module_source
self.template_source = template_source
self._modules[module.__name__] = template._mmarker = self
if module_filename:
self._modules[module_filename] = self
@property
def code(self):
if self.module_source is not None:
return self.module_source
else:
return open(self.module_filename).read()
@property
def source(self):
if self.template_source is not None:
if self.module._source_encoding and \
not isinstance(self.template_source, unicode):
return self.template_source.decode(
self.module._source_encoding)
else:
return self.template_source
else:
if self.module._source_encoding:
return open(self.template_filename, 'rb').read().\
decode(self.module._source_encoding)
else:
return open(self.template_filename).read()
def _compile_text(template, text, filename):
identifier = template.module_id
lexer = Lexer(text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
source_encoding=lexer.encoding,
generate_magic_comment=template.disable_unicode,
disable_unicode=template.disable_unicode)
cid = identifier
if not util.py3k and isinstance(cid, unicode):
cid = cid.encode()
module = types.ModuleType(cid)
code = compile(source, cid, 'exec')
exec code in module.__dict__, module.__dict__
return (source, module)
def _compile_module_file(template, text, filename, outputpath):
identifier = template.module_id
lexer = Lexer(text,
filename,
disable_unicode=template.disable_unicode,
input_encoding=template.input_encoding,
preprocessor=template.preprocessor)
node = lexer.parse()
source = codegen.compile(node,
template.uri,
filename,
default_filters=template.default_filters,
buffer_filters=template.buffer_filters,
imports=template.imports,
source_encoding=lexer.encoding,
generate_magic_comment=True,
disable_unicode=template.disable_unicode)
# make tempfiles in the same directory as the final output file.
# this ensures they're on the same filesystem, so the move below is
# an atomic rename and avoids synchronization issues.
(dest, name) = tempfile.mkstemp(dir=os.path.dirname(outputpath))
if isinstance(source, unicode):
source = source.encode(lexer.encoding or 'ascii')
os.write(dest, source)
os.close(dest)
shutil.move(name, outputpath)
def _get_module_info_from_callable(callable_):
return _get_module_info(callable_.func_globals['__name__'])
def _get_module_info(filename):
return ModuleInfo._modules[filename]
|
oceanobservatories/mi-instrument
|
refs/heads/master
|
mi/dataset/driver/ctdbp_p/ctdbp_p_recovered_driver.py
|
7
|
#!/usr/bin/env python
"""
@package mi.dataset.driver.ctdbp_p
@file mi-dataset/mi/dataset/driver/ctdbp_p/ctdbp_p_recovered_driver.py
@author Jeff Roy, Rene Gelinas
@brief Driver for the ctdbp_p instrument (Recovered Data)
Release notes:
Initial Release
"""
from mi.dataset.dataset_driver import SimpleDatasetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.ctdbp_p import CtdbpPCommonParser
from mi.core.versioning import version
MODULE_NAME = 'mi.dataset.parser.ctdbp_p'
CTDBP_RECOV_CONFIG = {
DataSetDriverConfigKeys.PARTICLE_MODULE: MODULE_NAME,
DataSetDriverConfigKeys.PARTICLE_CLASS: 'CtdbpPRecoveredDataParticle'
}
@version("15.6.1")
def parse(unused, source_file_path, particle_data_handler):
"""
This is the method called by Uframe
:param unused
:param source_file_path This is the full path and filename of the file to be parsed
:param particle_data_handler Consumes the output of the parser
:return particle_data_handler
"""
with open(source_file_path, 'rU') as stream_handle:
# create an instance of the concrete driver class defined below
driver = CtdbpPRecoveredDriver(unused, stream_handle, particle_data_handler)
driver.processFileStream()
return particle_data_handler
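# A hedged invocation sketch: Uframe calls parse() roughly as below. The
# handler class and the file path are assumptions, not part of this file.
#   from mi.dataset.dataset_driver import ParticleDataHandler
#   handler = parse(None, '/path/to/ctdbp_p_data.hex', ParticleDataHandler())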
class CtdbpPRecoveredDriver(SimpleDatasetDriver):
"""
Derived ctdbp_p driver class
All this needs to do is create a concrete _build_parser method
"""
def _build_parser(self, stream_handle):
# The parser inherits from simple parser - other callbacks not needed here
parser = CtdbpPCommonParser(CTDBP_RECOV_CONFIG,
stream_handle,
self._exception_callback)
return parser
|
warner83/micropython
|
refs/heads/master
|
tests/pyb/led.py
|
48
|
import pyb
from pyb import LED
for i in range(4):
print(LED(i+1))
for i in range(4):
LED(i+1).on()
pyb.delay(10)
for i in range(4):
LED(i+1).off()
pyb.delay(10)
for i in range(4):
LED(i+1).toggle()
pyb.delay(10)
for i in range(4):
LED(i+1).intensity(0)
for i in range(256):
LED(4).intensity(i)
if LED(4).intensity() != i:
print('fail', i)
pyb.delay(1)
for i in range(256):
LED(4).intensity(255 - i)
pyb.delay(1)
for i in range(4):
LED(i+1).off()
|
xindaya/bazel
|
refs/heads/master
|
third_party/py/gflags/tests/gflags_unittest.py
|
100
|
#!/usr/bin/env python
# Copyright (c) 2007, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"Unittest for gflags.py module"
__pychecker__ = "no-local" # for unittest
import cStringIO
import sys
import os
import shutil
import gflags
from flags_modules_for_testing import module_foo
from flags_modules_for_testing import module_bar
from flags_modules_for_testing import module_baz
FLAGS=gflags.FLAGS
import gflags_googletest as googletest
# TODO(csilvers): add a wrapper function around FLAGS(argv) that
# verifies the input is a list or tuple. This avoids bugs where we
# make argv a string instead of a list, by mistake.
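# A minimal sketch of the wrapper suggested in the TODO above (a hypothetical
# helper, not part of gflags): verify argv is a list or tuple before parsing,
# catching the string-instead-of-list mistake the TODO describes.
def _checked_flags_parse(flag_values, argv):
  if not isinstance(argv, (list, tuple)):
    raise TypeError('argv must be a list or tuple, got %r' % type(argv))
  return flag_values(argv)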
class FlagsUnitTest(googletest.TestCase):
"Flags Unit Test"
def setUp(self):
# make sure we are using the old, stupid way of parsing flags.
FLAGS.UseGnuGetOpt(False)
def test_flags(self):
##############################################
# Test normal usage with no (expected) errors.
# Define flags
number_test_framework_flags = len(FLAGS.RegisteredFlags())
repeatHelp = "how many times to repeat (0-5)"
gflags.DEFINE_integer("repeat", 4, repeatHelp,
lower_bound=0, short_name='r')
gflags.DEFINE_string("name", "Bob", "namehelp")
gflags.DEFINE_boolean("debug", 0, "debughelp")
gflags.DEFINE_boolean("q", 1, "quiet mode")
gflags.DEFINE_boolean("quack", 0, "superstring of 'q'")
gflags.DEFINE_boolean("noexec", 1, "boolean flag with no as prefix")
gflags.DEFINE_integer("x", 3, "how eXtreme to be")
gflags.DEFINE_integer("l", 0x7fffffff00000000, "how long to be")
gflags.DEFINE_list('letters', 'a,b,c', "a list of letters")
gflags.DEFINE_list('numbers', [1, 2, 3], "a list of numbers")
gflags.DEFINE_enum("kwery", None, ['who', 'what', 'why', 'where', 'when'],
"?")
# Specify number of flags defined above. The short_name defined
# for 'repeat' counts as an extra flag.
number_defined_flags = 11 + 1
self.assertEqual(len(FLAGS.RegisteredFlags()),
number_defined_flags + number_test_framework_flags)
assert FLAGS.repeat == 4, "integer default values not set:" + FLAGS.repeat
assert FLAGS.name == 'Bob', "default values not set:" + FLAGS.name
assert FLAGS.debug == 0, "boolean default values not set:" + FLAGS.debug
assert FLAGS.q == 1, "boolean default values not set:" + FLAGS.q
assert FLAGS.x == 3, "integer default values not set:" + FLAGS.x
assert FLAGS.l == 0x7fffffff00000000, ("integer default values not set:"
+ FLAGS.l)
assert FLAGS.letters == ['a', 'b', 'c'], ("list default values not set:"
+ FLAGS.letters)
assert FLAGS.numbers == [1, 2, 3], ("list default values not set:"
+ FLAGS.numbers)
assert FLAGS.kwery is None, ("enum default None value not set:"
+ FLAGS.kwery)
flag_values = FLAGS.FlagValuesDict()
assert flag_values['repeat'] == 4
assert flag_values['name'] == 'Bob'
assert flag_values['debug'] == 0
assert flag_values['r'] == 4 # short for repeat
assert flag_values['q'] == 1
assert flag_values['quack'] == 0
assert flag_values['x'] == 3
assert flag_values['l'] == 0x7fffffff00000000
assert flag_values['letters'] == ['a', 'b', 'c']
assert flag_values['numbers'] == [1, 2, 3]
assert flag_values['kwery'] is None
# Verify string form of defaults
assert FLAGS['repeat'].default_as_str == "'4'"
assert FLAGS['name'].default_as_str == "'Bob'"
assert FLAGS['debug'].default_as_str == "'false'"
assert FLAGS['q'].default_as_str == "'true'"
assert FLAGS['quack'].default_as_str == "'false'"
assert FLAGS['noexec'].default_as_str == "'true'"
assert FLAGS['x'].default_as_str == "'3'"
assert FLAGS['l'].default_as_str == "'9223372032559808512'"
assert FLAGS['letters'].default_as_str == "'a,b,c'"
assert FLAGS['numbers'].default_as_str == "'1,2,3'"
# Verify that the iterator for flags yields all the keys
keys = list(FLAGS)
keys.sort()
reg_flags = FLAGS.RegisteredFlags()
reg_flags.sort()
self.assertEqual(keys, reg_flags)
# Parse flags
# .. empty command line
argv = ('./program',)
argv = FLAGS(argv)
assert len(argv) == 1, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
# .. non-empty command line
argv = ('./program', '--debug', '--name=Bob', '-q', '--x=8')
argv = FLAGS(argv)
assert len(argv) == 1, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert FLAGS['debug'].present == 1
FLAGS['debug'].present = 0 # Reset
assert FLAGS['name'].present == 1
FLAGS['name'].present = 0 # Reset
assert FLAGS['q'].present == 1
FLAGS['q'].present = 0 # Reset
assert FLAGS['x'].present == 1
FLAGS['x'].present = 0 # Reset
# Flags list
self.assertEqual(len(FLAGS.RegisteredFlags()),
number_defined_flags + number_test_framework_flags)
assert 'name' in FLAGS.RegisteredFlags()
assert 'debug' in FLAGS.RegisteredFlags()
assert 'repeat' in FLAGS.RegisteredFlags()
assert 'r' in FLAGS.RegisteredFlags()
assert 'q' in FLAGS.RegisteredFlags()
assert 'quack' in FLAGS.RegisteredFlags()
assert 'x' in FLAGS.RegisteredFlags()
assert 'l' in FLAGS.RegisteredFlags()
assert 'letters' in FLAGS.RegisteredFlags()
assert 'numbers' in FLAGS.RegisteredFlags()
# has_key
assert FLAGS.has_key('name')
assert not FLAGS.has_key('name2')
assert 'name' in FLAGS
assert 'name2' not in FLAGS
# try deleting a flag
del FLAGS.r
self.assertEqual(len(FLAGS.RegisteredFlags()),
number_defined_flags - 1 + number_test_framework_flags)
assert not 'r' in FLAGS.RegisteredFlags()
# .. command line with extra stuff
argv = ('./program', '--debug', '--name=Bob', 'extra')
argv = FLAGS(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
assert FLAGS['debug'].present == 1
FLAGS['debug'].present = 0 # Reset
assert FLAGS['name'].present == 1
FLAGS['name'].present = 0 # Reset
# Test reset
argv = ('./program', '--debug')
argv = FLAGS(argv)
assert len(argv) == 1, "wrong number of arguments pulled"
assert argv[0] == './program', "program name not preserved"
assert FLAGS['debug'].present == 1
assert FLAGS['debug'].value
FLAGS.Reset()
assert FLAGS['debug'].present == 0
assert not FLAGS['debug'].value
# Test that reset restores default value when default value is None.
argv = ('./program', '--kwery=who')
argv = FLAGS(argv)
assert len(argv) == 1, "wrong number of arguments pulled"
assert argv[0] == './program', "program name not preserved"
assert FLAGS['kwery'].present == 1
assert FLAGS['kwery'].value == 'who'
FLAGS.Reset()
assert FLAGS['kwery'].present == 0
assert FLAGS['kwery'].value == None
# Test integer argument passing
argv = ('./program', '--x', '0x12345')
argv = FLAGS(argv)
self.assertEquals(FLAGS.x, 0x12345)
self.assertEquals(type(FLAGS.x), int)
argv = ('./program', '--x', '0x1234567890ABCDEF1234567890ABCDEF')
argv = FLAGS(argv)
self.assertEquals(FLAGS.x, 0x1234567890ABCDEF1234567890ABCDEF)
self.assertEquals(type(FLAGS.x), long)
# Treat 0-prefixed parameters as base-10, not base-8
argv = ('./program', '--x', '012345')
argv = FLAGS(argv)
self.assertEquals(FLAGS.x, 12345)
self.assertEquals(type(FLAGS.x), int)
argv = ('./program', '--x', '0123459')
argv = FLAGS(argv)
self.assertEquals(FLAGS.x, 123459)
self.assertEquals(type(FLAGS.x), int)
argv = ('./program', '--x', '0x123efg')
try:
argv = FLAGS(argv)
raise AssertionError("failed to detect invalid hex argument")
except gflags.IllegalFlagValue:
pass
# Test boolean argument parsing
gflags.DEFINE_boolean("test0", None, "test boolean parsing")
argv = ('./program', '--notest0')
argv = FLAGS(argv)
assert FLAGS.test0 == 0
gflags.DEFINE_boolean("test1", None, "test boolean parsing")
argv = ('./program', '--test1')
argv = FLAGS(argv)
assert FLAGS.test1 == 1
FLAGS.test0 = None
argv = ('./program', '--test0=false')
argv = FLAGS(argv)
assert FLAGS.test0 == 0
FLAGS.test1 = None
argv = ('./program', '--test1=true')
argv = FLAGS(argv)
assert FLAGS.test1 == 1
FLAGS.test0 = None
argv = ('./program', '--test0=0')
argv = FLAGS(argv)
assert FLAGS.test0 == 0
FLAGS.test1 = None
argv = ('./program', '--test1=1')
argv = FLAGS(argv)
assert FLAGS.test1 == 1
# Test booleans that already have 'no' as a prefix
FLAGS.noexec = None
argv = ('./program', '--nonoexec', '--name', 'Bob')
argv = FLAGS(argv)
assert FLAGS.noexec == 0
FLAGS.noexec = None
argv = ('./program', '--name', 'Bob', '--noexec')
argv = FLAGS(argv)
assert FLAGS.noexec == 1
# Test unassigned booleans
gflags.DEFINE_boolean("testnone", None, "test boolean parsing")
argv = ('./program',)
argv = FLAGS(argv)
assert FLAGS.testnone == None
# Test get with default
gflags.DEFINE_boolean("testget1", None, "test parsing with defaults")
gflags.DEFINE_boolean("testget2", None, "test parsing with defaults")
gflags.DEFINE_boolean("testget3", None, "test parsing with defaults")
gflags.DEFINE_integer("testget4", None, "test parsing with defaults")
argv = ('./program','--testget1','--notestget2')
argv = FLAGS(argv)
assert FLAGS.get('testget1', 'foo') == 1
assert FLAGS.get('testget2', 'foo') == 0
assert FLAGS.get('testget3', 'foo') == 'foo'
assert FLAGS.get('testget4', 'foo') == 'foo'
# test list code
lists = [['hello','moo','boo','1'],
[],]
gflags.DEFINE_list('testlist', '', 'test lists parsing')
gflags.DEFINE_spaceseplist('testspacelist', '', 'tests space lists parsing')
for name, sep in (('testlist', ','), ('testspacelist', ' '),
('testspacelist', '\n')):
for lst in lists:
argv = ('./program', '--%s=%s' % (name, sep.join(lst)))
argv = FLAGS(argv)
self.assertEquals(getattr(FLAGS, name), lst)
# Test help text
flagsHelp = str(FLAGS)
assert flagsHelp.find("repeat") != -1, "cannot find flag in help"
assert flagsHelp.find(repeatHelp) != -1, "cannot find help string in help"
# Test flag specified twice
argv = ('./program', '--repeat=4', '--repeat=2', '--debug', '--nodebug')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('repeat', None), 2)
self.assertEqual(FLAGS.get('debug', None), 0)
# Test MultiFlag with single default value
gflags.DEFINE_multistring('s_str', 'sing1',
'string option that can occur multiple times',
short_name='s')
self.assertEqual(FLAGS.get('s_str', None), [ 'sing1', ])
# Test MultiFlag with list of default values
multi_string_defs = [ 'def1', 'def2', ]
gflags.DEFINE_multistring('m_str', multi_string_defs,
'string option that can occur multiple times',
short_name='m')
self.assertEqual(FLAGS.get('m_str', None), multi_string_defs)
# Test flag specified multiple times with a MultiFlag
argv = ('./program', '--m_str=str1', '-m', 'str2')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('m_str', None), [ 'str1', 'str2', ])
# Test single-letter flags; should support both single and double dash
argv = ('./program', '-q', '-x8')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('q', None), 1)
self.assertEqual(FLAGS.get('x', None), 8)
argv = ('./program', '--q', '--x', '9', '--noqu')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('q', None), 1)
self.assertEqual(FLAGS.get('x', None), 9)
# --noqu should match '--noquack' since it's a unique prefix
self.assertEqual(FLAGS.get('quack', None), 0)
argv = ('./program', '--noq', '--x=10', '--qu')
argv = FLAGS(argv)
self.assertEqual(FLAGS.get('q', None), 0)
self.assertEqual(FLAGS.get('x', None), 10)
self.assertEqual(FLAGS.get('quack', None), 1)
####################################
# Test flag serialization code:
oldtestlist = FLAGS.testlist
oldtestspacelist = FLAGS.testspacelist
argv = ('./program',
FLAGS['test0'].Serialize(),
FLAGS['test1'].Serialize(),
FLAGS['testnone'].Serialize(),
FLAGS['s_str'].Serialize())
argv = FLAGS(argv)
self.assertEqual(FLAGS['test0'].Serialize(), '--notest0')
self.assertEqual(FLAGS['test1'].Serialize(), '--test1')
self.assertEqual(FLAGS['testnone'].Serialize(), '')
self.assertEqual(FLAGS['s_str'].Serialize(), '--s_str=sing1')
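# The four assertions above pin down the Serialize() contract: a flag
# serializes to a single command-line token that reproduces its current
# value, and a flag whose value is None serializes to the empty string so
# it contributes nothing when spliced back into an argv, e.g.:
#   argv = ('./program', FLAGS['test1'].Serialize())  # ('./program', '--test1')
#   FLAGS(argv)                                       # round-trips test1 == 1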
testlist1 = ['aa', 'bb']
testspacelist1 = ['aa', 'bb', 'cc']
FLAGS.testlist = list(testlist1)
FLAGS.testspacelist = list(testspacelist1)
argv = ('./program',
FLAGS['testlist'].Serialize(),
FLAGS['testspacelist'].Serialize())
argv = FLAGS(argv)
self.assertEqual(FLAGS.testlist, testlist1)
self.assertEqual(FLAGS.testspacelist, testspacelist1)
testlist1 = ['aa some spaces', 'bb']
testspacelist1 = ['aa', 'bb,some,commas,', 'cc']
FLAGS.testlist = list(testlist1)
FLAGS.testspacelist = list(testspacelist1)
argv = ('./program',
FLAGS['testlist'].Serialize(),
FLAGS['testspacelist'].Serialize())
argv = FLAGS(argv)
self.assertEqual(FLAGS.testlist, testlist1)
self.assertEqual(FLAGS.testspacelist, testspacelist1)
FLAGS.testlist = oldtestlist
FLAGS.testspacelist = oldtestspacelist
####################################
# Test flag-update:
def ArgsString():
flagnames = FLAGS.RegisteredFlags()
flagnames.sort()
nonbool_flags = ['--%s %s' % (name, FLAGS.get(name, None))
for name in flagnames
if not isinstance(FLAGS[name], gflags.BooleanFlag)]
truebool_flags = ['--%s' % (name)
for name in flagnames
if isinstance(FLAGS[name], gflags.BooleanFlag) and
FLAGS.get(name, None)]
falsebool_flags = ['--no%s' % (name)
for name in flagnames
if isinstance(FLAGS[name], gflags.BooleanFlag) and
not FLAGS.get(name, None)]
return ' '.join(nonbool_flags + truebool_flags + falsebool_flags)
argv = ('./program', '--repeat=3', '--name=giants', '--nodebug')
FLAGS(argv)
self.assertEqual(FLAGS.get('repeat', None), 3)
self.assertEqual(FLAGS.get('name', None), 'giants')
self.assertEqual(FLAGS.get('debug', None), 0)
self.assertEqual(ArgsString(),
"--kwery None "
"--l 9223372032559808512 "
"--letters ['a', 'b', 'c'] "
"--m ['str1', 'str2'] --m_str ['str1', 'str2'] "
"--name giants "
"--numbers [1, 2, 3] "
"--repeat 3 "
"--s ['sing1'] --s_str ['sing1'] "
""
""
"--testget4 None --testlist [] "
"--testspacelist [] --x 10 "
"--noexec --quack "
"--test1 "
"--testget1 --tmod_baz_x "
"--no? --nodebug --nohelp --nohelpshort --nohelpxml --noq "
""
"--notest0 --notestget2 --notestget3 --notestnone")
argv = ('./program', '--debug', '--m_str=upd1', '-s', 'upd2')
FLAGS(argv)
self.assertEqual(FLAGS.get('repeat', None), 3)
self.assertEqual(FLAGS.get('name', None), 'giants')
self.assertEqual(FLAGS.get('debug', None), 1)
# items appended to existing non-default value lists for --m/--m_str
# new value overwrites default value (not appended to it) for --s/--s_str
self.assertEqual(ArgsString(),
"--kwery None "
"--l 9223372032559808512 "
"--letters ['a', 'b', 'c'] "
"--m ['str1', 'str2', 'upd1'] "
"--m_str ['str1', 'str2', 'upd1'] "
"--name giants "
"--numbers [1, 2, 3] "
"--repeat 3 "
"--s ['upd2'] --s_str ['upd2'] "
""
""
"--testget4 None --testlist [] "
"--testspacelist [] --x 10 "
"--debug --noexec --quack "
"--test1 "
"--testget1 --tmod_baz_x "
"--no? --nohelp --nohelpshort --nohelpxml --noq "
""
"--notest0 --notestget2 --notestget3 --notestnone")
####################################
# Test all kind of error conditions.
# Duplicate flag detection
try:
gflags.DEFINE_boolean("run", 0, "runhelp", short_name='q')
raise AssertionError("duplicate flag detection failed")
except gflags.DuplicateFlag:
pass
# Duplicate short flag detection
try:
gflags.DEFINE_boolean("zoom1", 0, "runhelp z1", short_name='z')
gflags.DEFINE_boolean("zoom2", 0, "runhelp z2", short_name='z')
raise AssertionError("duplicate short flag detection failed")
except gflags.DuplicateFlag, e:
self.assertTrue("The flag 'z' is defined twice. " in e.args[0])
self.assertTrue("First from" in e.args[0])
self.assertTrue(", Second from" in e.args[0])
# Duplicate mixed flag detection
try:
gflags.DEFINE_boolean("short1", 0, "runhelp s1", short_name='s')
gflags.DEFINE_boolean("s", 0, "runhelp s2")
raise AssertionError("duplicate mixed flag detection failed")
except gflags.DuplicateFlag, e:
self.assertTrue("The flag 's' is defined twice. " in e.args[0])
self.assertTrue("First from" in e.args[0])
self.assertTrue(", Second from" in e.args[0])
# Check that duplicate flag detection detects definition sites
# correctly.
flagnames = ["repeated"]
original_flags = gflags.FlagValues()
gflags.DEFINE_boolean(flagnames[0], False, "Flag about to be repeated.",
flag_values=original_flags)
duplicate_flags = module_foo.DuplicateFlags(flagnames)
try:
original_flags.AppendFlagValues(duplicate_flags)
except gflags.DuplicateFlagError, e:
self.assertTrue("flags_unittest" in str(e))
self.assertTrue("module_foo" in str(e))
# Make sure allow_override works
try:
gflags.DEFINE_boolean("dup1", 0, "runhelp d11", short_name='u',
allow_override=0)
flag = FLAGS.FlagDict()['dup1']
self.assertEqual(flag.default, 0)
gflags.DEFINE_boolean("dup1", 1, "runhelp d12", short_name='u',
allow_override=1)
flag = FLAGS.FlagDict()['dup1']
self.assertEqual(flag.default, 1)
except gflags.DuplicateFlag:
raise AssertionError("allow_override did not permit a flag duplication")
# Make sure allow_override works
try:
gflags.DEFINE_boolean("dup2", 0, "runhelp d21", short_name='u',
allow_override=1)
flag = FLAGS.FlagDict()['dup2']
self.assertEqual(flag.default, 0)
gflags.DEFINE_boolean("dup2", 1, "runhelp d22", short_name='u',
allow_override=0)
flag = FLAGS.FlagDict()['dup2']
self.assertEqual(flag.default, 1)
except gflags.DuplicateFlag:
raise AssertionError("allow_override did not permit a flag duplication")
# Make sure allow_override doesn't work with None default
try:
gflags.DEFINE_boolean("dup3", 0, "runhelp d31", short_name='u3',
allow_override=0)
flag = FLAGS.FlagDict()['dup3']
self.assertEqual(flag.default, 0)
gflags.DEFINE_boolean("dup3", None, "runhelp d32", short_name='u3',
allow_override=1)
raise AssertionError('Cannot override a flag with a default of None')
except gflags.DuplicateFlagCannotPropagateNoneToSwig:
pass
# Make sure that re-importing a module does not cause a DuplicateFlagError
# to be raised.
try:
sys.modules.pop(
"flags_modules_for_testing.module_baz")
import flags_modules_for_testing.module_baz
except gflags.DuplicateFlagError:
raise AssertionError("Module reimport caused flag duplication error")
# Make sure that when we override, the help string gets updated correctly
gflags.DEFINE_boolean("dup3", 0, "runhelp d31", short_name='u',
allow_override=1)
gflags.DEFINE_boolean("dup3", 1, "runhelp d32", short_name='u',
allow_override=1)
self.assert_(str(FLAGS).find('runhelp d31') == -1)
self.assert_(str(FLAGS).find('runhelp d32') != -1)
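# In other words (grounded in the two asserts above): when a flag is
# redefined with allow_override=1, the help text of the newest definition
# ('runhelp d32') is what str(FLAGS) reports; the overridden help string
# ('runhelp d31') disappears entirely.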
# Make sure AppendFlagValues works
new_flags = gflags.FlagValues()
gflags.DEFINE_boolean("new1", 0, "runhelp n1", flag_values=new_flags)
gflags.DEFINE_boolean("new2", 0, "runhelp n2", flag_values=new_flags)
self.assertEqual(len(new_flags.FlagDict()), 2)
old_len = len(FLAGS.FlagDict())
FLAGS.AppendFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict())-old_len, 2)
self.assertEqual("new1" in FLAGS.FlagDict(), True)
self.assertEqual("new2" in FLAGS.FlagDict(), True)
# Then test that removing those flags works
FLAGS.RemoveFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict()), old_len)
self.assertFalse("new1" in FLAGS.FlagDict())
self.assertFalse("new2" in FLAGS.FlagDict())
# Make sure AppendFlagValues works with flags with shortnames.
new_flags = gflags.FlagValues()
gflags.DEFINE_boolean("new3", 0, "runhelp n3", flag_values=new_flags)
gflags.DEFINE_boolean("new4", 0, "runhelp n4", flag_values=new_flags,
short_name="n4")
self.assertEqual(len(new_flags.FlagDict()), 3)
old_len = len(FLAGS.FlagDict())
FLAGS.AppendFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict())-old_len, 3)
self.assertTrue("new3" in FLAGS.FlagDict())
self.assertTrue("new4" in FLAGS.FlagDict())
self.assertTrue("n4" in FLAGS.FlagDict())
self.assertEqual(FLAGS.FlagDict()['n4'], FLAGS.FlagDict()['new4'])
# Then test removing them
FLAGS.RemoveFlagValues(new_flags)
self.assertEqual(len(FLAGS.FlagDict()), old_len)
self.assertFalse("new3" in FLAGS.FlagDict())
self.assertFalse("new4" in FLAGS.FlagDict())
self.assertFalse("n4" in FLAGS.FlagDict())
# Make sure AppendFlagValues fails on duplicates
gflags.DEFINE_boolean("dup4", 0, "runhelp d41")
new_flags = gflags.FlagValues()
gflags.DEFINE_boolean("dup4", 0, "runhelp d42", flag_values=new_flags)
try:
FLAGS.AppendFlagValues(new_flags)
raise AssertionError("ignore_copy was not set but caused no exception")
except gflags.DuplicateFlag:
pass
# Integer out of bounds
try:
argv = ('./program', '--repeat=-4')
FLAGS(argv)
raise AssertionError('integer bounds exception not raised:'
+ str(FLAGS.repeat))
except gflags.IllegalFlagValue:
pass
# Non-integer
try:
argv = ('./program', '--repeat=2.5')
FLAGS(argv)
raise AssertionError("malformed integer value exception not raised")
except gflags.IllegalFlagValue:
pass
# Missing required argument
try:
argv = ('./program', '--name')
FLAGS(argv)
raise AssertionError("Flag argument required exception not raised")
except gflags.FlagsError:
pass
# Non-boolean arguments for boolean
try:
argv = ('./program', '--debug=goofup')
FLAGS(argv)
raise AssertionError("Illegal flag value exception not raised")
except gflags.IllegalFlagValue:
pass
try:
argv = ('./program', '--debug=42')
FLAGS(argv)
raise AssertionError("Illegal flag value exception not raised")
except gflags.IllegalFlagValue:
pass
# Non-numeric argument for integer flag --repeat
try:
argv = ('./program', '--repeat', 'Bob', 'extra')
FLAGS(argv)
raise AssertionError("Illegal flag value exception not raised")
except gflags.IllegalFlagValue:
pass
# Test ModuleHelp().
helpstr = FLAGS.ModuleHelp(module_baz)
expected_help = "\n" + module_baz.__name__ + ":" + """
--[no]tmod_baz_x: Boolean flag.
(default: 'true')"""
self.assertMultiLineEqual(expected_help, helpstr)
# Test MainModuleHelp(). This must be part of test_flags because
# it depends on dup1/2/3/etc being introduced first.
helpstr = FLAGS.MainModuleHelp()
expected_help = "\n" + sys.argv[0] + ':' + """
--[no]debug: debughelp
(default: 'false')
-u,--[no]dup1: runhelp d12
(default: 'true')
-u,--[no]dup2: runhelp d22
(default: 'true')
-u,--[no]dup3: runhelp d32
(default: 'true')
--[no]dup4: runhelp d41
(default: 'false')
--kwery: <who|what|why|where|when>: ?
--l: how long to be
(default: '9223372032559808512')
(an integer)
--letters: a list of letters
(default: 'a,b,c')
(a comma separated list)
-m,--m_str: string option that can occur multiple times;
repeat this option to specify a list of values
(default: "['def1', 'def2']")
--name: namehelp
(default: 'Bob')
--[no]noexec: boolean flag with no as prefix
(default: 'true')
--numbers: a list of numbers
(default: '1,2,3')
(a comma separated list)
--[no]q: quiet mode
(default: 'true')
--[no]quack: superstring of 'q'
(default: 'false')
-r,--repeat: how many times to repeat (0-5)
(default: '4')
(a non-negative integer)
-s,--s_str: string option that can occur multiple times;
repeat this option to specify a list of values
(default: "['sing1']")
--[no]test0: test boolean parsing
--[no]test1: test boolean parsing
--[no]testget1: test parsing with defaults
--[no]testget2: test parsing with defaults
--[no]testget3: test parsing with defaults
--testget4: test parsing with defaults
(an integer)
--testlist: test lists parsing
(default: '')
(a comma separated list)
--[no]testnone: test boolean parsing
--testspacelist: tests space lists parsing
(default: '')
(a whitespace separated list)
--x: how eXtreme to be
(default: '3')
(an integer)
-z,--[no]zoom1: runhelp z1
(default: 'false')"""
# Insert the --help flags in their proper place.
help_help = """\
-?,--[no]help: show this help
--[no]helpshort: show usage only for this module
--[no]helpxml: like --help, but generates XML output
"""
expected_help = expected_help.replace(' --kwery',
help_help + ' --kwery')
self.assertMultiLineEqual(expected_help, helpstr)
class MultiNumericalFlagsTest(googletest.TestCase):
def testMultiNumericalFlags(self):
"""Test multi_int and multi_float flags."""
int_defaults = [77, 88,]
gflags.DEFINE_multi_int('m_int', int_defaults,
'integer option that can occur multiple times',
short_name='mi')
self.assertListEqual(FLAGS.get('m_int', None), int_defaults)
argv = ('./program', '--m_int=-99', '--mi=101')
FLAGS(argv)
self.assertListEqual(FLAGS.get('m_int', None), [-99, 101,])
float_defaults = [2.2, 3]
gflags.DEFINE_multi_float('m_float', float_defaults,
'float option that can occur multiple times',
short_name='mf')
for (expected, actual) in zip(float_defaults, FLAGS.get('m_float', None)):
self.assertAlmostEquals(expected, actual)
argv = ('./program', '--m_float=-17', '--mf=2.78e9')
FLAGS(argv)
expected_floats = [-17.0, 2.78e9]
for (expected, actual) in zip(expected_floats, FLAGS.get('m_float', None)):
self.assertAlmostEquals(expected, actual)
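# Reading of the test above: command-line occurrences of a multi flag
# replace the default list and accumulate with each other, the short name
# (--mi / --mf) feeds the same list as the long name, and every occurrence
# is parsed with the flag's element type, so the token '-17' arrives as
# the float -17.0:
#   ./program --m_int=-99 --mi=101   # FLAGS.m_int == [-99, 101]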
def testSingleValueDefault(self):
"""Test multi_int and multi_float flags with a single default value."""
int_default = 77
gflags.DEFINE_multi_int('m_int1', int_default,
'integer option that can occur multiple times')
self.assertListEqual(FLAGS.get('m_int1', None), [int_default])
float_default = 2.2
gflags.DEFINE_multi_float('m_float1', float_default,
'float option that can occur multiple times')
actual = FLAGS.get('m_float1', None)
self.assertEquals(1, len(actual))
self.assertAlmostEquals(actual[0], float_default)
def testBadMultiNumericalFlags(self):
"""Test multi_int and multi_float flags with non-parseable values."""
# Test non-parseable defaults.
self.assertRaisesWithRegexpMatch(
gflags.IllegalFlagValue,
'flag --m_int2=abc: invalid literal for int\(\) with base 10: \'abc\'',
gflags.DEFINE_multi_int, 'm_int2', ['abc'], 'desc')
self.assertRaisesWithRegexpMatch(
gflags.IllegalFlagValue,
'flag --m_float2=abc: invalid literal for float\(\): abc',
gflags.DEFINE_multi_float, 'm_float2', ['abc'], 'desc')
# Test non-parseable command line values.
gflags.DEFINE_multi_int('m_int2', '77',
'integer option that can occur multiple times')
argv = ('./program', '--m_int2=def')
self.assertRaisesWithRegexpMatch(
gflags.IllegalFlagValue,
'flag --m_int2=def: invalid literal for int\(\) with base 10: \'def\'',
FLAGS, argv)
gflags.DEFINE_multi_float('m_float2', 2.2,
'float option that can occur multiple times')
argv = ('./program', '--m_float2=def')
self.assertRaisesWithRegexpMatch(
gflags.IllegalFlagValue,
'flag --m_float2=def: invalid literal for float\(\): def',
FLAGS, argv)
class UnicodeFlagsTest(googletest.TestCase):
"""Testing proper unicode support for flags."""
def testUnicodeDefaultAndHelpstring(self):
gflags.DEFINE_string("unicode_str", "\xC3\x80\xC3\xBD".decode("utf-8"),
"help:\xC3\xAA".decode("utf-8"))
argv = ("./program",)
FLAGS(argv) # should not raise any exceptions
argv = ("./program", "--unicode_str=foo")
FLAGS(argv) # should not raise any exceptions
def testUnicodeInList(self):
gflags.DEFINE_list("unicode_list", ["abc", "\xC3\x80".decode("utf-8"),
"\xC3\xBD".decode("utf-8")],
"help:\xC3\xAB".decode("utf-8"))
argv = ("./program",)
FLAGS(argv) # should not raise any exceptions
argv = ("./program", "--unicode_list=hello,there")
FLAGS(argv) # should not raise any exceptions
def testXMLOutput(self):
gflags.DEFINE_string("unicode1", "\xC3\x80\xC3\xBD".decode("utf-8"),
"help:\xC3\xAC".decode("utf-8"))
gflags.DEFINE_list("unicode2", ["abc", "\xC3\x80".decode("utf-8"),
"\xC3\xBD".decode("utf-8")],
"help:\xC3\xAD".decode("utf-8"))
gflags.DEFINE_list("non_unicode", ["abc", "def", "ghi"],
"help:\xC3\xAD".decode("utf-8"))
outfile = cStringIO.StringIO()
FLAGS.WriteHelpInXMLFormat(outfile)
actual_output = outfile.getvalue()
# The xml output is large, so we just check parts of it.
self.assertTrue("<name>unicode1</name>\n"
" <meaning>help:ì</meaning>\n"
" <default>Àý</default>\n"
" <current>Àý</current>"
in actual_output)
self.assertTrue("<name>unicode2</name>\n"
" <meaning>help:í</meaning>\n"
" <default>abc,À,ý</default>\n"
" <current>[\'abc\', u\'\\xc0\', u\'\\xfd\']</current>"
in actual_output)
self.assertTrue("<name>non_unicode</name>\n"
" <meaning>help:í</meaning>\n"
" <default>abc,def,ghi</default>\n"
" <current>[\'abc\', \'def\', \'ghi\']</current>"
in actual_output)
class LoadFromFlagFileTest(googletest.TestCase):
"""Testing loading flags from a file and parsing them."""
def setUp(self):
self.flag_values = gflags.FlagValues()
# make sure we are using the old, stupid way of parsing flags.
self.flag_values.UseGnuGetOpt(False)
gflags.DEFINE_string('UnitTestMessage1', 'Foo!', 'You Add Here.',
flag_values=self.flag_values)
gflags.DEFINE_string('UnitTestMessage2', 'Bar!', 'Hello, Sailor!',
flag_values=self.flag_values)
gflags.DEFINE_boolean('UnitTestBoolFlag', 0, 'Some Boolean thing',
flag_values=self.flag_values)
gflags.DEFINE_integer('UnitTestNumber', 12345, 'Some integer',
lower_bound=0, flag_values=self.flag_values)
gflags.DEFINE_list('UnitTestList', "1,2,3", 'Some list',
flag_values=self.flag_values)
self.files_to_delete = []
def tearDown(self):
self._RemoveTestFiles()
def _SetupTestFiles(self):
""" Creates and sets up some dummy flagfile files with bogus flags"""
# Figure out where to create temporary files
tmp_path = '/tmp/flags_unittest'
if os.path.exists(tmp_path):
shutil.rmtree(tmp_path)
os.makedirs(tmp_path)
try:
tmp_flag_file_1 = open(tmp_path + '/UnitTestFile1.tst', 'w')
tmp_flag_file_2 = open(tmp_path + '/UnitTestFile2.tst', 'w')
tmp_flag_file_3 = open(tmp_path + '/UnitTestFile3.tst', 'w')
tmp_flag_file_4 = open(tmp_path + '/UnitTestFile4.tst', 'w')
except IOError, e_msg:
print e_msg
print 'FAIL\n File Creation problem in Unit Test'
sys.exit(1)
# put some dummy flags in our test files
tmp_flag_file_1.write('#A Fake Comment\n')
tmp_flag_file_1.write('--UnitTestMessage1=tempFile1!\n')
tmp_flag_file_1.write('\n')
tmp_flag_file_1.write('--UnitTestNumber=54321\n')
tmp_flag_file_1.write('--noUnitTestBoolFlag\n')
file_list = [tmp_flag_file_1.name]
# this one includes test file 1
tmp_flag_file_2.write('//A Different Fake Comment\n')
tmp_flag_file_2.write('--flagfile=%s\n' % tmp_flag_file_1.name)
tmp_flag_file_2.write('--UnitTestMessage2=setFromTempFile2\n')
tmp_flag_file_2.write('\t\t\n')
tmp_flag_file_2.write('--UnitTestNumber=6789a\n')
file_list.append(tmp_flag_file_2.name)
# this file points to itself
tmp_flag_file_3.write('--flagfile=%s\n' % tmp_flag_file_3.name)
tmp_flag_file_3.write('--UnitTestMessage1=setFromTempFile3\n')
tmp_flag_file_3.write('#YAFC\n')
tmp_flag_file_3.write('--UnitTestBoolFlag\n')
file_list.append(tmp_flag_file_3.name)
# this file is unreadable
tmp_flag_file_4.write('--flagfile=%s\n' % tmp_flag_file_3.name)
tmp_flag_file_4.write('--UnitTestMessage1=setFromTempFile3\n')
tmp_flag_file_4.write('--UnitTestMessage1=setFromTempFile3\n')
os.chmod(tmp_path + '/UnitTestFile4.tst', 0)
file_list.append(tmp_flag_file_4.name)
tmp_flag_file_1.close()
tmp_flag_file_2.close()
tmp_flag_file_3.close()
tmp_flag_file_4.close()
self.files_to_delete = file_list
return file_list # these are just the file names
# end SetupFiles def
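# A sketch of the flagfile format these fixtures rely on: one flag per
# line; lines starting with '#' or '//' are comments; blank or
# whitespace-only lines are skipped; and a '--flagfile=<path>' line
# splices another file in recursively (file 2 includes file 1, and file 3
# includes itself, which the reader must detect instead of recursing
# forever). File 1, for instance, reads:
#   #A Fake Comment
#   --UnitTestMessage1=tempFile1!
#
#   --UnitTestNumber=54321
#   --noUnitTestBoolFlag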
def _RemoveTestFiles(self):
"""Closes the files we just created. tempfile deletes them for us """
for file_name in self.files_to_delete:
try:
os.remove(file_name)
except OSError, e_msg:
print '%s\n, Problem deleting test file' % e_msg
#end RemoveTestFiles def
def _ReadFlagsFromFiles(self, argv, force_gnu):
return argv[:1] + self.flag_values.ReadFlagsFromFiles(argv[1:],
force_gnu=force_gnu)
#### Flagfile Unit Tests ####
def testMethod_flagfiles_1(self):
""" Test trivial case with no flagfile based options. """
fake_cmd_line = 'fooScript --UnitTestBoolFlag'
fake_argv = fake_cmd_line.split(' ')
self.flag_values(fake_argv)
self.assertEqual( self.flag_values.UnitTestBoolFlag, 1)
self.assertEqual( fake_argv, self._ReadFlagsFromFiles(fake_argv, False))
# end testMethodOne
def testMethod_flagfiles_2(self):
"""Tests parsing one file + arguments off simulated argv"""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = 'fooScript --q --flagfile=%s' % tmp_files[0]
fake_argv = fake_cmd_line.split(' ')
# We should see the original cmd line with the file's contents spliced in.
# Flags from the file will appear in the order they are specified
# in the file, in the same position as the flagfile argument.
expected_results = ['fooScript',
'--q',
'--UnitTestMessage1=tempFile1!',
'--UnitTestNumber=54321',
'--noUnitTestBoolFlag']
test_results = self._ReadFlagsFromFiles(fake_argv, False)
self.assertEqual(expected_results, test_results)
# end testTwo def
def testMethod_flagfiles_3(self):
"""Tests parsing nested files + arguments of simulated argv"""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = ('fooScript --UnitTestNumber=77 --flagfile=%s'
% tmp_files[1])
fake_argv = fake_cmd_line.split(' ')
expected_results = ['fooScript',
'--UnitTestNumber=77',
'--UnitTestMessage1=tempFile1!',
'--UnitTestNumber=54321',
'--noUnitTestBoolFlag',
'--UnitTestMessage2=setFromTempFile2',
'--UnitTestNumber=6789a']
test_results = self._ReadFlagsFromFiles(fake_argv, False)
self.assertEqual(expected_results, test_results)
# end testThree def
def testMethod_flagfiles_4(self):
"""Tests parsing self-referential files + arguments of simulated argv.
This test should print a warning to stderr of some sort.
"""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = ('fooScript --flagfile=%s --noUnitTestBoolFlag'
% tmp_files[2])
fake_argv = fake_cmd_line.split(' ')
expected_results = ['fooScript',
'--UnitTestMessage1=setFromTempFile3',
'--UnitTestBoolFlag',
'--noUnitTestBoolFlag' ]
test_results = self._ReadFlagsFromFiles(fake_argv, False)
self.assertEqual(expected_results, test_results)
def testMethod_flagfiles_5(self):
"""Test that --flagfile parsing respects the '--' end-of-options marker."""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = 'fooScript --SomeFlag -- --flagfile=%s' % tmp_files[0]
fake_argv = fake_cmd_line.split(' ')
expected_results = ['fooScript',
'--SomeFlag',
'--',
'--flagfile=%s' % tmp_files[0]]
test_results = self._ReadFlagsFromFiles(fake_argv, False)
self.assertEqual(expected_results, test_results)
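# Grounded in the expected_results above: everything after a bare '--'
# end-of-options marker is passed through verbatim, so a --flagfile token
# appearing there is never expanded.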
def testMethod_flagfiles_6(self):
"""Test that --flagfile parsing stops at non-options (non-GNU behavior)."""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%s'
% tmp_files[0])
fake_argv = fake_cmd_line.split(' ')
expected_results = ['fooScript',
'--SomeFlag',
'some_arg',
'--flagfile=%s' % tmp_files[0]]
test_results = self._ReadFlagsFromFiles(fake_argv, False)
self.assertEqual(expected_results, test_results)
def testMethod_flagfiles_7(self):
"""Test that --flagfile parsing skips over a non-option (GNU behavior)."""
self.flag_values.UseGnuGetOpt()
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%s'
% tmp_files[0])
fake_argv = fake_cmd_line.split(' ')
expected_results = ['fooScript',
'--SomeFlag',
'some_arg',
'--UnitTestMessage1=tempFile1!',
'--UnitTestNumber=54321',
'--noUnitTestBoolFlag']
test_results = self._ReadFlagsFromFiles(fake_argv, False)
self.assertEqual(expected_results, test_results)
def testMethod_flagfiles_8(self):
"""Test that --flagfile parsing respects force_gnu=True."""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%s'
% tmp_files[0])
fake_argv = fake_cmd_line.split(' ')
expected_results = ['fooScript',
'--SomeFlag',
'some_arg',
'--UnitTestMessage1=tempFile1!',
'--UnitTestNumber=54321',
'--noUnitTestBoolFlag']
test_results = self._ReadFlagsFromFiles(fake_argv, True)
self.assertEqual(expected_results, test_results)
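# Taken together, flagfile tests 6 through 8 pin down the scanning modes:
# with the default (non-GNU) behavior the reader stops expanding
# --flagfile at the first non-option argument ('some_arg'); switching to
# GNU-style scanning via UseGnuGetOpt(), or passing force_gnu=True, makes
# it skip over the non-option and still splice in the file's contents.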
def testMethod_flagfiles_NoPermissions(self):
"""Test that --flagfile raises except on file that is unreadable."""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%s'
% tmp_files[3])
fake_argv = fake_cmd_line.split(' ')
self.assertRaises(gflags.CantOpenFlagFileError,
self._ReadFlagsFromFiles, fake_argv, True)
def testMethod_flagfiles_NotFound(self):
"""Test that --flagfile raises except on file that does not exist."""
tmp_files = self._SetupTestFiles()
# specify our temp file on the fake cmd line
fake_cmd_line = ('fooScript --SomeFlag some_arg --flagfile=%sNOTEXIST'
% tmp_files[3])
fake_argv = fake_cmd_line.split(' ')
self.assertRaises(gflags.CantOpenFlagFileError,
self._ReadFlagsFromFiles, fake_argv, True)
def test_flagfiles_user_path_expansion(self):
"""Test that user directory referenced paths (ie. ~/foo) are correctly
expanded. This test depends on whatever account's running the unit test
to have read/write access to their own home directory, otherwise it'll
FAIL.
"""
fake_flagfile_item_style_1 = '--flagfile=~/foo.file'
fake_flagfile_item_style_2 = '-flagfile=~/foo.file'
expected_results = os.path.expanduser('~/foo.file')
test_results = self.flag_values.ExtractFilename(fake_flagfile_item_style_1)
self.assertEqual(expected_results, test_results)
test_results = self.flag_values.ExtractFilename(fake_flagfile_item_style_2)
self.assertEqual(expected_results, test_results)
# end testFour def
def test_no_touchy_non_flags(self):
"""
Test that the flags parser does not mutilate arguments which are
not supposed to be flags
"""
fake_argv = ['fooScript', '--UnitTestBoolFlag',
'command', '--command_arg1', '--UnitTestBoom', '--UnitTestB']
argv = self.flag_values(fake_argv)
self.assertEqual(argv, fake_argv[:1] + fake_argv[2:])
def test_parse_flags_after_args_if_using_gnu_getopt(self):
"""
Test that flags given after arguments are parsed if using gnu_getopt.
"""
self.flag_values.UseGnuGetOpt()
fake_argv = ['fooScript', '--UnitTestBoolFlag',
'command', '--UnitTestB']
argv = self.flag_values(fake_argv)
self.assertEqual(argv, ['fooScript', 'command'])
def test_SetDefault(self):
"""
Test changing flag defaults.
"""
# Test that SetDefault changes both the default and the value,
# and that the value is changed when one is given as an option.
self.flag_values['UnitTestMessage1'].SetDefault('New value')
self.assertEqual(self.flag_values.UnitTestMessage1, 'New value')
self.assertEqual(self.flag_values['UnitTestMessage1'].default_as_str,
"'New value'")
self.flag_values([ 'dummyscript', '--UnitTestMessage1=Newer value' ])
self.assertEqual(self.flag_values.UnitTestMessage1, 'Newer value')
# Test that setting the default to None works correctly.
self.flag_values['UnitTestNumber'].SetDefault(None)
self.assertEqual(self.flag_values.UnitTestNumber, None)
self.assertEqual(self.flag_values['UnitTestNumber'].default_as_str, None)
self.flag_values([ 'dummyscript', '--UnitTestNumber=56' ])
self.assertEqual(self.flag_values.UnitTestNumber, 56)
# Test that setting the default to zero works correctly.
self.flag_values['UnitTestNumber'].SetDefault(0)
self.assertEqual(self.flag_values.UnitTestNumber, 0)
self.assertEqual(self.flag_values['UnitTestNumber'].default_as_str, "'0'")
self.flag_values([ 'dummyscript', '--UnitTestNumber=56' ])
self.assertEqual(self.flag_values.UnitTestNumber, 56)
# Test that setting the default to "" works correctly.
self.flag_values['UnitTestMessage1'].SetDefault("")
self.assertEqual(self.flag_values.UnitTestMessage1, "")
self.assertEqual(self.flag_values['UnitTestMessage1'].default_as_str, "''")
self.flag_values([ 'dummyscript', '--UnitTestMessage1=fifty-six' ])
self.assertEqual(self.flag_values.UnitTestMessage1, "fifty-six")
# Test that setting the default to false works correctly.
self.flag_values['UnitTestBoolFlag'].SetDefault(False)
self.assertEqual(self.flag_values.UnitTestBoolFlag, False)
self.assertEqual(self.flag_values['UnitTestBoolFlag'].default_as_str,
"'false'")
self.flag_values([ 'dummyscript', '--UnitTestBoolFlag=true' ])
self.assertEqual(self.flag_values.UnitTestBoolFlag, True)
# Test that setting a list default works correctly.
self.flag_values['UnitTestList'].SetDefault('4,5,6')
self.assertEqual(self.flag_values.UnitTestList, ['4', '5', '6'])
self.assertEqual(self.flag_values['UnitTestList'].default_as_str, "'4,5,6'")
self.flag_values([ 'dummyscript', '--UnitTestList=7,8,9' ])
self.assertEqual(self.flag_values.UnitTestList, ['7', '8', '9'])
# Test that setting invalid defaults raises exceptions
self.assertRaises(gflags.IllegalFlagValue,
self.flag_values['UnitTestNumber'].SetDefault, 'oops')
self.assertRaises(gflags.IllegalFlagValue,
self.flag_values.SetDefault, 'UnitTestNumber', -1)
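# Summary of the SetDefault behavior checked above, as a sketch:
#   flag_values['UnitTestNumber'].SetDefault(0)
#   flag_values.UnitTestNumber            # -> 0 (value tracks the default)
#   flag_values(['prog', '--UnitTestNumber=56'])
#   flag_values.UnitTestNumber            # -> 56 (parsing still wins)
# default_as_str holds the quoted string form ("'0'"), or None when the
# default is None, and a default that fails the flag's parser or bounds
# check raises gflags.IllegalFlagValue immediately.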
class FlagsParsingTest(googletest.TestCase):
"""Testing different aspects of parsing: '-f' vs '--flag', etc."""
def setUp(self):
self.flag_values = gflags.FlagValues()
def testMethod_ShortestUniquePrefixes(self):
"""Test FlagValues.ShortestUniquePrefixes"""
gflags.DEFINE_string('a', '', '', flag_values=self.flag_values)
gflags.DEFINE_string('abc', '', '', flag_values=self.flag_values)
gflags.DEFINE_string('common_a_string', '', '', flag_values=self.flag_values)
gflags.DEFINE_boolean('common_b_boolean', 0, '',
flag_values=self.flag_values)
gflags.DEFINE_boolean('common_c_boolean', 0, '',
flag_values=self.flag_values)
gflags.DEFINE_boolean('common', 0, '', flag_values=self.flag_values)
gflags.DEFINE_integer('commonly', 0, '', flag_values=self.flag_values)
gflags.DEFINE_boolean('zz', 0, '', flag_values=self.flag_values)
gflags.DEFINE_integer('nozz', 0, '', flag_values=self.flag_values)
shorter_flags = self.flag_values.ShortestUniquePrefixes(
self.flag_values.FlagDict())
expected_results = {'nocommon_b_boolean': 'nocommon_b',
'common_c_boolean': 'common_c',
'common_b_boolean': 'common_b',
'a': 'a',
'abc': 'ab',
'zz': 'z',
'nozz': 'nozz',
'common_a_string': 'common_a',
'commonly': 'commonl',
'nocommon_c_boolean': 'nocommon_c',
'nocommon': 'nocommon',
'common': 'common'}
for name, shorter in expected_results.iteritems():
self.assertEquals(shorter_flags[name], shorter)
self.flag_values.__delattr__('a')
self.flag_values.__delattr__('abc')
self.flag_values.__delattr__('common_a_string')
self.flag_values.__delattr__('common_b_boolean')
self.flag_values.__delattr__('common_c_boolean')
self.flag_values.__delattr__('common')
self.flag_values.__delattr__('commonly')
self.flag_values.__delattr__('zz')
self.flag_values.__delattr__('nozz')
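# What the expected_results table above encodes: every boolean flag also
# registers its negated 'no<name>' spelling, so those names take part in
# the prefix computation too; 'commonly' needs 'commonl' to stay distinct
# from 'common' and the 'common_*' family; and 'nozz' cannot be shortened
# at all, because the integer flag 'nozz' collides with the negated
# spelling of the boolean 'zz'.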
def test_twodasharg_first(self):
gflags.DEFINE_string("twodash_name", "Bob", "namehelp",
flag_values=self.flag_values)
gflags.DEFINE_string("twodash_blame", "Rob", "blamehelp",
flag_values=self.flag_values)
argv = ('./program',
'--',
'--twodash_name=Harry')
argv = self.flag_values(argv)
self.assertEqual('Bob', self.flag_values.twodash_name)
self.assertEqual(argv[1], '--twodash_name=Harry')
def test_twodasharg_middle(self):
gflags.DEFINE_string("twodash2_name", "Bob", "namehelp",
flag_values=self.flag_values)
gflags.DEFINE_string("twodash2_blame", "Rob", "blamehelp",
flag_values=self.flag_values)
argv = ('./program',
'--twodash2_blame=Larry',
'--',
'--twodash2_name=Harry')
argv = self.flag_values(argv)
self.assertEqual('Bob', self.flag_values.twodash2_name)
self.assertEqual('Larry', self.flag_values.twodash2_blame)
self.assertEqual(argv[1], '--twodash2_name=Harry')
def test_onedasharg_first(self):
gflags.DEFINE_string("onedash_name", "Bob", "namehelp",
flag_values=self.flag_values)
gflags.DEFINE_string("onedash_blame", "Rob", "blamehelp",
flag_values=self.flag_values)
argv = ('./program',
'-',
'--onedash_name=Harry')
argv = self.flag_values(argv)
self.assertEqual(argv[1], '-')
# TODO(csilvers): we should still parse --onedash_name=Harry as a
# flag, but currently we don't (we stop flag processing as soon as
# we see the first non-flag).
# - This requires gnu_getopt from Python 2.3+ see FLAGS.UseGnuGetOpt()
def test_unrecognized_flags(self):
gflags.DEFINE_string("name", "Bob", "namehelp", flag_values=self.flag_values)
# Unknown flag --nosuchflag
try:
argv = ('./program', '--nosuchflag', '--name=Bob', 'extra')
self.flag_values(argv)
raise AssertionError("Unknown flag exception not raised")
except gflags.UnrecognizedFlag, e:
assert e.flagname == 'nosuchflag'
assert e.flagvalue == '--nosuchflag'
# Unknown flag -w (short option)
try:
argv = ('./program', '-w', '--name=Bob', 'extra')
self.flag_values(argv)
raise AssertionError("Unknown flag exception not raised")
except gflags.UnrecognizedFlag, e:
assert e.flagname == 'w'
assert e.flagvalue == '-w'
# Unknown flag --nosuchflagwithparam=foo
try:
argv = ('./program', '--nosuchflagwithparam=foo', '--name=Bob', 'extra')
self.flag_values(argv)
raise AssertionError("Unknown flag exception not raised")
except gflags.UnrecognizedFlag, e:
assert e.flagname == 'nosuchflagwithparam'
assert e.flagvalue == '--nosuchflagwithparam=foo'
# Allow unknown flag --nosuchflag if specified with undefok
argv = ('./program', '--nosuchflag', '--name=Bob',
'--undefok=nosuchflag', 'extra')
argv = self.flag_values(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
# Allow unknown flag --noboolflag if undefok=boolflag is specified
argv = ('./program', '--noboolflag', '--name=Bob',
'--undefok=boolflag', 'extra')
argv = self.flag_values(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
# But not if the flagname is misspelled:
try:
argv = ('./program', '--nosuchflag', '--name=Bob',
'--undefok=nosuchfla', 'extra')
self.flag_values(argv)
raise AssertionError("Unknown flag exception not raised")
except gflags.UnrecognizedFlag, e:
assert e.flagname == 'nosuchflag'
try:
argv = ('./program', '--nosuchflag', '--name=Bob',
'--undefok=nosuchflagg', 'extra')
self.flag_values(argv)
raise AssertionError("Unknown flag exception not raised")
except gflags.UnrecognizedFlag, e:
assert e.flagname == 'nosuchflag'
# Allow unknown short flag -w if specified with undefok
argv = ('./program', '-w', '--name=Bob', '--undefok=w', 'extra')
argv = self.flag_values(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
# Allow unknown flag --nosuchflagwithparam=foo if specified
# with undefok
argv = ('./program', '--nosuchflagwithparam=foo', '--name=Bob',
'--undefok=nosuchflagwithparam', 'extra')
argv = self.flag_values(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
# Even if undefok specifies multiple flags
argv = ('./program', '--nosuchflag', '-w', '--nosuchflagwithparam=foo',
'--name=Bob',
'--undefok=nosuchflag,w,nosuchflagwithparam',
'extra')
argv = self.flag_values(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
# However, not if undefok doesn't specify the flag
try:
argv = ('./program', '--nosuchflag', '--name=Bob',
'--undefok=another_such', 'extra')
self.flag_values(argv)
raise AssertionError("Unknown flag exception not raised")
except gflags.UnrecognizedFlag, e:
assert e.flagname == 'nosuchflag'
# Make sure --undefok doesn't mask other option errors.
try:
# Provide an option requiring a parameter but not giving it one.
argv = ('./program', '--undefok=name', '--name')
self.flag_values(argv)
raise AssertionError("Missing option parameter exception not raised")
except gflags.UnrecognizedFlag:
raise AssertionError("Wrong kind of error exception raised")
except gflags.FlagsError:
pass
# Test --undefok <list>
argv = ('./program', '--nosuchflag', '-w', '--nosuchflagwithparam=foo',
'--name=Bob',
'--undefok',
'nosuchflag,w,nosuchflagwithparam',
'extra')
argv = self.flag_values(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
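# The undefok cases above establish the rule: --undefok suppresses
# UnrecognizedFlag only for exact flag names (a boolean listed as
# 'boolflag' also covers its '--noboolflag' spelling); misspellings still
# raise; it works for short flags and '=param' forms; it accepts a
# comma-separated list, given either as --undefok=<list> or as
# '--undefok <list>'; and it never masks other parse errors such as a
# flag missing its required parameter.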
class NonGlobalFlagsTest(googletest.TestCase):
def test_nonglobal_flags(self):
"""Test use of non-global FlagValues"""
nonglobal_flags = gflags.FlagValues()
gflags.DEFINE_string("nonglobal_flag", "Bob", "flaghelp", nonglobal_flags)
argv = ('./program',
'--nonglobal_flag=Mary',
'extra')
argv = nonglobal_flags(argv)
assert len(argv) == 2, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
assert argv[1]=='extra', "extra argument not preserved"
assert nonglobal_flags['nonglobal_flag'].value == 'Mary'
def test_unrecognized_nonglobal_flags(self):
"""Test unrecognized non-global flags"""
nonglobal_flags = gflags.FlagValues()
argv = ('./program',
'--nosuchflag')
try:
argv = nonglobal_flags(argv)
raise AssertionError("Unknown flag exception not raised")
except gflags.UnrecognizedFlag, e:
assert e.flagname == 'nosuchflag'
pass
argv = ('./program',
'--nosuchflag',
'--undefok=nosuchflag')
argv = nonglobal_flags(argv)
assert len(argv) == 1, "wrong number of arguments pulled"
assert argv[0]=='./program', "program name not preserved"
def test_create_flag_errors(self):
# Since the exception classes are exposed, nothing stops users
# from creating their own instances. This test makes sure that
# people modifying the flags module understand that the external
# mechanisms for creating the exceptions should continue to work.
e = gflags.FlagsError()
e = gflags.FlagsError("message")
e = gflags.DuplicateFlag()
e = gflags.DuplicateFlag("message")
e = gflags.IllegalFlagValue()
e = gflags.IllegalFlagValue("message")
e = gflags.UnrecognizedFlag()
e = gflags.UnrecognizedFlag("message")
def testFlagValuesDelAttr(self):
"""Checks that del self.flag_values.flag_id works."""
default_value = 'default value for testFlagValuesDelAttr'
# 1. Declare and delete a flag with no short name.
flag_values = gflags.FlagValues()
gflags.DEFINE_string('delattr_foo', default_value, 'A simple flag.',
flag_values=flag_values)
self.assertEquals(flag_values.delattr_foo, default_value)
flag_obj = flag_values['delattr_foo']
# We also check that _FlagIsRegistered works as expected :)
self.assertTrue(flag_values._FlagIsRegistered(flag_obj))
del flag_values.delattr_foo
self.assertFalse('delattr_foo' in flag_values.FlagDict())
self.assertFalse(flag_values._FlagIsRegistered(flag_obj))
# If the previous del FLAGS.delattr_foo did not work properly, the
# next definition will trigger a redefinition error.
gflags.DEFINE_integer('delattr_foo', 3, 'A simple flag.',
flag_values=flag_values)
del flag_values.delattr_foo
self.assertFalse('delattr_foo' in flag_values.RegisteredFlags())
# 2. Declare and delete a flag with a short name.
gflags.DEFINE_string('delattr_bar', default_value, 'flag with short name',
short_name='x5', flag_values=flag_values)
flag_obj = flag_values['delattr_bar']
self.assertTrue(flag_values._FlagIsRegistered(flag_obj))
del flag_values.x5
self.assertTrue(flag_values._FlagIsRegistered(flag_obj))
del flag_values.delattr_bar
self.assertFalse(flag_values._FlagIsRegistered(flag_obj))
# 3. Just like 2, but del flag_values.name last
gflags.DEFINE_string('delattr_bar', default_value, 'flag with short name',
short_name='x5', flag_values=flag_values)
flag_obj = flag_values['delattr_bar']
self.assertTrue(flag_values._FlagIsRegistered(flag_obj))
del flag_values.delattr_bar
self.assertTrue(flag_values._FlagIsRegistered(flag_obj))
del flag_values.x5
self.assertFalse(flag_values._FlagIsRegistered(flag_obj))
self.assertFalse('delattr_bar' in flag_values.RegisteredFlags())
self.assertFalse('x5' in flag_values.RegisteredFlags())
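# Parts 2 and 3 above demonstrate the deletion rule for flags with short
# names: the flag stays registered until both of its names have been
# deleted, in either order; only after the second 'del' does
# _FlagIsRegistered become False and the names leave RegisteredFlags().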
class KeyFlagsTest(googletest.TestCase):
def setUp(self):
self.flag_values = gflags.FlagValues()
def _GetNamesOfDefinedFlags(self, module, flag_values):
"""Returns the list of names of flags defined by a module.
Auxiliary for the testKeyFlags* methods.
Args:
module: A module object or a string module name.
flag_values: A FlagValues object.
Returns:
A list of strings.
"""
return [f.name for f in flag_values._GetFlagsDefinedByModule(module)]
def _GetNamesOfKeyFlags(self, module, flag_values):
"""Returns the list of names of key flags for a module.
Auxiliary for the testKeyFlags* methods.
Args:
module: A module object or a string module name.
flag_values: A FlagValues object.
Returns:
A list of strings.
"""
return [f.name for f in flag_values._GetKeyFlagsForModule(module)]
def _AssertListsHaveSameElements(self, list_1, list_2):
# Checks that two lists have the same elements with the same
# multiplicity, in possibly different order.
list_1 = list(list_1)
list_1.sort()
list_2 = list(list_2)
list_2.sort()
self.assertListEqual(list_1, list_2)
def testKeyFlags(self):
# Before starting any testing, make sure no flags are already
# defined for module_foo and module_bar.
self.assertListEqual(self._GetNamesOfKeyFlags(module_foo, self.flag_values),
[])
self.assertListEqual(self._GetNamesOfKeyFlags(module_bar, self.flag_values),
[])
self.assertListEqual(self._GetNamesOfDefinedFlags(module_foo,
self.flag_values),
[])
self.assertListEqual(self._GetNamesOfDefinedFlags(module_bar,
self.flag_values),
[])
# Defines a few flags in module_foo and module_bar.
module_foo.DefineFlags(flag_values=self.flag_values)
try:
# Part 1. Check that all flags defined by module_foo are key for
# that module, and similarly for module_bar.
for module in [module_foo, module_bar]:
self._AssertListsHaveSameElements(
self.flag_values._GetFlagsDefinedByModule(module),
self.flag_values._GetKeyFlagsForModule(module))
# Also check that each module defined the expected flags.
self._AssertListsHaveSameElements(
self._GetNamesOfDefinedFlags(module, self.flag_values),
module.NamesOfDefinedFlags())
# Part 2. Check that gflags.DECLARE_key_flag works fine.
# Declare that some flags from module_bar are key for
# module_foo.
module_foo.DeclareKeyFlags(flag_values=self.flag_values)
# Check that module_foo has the expected list of defined flags.
self._AssertListsHaveSameElements(
self._GetNamesOfDefinedFlags(module_foo, self.flag_values),
module_foo.NamesOfDefinedFlags())
# Check that module_foo has the expected list of key flags.
self._AssertListsHaveSameElements(
self._GetNamesOfKeyFlags(module_foo, self.flag_values),
module_foo.NamesOfDeclaredKeyFlags())
# Part 3. Check that gflags.ADOPT_module_key_flags works fine.
# Trigger a call to gflags.ADOPT_module_key_flags(module_bar)
# inside module_foo. This should declare a few more key
# flags in module_foo.
module_foo.DeclareExtraKeyFlags(flag_values=self.flag_values)
# Check that module_foo has the expected list of key flags.
self._AssertListsHaveSameElements(
self._GetNamesOfKeyFlags(module_foo, self.flag_values),
module_foo.NamesOfDeclaredKeyFlags() +
module_foo.NamesOfDeclaredExtraKeyFlags())
finally:
module_foo.RemoveFlags(flag_values=self.flag_values)
def testKeyFlagsWithNonDefaultFlagValuesObject(self):
# Check that key flags work even when we use a FlagValues object
# that is not the default gflags.FLAGS object. Otherwise, this
# test is similar to testKeyFlags, but it uses only module_bar.
# The other test module (module_foo) uses only the default values
# for the flag_values keyword arguments. This way, testKeyFlags
# and this method test both the default FlagValues, the explicitly
# specified one, and a mixed usage of the two.
# A brand-new FlagValues object, to use instead of gflags.FLAGS.
fv = gflags.FlagValues()
# Before starting any testing, make sure no flags are already
# defined for module_foo and module_bar.
self.assertListEqual(
self._GetNamesOfKeyFlags(module_bar, fv),
[])
self.assertListEqual(
self._GetNamesOfDefinedFlags(module_bar, fv),
[])
module_bar.DefineFlags(flag_values=fv)
# Check that all flags defined by module_bar are key for that
# module, and that module_bar defined the expected flags.
self._AssertListsHaveSameElements(
fv._GetFlagsDefinedByModule(module_bar),
fv._GetKeyFlagsForModule(module_bar))
self._AssertListsHaveSameElements(
self._GetNamesOfDefinedFlags(module_bar, fv),
module_bar.NamesOfDefinedFlags())
# Pick two flags from module_bar, declare them as key for the
# current (i.e., main) module (via gflags.DECLARE_key_flag), and
# check that we get the expected effect. The important thing is
# that we always use flag_values=fv (instead of the default
# gflags.FLAGS).
main_module = gflags._GetMainModule()
names_of_flags_defined_by_bar = module_bar.NamesOfDefinedFlags()
flag_name_0 = names_of_flags_defined_by_bar[0]
flag_name_2 = names_of_flags_defined_by_bar[2]
gflags.DECLARE_key_flag(flag_name_0, flag_values=fv)
self._AssertListsHaveSameElements(
self._GetNamesOfKeyFlags(main_module, fv),
[flag_name_0])
gflags.DECLARE_key_flag(flag_name_2, flag_values=fv)
self._AssertListsHaveSameElements(
self._GetNamesOfKeyFlags(main_module, fv),
[flag_name_0, flag_name_2])
# Try with a special (not user-defined) flag too:
gflags.DECLARE_key_flag('undefok', flag_values=fv)
self._AssertListsHaveSameElements(
self._GetNamesOfKeyFlags(main_module, fv),
[flag_name_0, flag_name_2, 'undefok'])
gflags.ADOPT_module_key_flags(module_bar, fv)
self._AssertListsHaveSameElements(
self._GetNamesOfKeyFlags(main_module, fv),
names_of_flags_defined_by_bar + ['undefok'])
# Adopt key flags from the flags module itself.
gflags.ADOPT_module_key_flags(gflags, flag_values=fv)
self._AssertListsHaveSameElements(
self._GetNamesOfKeyFlags(main_module, fv),
names_of_flags_defined_by_bar + ['flagfile', 'undefok'])
def testMainModuleHelpWithKeyFlags(self):
# Similar to test_main_module_help, but this time we make sure to
# declare some key flags.
# Safety check that the main module does not declare any flags
# at the beginning of this test.
expected_help = ''
self.assertMultiLineEqual(expected_help, self.flag_values.MainModuleHelp())
# Define one flag in this main module and some flags in modules
# a and b. Also declare one flag from module a and one flag
# from module b as key flags for the main module.
gflags.DEFINE_integer('main_module_int_fg', 1,
'Integer flag in the main module.',
flag_values=self.flag_values)
try:
main_module_int_fg_help = (
" --main_module_int_fg: Integer flag in the main module.\n"
" (default: '1')\n"
" (an integer)")
expected_help += "\n%s:\n%s" % (sys.argv[0], main_module_int_fg_help)
self.assertMultiLineEqual(expected_help,
self.flag_values.MainModuleHelp())
# The following call should be a no-op: any flag declared by a
# module is automatically key for that module.
gflags.DECLARE_key_flag('main_module_int_fg', flag_values=self.flag_values)
self.assertMultiLineEqual(expected_help,
self.flag_values.MainModuleHelp())
# The definition of a few flags in an imported module should not
# change the main module help.
module_foo.DefineFlags(flag_values=self.flag_values)
self.assertMultiLineEqual(expected_help,
self.flag_values.MainModuleHelp())
gflags.DECLARE_key_flag('tmod_foo_bool', flag_values=self.flag_values)
tmod_foo_bool_help = (
" --[no]tmod_foo_bool: Boolean flag from module foo.\n"
" (default: 'true')")
expected_help += "\n" + tmod_foo_bool_help
self.assertMultiLineEqual(expected_help,
self.flag_values.MainModuleHelp())
gflags.DECLARE_key_flag('tmod_bar_z', flag_values=self.flag_values)
tmod_bar_z_help = (
" --[no]tmod_bar_z: Another boolean flag from module bar.\n"
" (default: 'false')")
# Unfortunately, there is some flag sorting inside
# MainModuleHelp, so we can't keep incrementally extending
# the expected_help string ...
expected_help = ("\n%s:\n%s\n%s\n%s" %
(sys.argv[0],
main_module_int_fg_help,
tmod_bar_z_help,
tmod_foo_bool_help))
self.assertMultiLineEqual(self.flag_values.MainModuleHelp(),
expected_help)
finally:
# At the end, delete all the flag information we created.
self.flag_values.__delattr__('main_module_int_fg')
module_foo.RemoveFlags(flag_values=self.flag_values)
def test_ADOPT_module_key_flags(self):
# Check that ADOPT_module_key_flags raises an exception when
# called with a module name (as opposed to a module object).
self.assertRaises(gflags.FlagsError,
gflags.ADOPT_module_key_flags,
'pyglib.app')
class GetCallingModuleTest(googletest.TestCase):
"""Test whether we correctly determine the module which defines the flag."""
def test_GetCallingModule(self):
self.assertEqual(gflags._GetCallingModule(), sys.argv[0])
self.assertEqual(
module_foo.GetModuleName(),
'flags_modules_for_testing.module_foo')
self.assertEqual(
module_bar.GetModuleName(),
'flags_modules_for_testing.module_bar')
# We execute the following exec statements for their side-effect
# (i.e., not raising an error). They emphasize the case that not
# all code resides in one of the imported modules: Python is a
# really dynamic language, where we can dynamically construct some
# code and execute it.
code = ("import gflags\n"
"module_name = gflags._GetCallingModule()")
exec(code)
# Next two exec statements executes code with a global environment
# that is different from the global environment of any imported
# module.
exec(code, {})
# vars(self) returns a dictionary corresponding to the symbol
# table of the self object. dict(...) makes a distinct copy of
# this dictionary, such that any new symbol definition by the
# exec-ed code (e.g., import flags, module_name = ...) does not
# affect the symbol table of self.
exec(code, dict(vars(self)))
# Next test is actually more involved: it checks not only that
# _GetCallingModule does not crash inside exec code, it also checks
# that it returns the expected value: the code executed via exec
# code is treated as being executed by the current module. We
# check it twice: first time by executing exec from the main
# module, second time by executing it from module_bar.
global_dict = {}
exec(code, global_dict)
self.assertEqual(global_dict['module_name'],
sys.argv[0])
global_dict = {}
module_bar.ExecuteCode(code, global_dict)
self.assertEqual(
global_dict['module_name'],
'flags_modules_for_testing.module_bar')
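# The exec checks above make the attribution rule concrete: code run via
# exec has no module of its own, so _GetCallingModule charges it to the
# module that invoked exec (the main module here, and module_bar when
# module_bar.ExecuteCode runs the same string).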
def test_GetCallingModuleWithIteritemsError(self):
# This test checks that _GetCallingModule is using
# sys.modules.items(), instead of .iteritems().
orig_sys_modules = sys.modules
# Mock sys.modules: simulates error produced by importing a module
# in parallel with our iteration over sys.modules.iteritems().
class SysModulesMock(dict):
def __init__(self, original_content):
dict.__init__(self, original_content)
def iteritems(self):
# Any dictionary method is fine, but not .iteritems().
raise RuntimeError('dictionary changed size during iteration')
sys.modules = SysModulesMock(orig_sys_modules)
try:
# _GetCallingModule should still work as expected:
self.assertEqual(gflags._GetCallingModule(), sys.argv[0])
self.assertEqual(
module_foo.GetModuleName(),
'flags_modules_for_testing.module_foo')
finally:
sys.modules = orig_sys_modules
class FindModuleTest(googletest.TestCase):
"""Testing methods that find a module that defines a given flag."""
def testFindModuleDefiningFlag(self):
self.assertEqual('default', FLAGS.FindModuleDefiningFlag(
'__NON_EXISTENT_FLAG__', 'default'))
self.assertEqual(
module_baz.__name__, FLAGS.FindModuleDefiningFlag('tmod_baz_x'))
def testFindModuleIdDefiningFlag(self):
self.assertEqual('default', FLAGS.FindModuleIdDefiningFlag(
'__NON_EXISTENT_FLAG__', 'default'))
self.assertEqual(
id(module_baz), FLAGS.FindModuleIdDefiningFlag('tmod_baz_x'))
class FlagsErrorMessagesTest(googletest.TestCase):
"""Testing special cases for integer and float flags error messages."""
def setUp(self):
# make sure we are using the old, stupid way of parsing flags.
self.flag_values = gflags.FlagValues()
self.flag_values.UseGnuGetOpt(False)
def testIntegerErrorText(self):
# Make sure we get proper error text
gflags.DEFINE_integer('positive', 4, 'non-negative flag', lower_bound=1,
flag_values=self.flag_values)
gflags.DEFINE_integer('non_negative', 4, 'positive flag', lower_bound=0,
flag_values=self.flag_values)
gflags.DEFINE_integer('negative', -4, 'negative flag', upper_bound=-1,
flag_values=self.flag_values)
gflags.DEFINE_integer('non_positive', -4, 'non-positive flag', upper_bound=0,
flag_values=self.flag_values)
gflags.DEFINE_integer('greater', 19, 'greater-than flag', lower_bound=4,
flag_values=self.flag_values)
gflags.DEFINE_integer('smaller', -19, 'smaller-than flag', upper_bound=4,
flag_values=self.flag_values)
gflags.DEFINE_integer('usual', 4, 'usual flag', lower_bound=0,
upper_bound=10000, flag_values=self.flag_values)
gflags.DEFINE_integer('another_usual', 0, 'usual flag', lower_bound=-1,
upper_bound=1, flag_values=self.flag_values)
self._CheckErrorMessage('positive', -4, 'a positive integer')
self._CheckErrorMessage('non_negative', -4, 'a non-negative integer')
self._CheckErrorMessage('negative', 0, 'a negative integer')
self._CheckErrorMessage('non_positive', 4, 'a non-positive integer')
self._CheckErrorMessage('usual', -4, 'an integer in the range [0, 10000]')
self._CheckErrorMessage('another_usual', 4,
'an integer in the range [-1, 1]')
self._CheckErrorMessage('greater', -5, 'integer >= 4')
self._CheckErrorMessage('smaller', 5, 'integer <= 4')
def testFloatErrorText(self):
gflags.DEFINE_float('positive', 4, 'non-negative flag', lower_bound=1,
flag_values=self.flag_values)
gflags.DEFINE_float('non_negative', 4, 'positive flag', lower_bound=0,
flag_values=self.flag_values)
gflags.DEFINE_float('negative', -4, 'negative flag', upper_bound=-1,
flag_values=self.flag_values)
gflags.DEFINE_float('non_positive', -4, 'non-positive flag', upper_bound=0,
flag_values=self.flag_values)
gflags.DEFINE_float('greater', 19, 'greater-than flag', lower_bound=4,
flag_values=self.flag_values)
gflags.DEFINE_float('smaller', -19, 'smaller-than flag', upper_bound=4,
flag_values=self.flag_values)
gflags.DEFINE_float('usual', 4, 'usual flag', lower_bound=0,
upper_bound=10000, flag_values=self.flag_values)
gflags.DEFINE_float('another_usual', 0, 'usual flag', lower_bound=-1,
upper_bound=1, flag_values=self.flag_values)
self._CheckErrorMessage('positive', 0.5, 'number >= 1')
self._CheckErrorMessage('non_negative', -4.0, 'a non-negative number')
self._CheckErrorMessage('negative', 0.5, 'number <= -1')
self._CheckErrorMessage('non_positive', 4.0, 'a non-positive number')
self._CheckErrorMessage('usual', -4.0, 'a number in the range [0, 10000]')
self._CheckErrorMessage('another_usual', 4.0,
'a number in the range [-1, 1]')
self._CheckErrorMessage('smaller', 5.0, 'number <= 4')
def _CheckErrorMessage(self, flag_name, flag_value, expected_message_suffix):
"""Set a flag to a given value and make sure we get expected message."""
try:
self.flag_values.__setattr__(flag_name, flag_value)
raise AssertionError('Bounds exception not raised!')
except gflags.IllegalFlagValue, e:
expected = ('flag --%(name)s=%(value)s: %(value)s is not %(suffix)s' %
{'name': flag_name, 'value': flag_value,
'suffix': expected_message_suffix})
self.assertEquals(str(e), expected)
def main():
googletest.main()
if __name__ == '__main__':
main()
|
huard/scipy-work
|
refs/heads/master
|
scipy/special/tests/test_basic.py
|
1
|
#this program corresponds to special.py
### Means test is not done yet
#E Means test is giving error (E)
#F Means test is failing (F)
#EF Means test is giving error and Failing
#! Means test is segfaulting
#8 Means test runs forever
### test_besselpoly
### test_jnjnp_zeros
### test_mathieu_a
### test_mathieu_even_coef
### test_mathieu_odd_coef
### test_modfresnelp
### test_modfresnelm
# test_pbdv_seq
### test_pbvv_seq
### test_sph_harm
# test_sph_in
# test_sph_jn
# test_sph_kn
from numpy import array, exp, allclose, asanyarray
from numpy.testing import *
from scipy.special import *
import scipy.special._cephes as cephes
import numpy as np
def assert_tol_equal(a, b, rtol=1e-7, atol=0, err_msg='', verbose=True):
"""Assert that `a` and `b` are equal to tolerance ``atol + rtol*abs(b)``"""
def compare(x, y):
return allclose(x, y, rtol=rtol, atol=atol)
a, b = asanyarray(a), asanyarray(b)
header = 'Not equal to tolerance rtol=%g, atol=%g' % (rtol, atol)
np.testing.utils.assert_array_compare(compare, a, b, err_msg=str(err_msg),
verbose=verbose, header=header)
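# Minimal usage sketch for the helper above (values are hypothetical):
#   assert_tol_equal([1.0, 2.0], [1.0, 2.0 + 1e-9])   # passes: 1e-9 <= rtol*2.0
#   assert_tol_equal(1.0, 1.1)                        # raises AssertionError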
class TestCephes(TestCase):
def test_airy(self):
cephes.airy(0)
def test_airye(self):
cephes.airye(0)
def test_bdtr(self):
assert_equal(cephes.bdtr(1,1,0.5),1.0)
def test_bdtri(self):
assert_equal(cephes.bdtri(1,3,0.5),0.5)
def test_bdtrc(self):
assert_equal(cephes.bdtrc(1,3,0.5),0.5)
def test_bdtrin(self):
assert_equal(cephes.bdtrin(1,0,1),5.0)
def test_bdtrik(self):
cephes.bdtrik(1,3,0.5)
def test_bei(self):
assert_equal(cephes.bei(0),0.0)
def test_beip(self):
assert_equal(cephes.beip(0),0.0)
def test_ber(self):
assert_equal(cephes.ber(0),1.0)
def test_berp(self):
assert_equal(cephes.berp(0),0.0)
def test_besselpoly(self):
assert_equal(cephes.besselpoly(0,0,0),1.0)
def test_beta(self):
assert_equal(cephes.beta(1,1),1.0)
def test_betainc(self):
assert_equal(cephes.betainc(1,1,1),1.0)
def test_betaln(self):
assert_equal(cephes.betaln(1,1),0.0)
def test_betaincinv(self):
assert_equal(cephes.betaincinv(1,1,1),1.0)
def test_btdtr(self):
assert_equal(cephes.btdtr(1,1,1),1.0)
def test_btdtri(self):
assert_equal(cephes.btdtri(1,1,1),1.0)
def test_btdtria(self):
assert_equal(cephes.btdtria(1,1,1),5.0)
def test_btdtrib(self):
assert_equal(cephes.btdtrib(1,1,1),5.0)
def test_cbrt(self):
assert_approx_equal(cephes.cbrt(1),1.0)
def test_chdtr(self):
assert_equal(cephes.chdtr(1,0),0.0)
def test_chdtrc(self):
assert_equal(cephes.chdtrc(1,0),1.0)
def test_chdtri(self):
assert_equal(cephes.chdtri(1,1),0.0)
def test_chdtriv(self):
assert_equal(cephes.chdtriv(0,0),5.0)
def test_chndtr(self):
assert_equal(cephes.chndtr(0,1,0),0.0)
def test_chndtridf(self):
assert_equal(cephes.chndtridf(0,0,1),5.0)
def test_chndtrinc(self):
assert_equal(cephes.chndtrinc(0,1,0),5.0)
def test_chndtrix(self):
assert_equal(cephes.chndtrix(0,1,0),0.0)
def test_cosdg(self):
assert_equal(cephes.cosdg(0),1.0)
def test_cosm1(self):
assert_equal(cephes.cosm1(0),0.0)
def test_cotdg(self):
assert_almost_equal(cephes.cotdg(45),1.0)
def test_dawsn(self):
assert_equal(cephes.dawsn(0),0.0)
def test_ellipe(self):
assert_equal(cephes.ellipe(1),1.0)
def test_ellipeinc(self):
assert_equal(cephes.ellipeinc(0,1),0.0)
def test_ellipj(self):
cephes.ellipj(0,1)
def test_ellipk(self):
cephes.ellipk(0)#==pi/2
def test_ellipkinc(self):
assert_equal(cephes.ellipkinc(0,0),0.0)
def test_erf(self):
assert_equal(cephes.erf(0),0.0)
def test_erfc(self):
assert_equal(cephes.erfc(0),1.0)
def test_exp1(self):
cephes.exp1(1)
def test_expi(self):
cephes.expi(1)
def test_expn(self):
cephes.expn(1,1)
def test_exp1_reg(self):
# Regression for #834
a = cephes.exp1(-complex(19.9999990))
b = cephes.exp1(-complex(19.9999991))
assert_array_almost_equal(a.imag, b.imag)
def test_exp10(self):
assert_approx_equal(cephes.exp10(2),100.0)
def test_exp2(self):
assert_equal(cephes.exp2(2),4.0)
def test_expm1(self):
assert_equal(cephes.expm1(0),0.0)
def test_fdtr(self):
assert_equal(cephes.fdtr(1,1,0),0.0)
def test_fdtrc(self):
assert_equal(cephes.fdtrc(1,1,0),1.0)
def test_fdtri(self):
cephes.fdtri(1,1,0.5)
def test_fdtridfd(self):
assert_equal(cephes.fdtridfd(1,0,0),5.0)
def test_fresnel(self):
assert_equal(cephes.fresnel(0),(0.0,0.0))
def test_gamma(self):
assert_equal(cephes.gamma(5),24.0)
def test_gammainc(self):
assert_equal(cephes.gammainc(5,0),0.0)
def test_gammaincc(self):
assert_equal(cephes.gammaincc(5,0),1.0)
def test_gammainccinv(self):
assert_equal(cephes.gammainccinv(5,1),0.0)
def test_gammaln(self):
cephes.gammaln(10)
def test_gdtr(self):
assert_equal(cephes.gdtr(1,1,0),0.0)
def test_gdtrc(self):
assert_equal(cephes.gdtrc(1,1,0),1.0)
def test_gdtria(self):
assert_equal(cephes.gdtria(0,1,1),0.0)
def test_gdtrib(self):
cephes.gdtrib(1,0,1)
#assert_equal(cephes.gdtrib(1,0,1),5.0)
def test_gdtrix(self):
cephes.gdtrix(1,1,.1)
def test_hankel1(self):
cephes.hankel1(1,1)
def test_hankel1e(self):
cephes.hankel1e(1,1)
def test_hankel2(self):
cephes.hankel2(1,1)
def test_hankel2e(self):
cephes.hankel2e(1,1)
def test_hyp1f1(self):
assert_approx_equal(cephes.hyp1f1(1,1,1), exp(1.0))
assert_approx_equal(cephes.hyp1f1(3,4,-6), 0.026056422099537251095)
cephes.hyp1f1(1,1,1)
def test_hyp1f2(self):
cephes.hyp1f2(1,1,1,1)
def test_hyp2f0(self):
cephes.hyp2f0(1,1,1,1)
def test_hyp2f1(self):
assert_equal(cephes.hyp2f1(1,1,1,0),1.0)
def test_hyp3f0(self):
assert_equal(cephes.hyp3f0(1,1,1,0),(1.0,0.0))
def test_hyperu(self):
assert_equal(cephes.hyperu(0,1,1),1.0)
def test_i0(self):
assert_equal(cephes.i0(0),1.0)
def test_i0e(self):
assert_equal(cephes.i0e(0),1.0)
def test_i1(self):
assert_equal(cephes.i1(0),0.0)
def test_i1e(self):
assert_equal(cephes.i1e(0),0.0)
def test_it2i0k0(self):
cephes.it2i0k0(1)
def test_it2j0y0(self):
cephes.it2j0y0(1)
def test_it2struve0(self):
cephes.it2struve0(1)
def test_itairy(self):
cephes.itairy(1)
def test_iti0k0(self):
assert_equal(cephes.iti0k0(0),(0.0,0.0))
def test_itj0y0(self):
assert_equal(cephes.itj0y0(0),(0.0,0.0))
def test_itmodstruve0(self):
assert_equal(cephes.itmodstruve0(0),0.0)
def test_itstruve0(self):
assert_equal(cephes.itstruve0(0),0.0)
def test_iv(self):
assert_equal(cephes.iv(1,0),0.0)
def _check_ive(self):
assert_equal(cephes.ive(1,0),0.0)
def test_j0(self):
assert_equal(cephes.j0(0),1.0)
def test_j1(self):
assert_equal(cephes.j1(0),0.0)
def test_jn(self):
assert_equal(cephes.jn(0,0),1.0)
def test_jv(self):
assert_equal(cephes.jv(0,0),1.0)
def _check_jve(self):
assert_equal(cephes.jve(0,0),1.0)
def test_k0(self):
cephes.k0(2)
def test_k0e(self):
cephes.k0e(2)
def test_k1(self):
cephes.k1(2)
def test_k1e(self):
cephes.k1e(2)
def test_kei(self):
cephes.kei(2)
def test_keip(self):
assert_equal(cephes.keip(0),0.0)
def test_ker(self):
cephes.ker(2)
def test_kerp(self):
cephes.kerp(2)
def _check_kelvin(self):
cephes.kelvin(2)
def test_kn(self):
cephes.kn(1,1)
def test_kolmogi(self):
assert_equal(cephes.kolmogi(1),0.0)
def test_kolmogorov(self):
assert_equal(cephes.kolmogorov(0),1.0)
def _check_kv(self):
cephes.kv(1,1)
def _check_kve(self):
cephes.kve(1,1)
def test_log1p(self):
assert_equal(cephes.log1p(0),0.0)
def test_lpmv(self):
assert_equal(cephes.lpmv(0,0,1),1.0)
def test_mathieu_a(self):
assert_equal(cephes.mathieu_a(1,0),1.0)
def test_mathieu_b(self):
assert_equal(cephes.mathieu_b(1,0),1.0)
def test_mathieu_cem(self):
assert_equal(cephes.mathieu_cem(1,0,0),(1.0,0.0))
def test_mathieu_modcem1(self):
assert_equal(cephes.mathieu_modcem1(1,0,0),(0.0,0.0))
def test_mathieu_modcem2(self):
cephes.mathieu_modcem2(1,1,1)
def test_mathieu_sem(self):
assert_equal(cephes.mathieu_sem(1,0,0),(0.0,1.0))
def test_mathieu_modsem1(self):
assert_equal(cephes.mathieu_modsem1(1,0,0),(0.0,0.0))
def test_mathieu_modsem2(self):
cephes.mathieu_modsem2(1,1,1)
def test_modfresnelm(self):
cephes.modfresnelm(0)
def test_modfresnelp(self):
cephes.modfresnelp(0)
def _check_modstruve(self):
assert_equal(cephes.modstruve(1,0),0.0)
def test_nbdtr(self):
assert_equal(cephes.nbdtr(1,1,1),1.0)
def test_nbdtrc(self):
assert_equal(cephes.nbdtrc(1,1,1),0.0)
def test_nbdtri(self):
assert_equal(cephes.nbdtri(1,1,1),1.0)
def __check_nbdtrik(self):
cephes.nbdtrik(1,.4,.5)
def test_nbdtrin(self):
assert_equal(cephes.nbdtrin(1,0,0),5.0)
def test_ncfdtr(self):
assert_equal(cephes.ncfdtr(1,1,1,0),0.0)
def test_ncfdtri(self):
assert_equal(cephes.ncfdtri(1,1,1,0),0.0)
def test_ncfdtridfd(self):
cephes.ncfdtridfd(1,0.5,0,1)
def __check_ncfdtridfn(self):
cephes.ncfdtridfn(1,0.5,0,1)
def __check_ncfdtrinc(self):
cephes.ncfdtrinc(1,0.5,0,1)
def test_nctdtr(self):
assert_equal(cephes.nctdtr(1,0,0),0.5)
def __check_nctdtridf(self):
cephes.nctdtridf(1,0.5,0)
def test_nctdtrinc(self):
cephes.nctdtrinc(1,0,0)
def test_nctdtrit(self):
cephes.nctdtrit(.1,0.2,.5)
def test_ndtr(self):
assert_equal(cephes.ndtr(0), 0.5)
assert_almost_equal(cephes.ndtr(1), 0.84134474606)
def test_ndtri(self):
assert_equal(cephes.ndtri(0.5),0.0)
def test_nrdtrimn(self):
assert_approx_equal(cephes.nrdtrimn(0.5,1,1),1.0)
def test_nrdtrisd(self):
assert_equal(cephes.nrdtrisd(0.5,0.5,0.5),0.0)
def test_obl_ang1(self):
cephes.obl_ang1(1,1,1,0)
def test_obl_ang1_cv(self):
result = cephes.obl_ang1_cv(1,1,1,1,0)
assert_almost_equal(result[0],1.0)
assert_almost_equal(result[1],0.0)
def _check_obl_cv(self):
assert_equal(cephes.obl_cv(1,1,0),2.0)
def test_obl_rad1(self):
cephes.obl_rad1(1,1,1,0)
def test_obl_rad1_cv(self):
cephes.obl_rad1_cv(1,1,1,1,0)
def test_obl_rad2(self):
cephes.obl_rad2(1,1,1,0)
def test_obl_rad2_cv(self):
cephes.obl_rad2_cv(1,1,1,1,0)
def test_pbdv(self):
assert_equal(cephes.pbdv(1,0),(0.0,1.0))
def test_pbvv(self):
cephes.pbvv(1,0)
def test_pbwa(self):
cephes.pbwa(1,0)
def test_pdtr(self):
cephes.pdtr(0,1)
def test_pdtrc(self):
cephes.pdtrc(0,1)
def test_pdtri(self):
cephes.pdtri(0.5,0.5)
def test_pdtrik(self):
cephes.pdtrik(0.5,1)
def test_pro_ang1(self):
cephes.pro_ang1(1,1,1,0)
def test_pro_ang1_cv(self):
assert_array_almost_equal(cephes.pro_ang1_cv(1,1,1,1,0),
array((1.0,0.0)))
def _check_pro_cv(self):
assert_equal(cephes.pro_cv(1,1,0),2.0)
def test_pro_rad1(self):
cephes.pro_rad1(1,1,1,0.1)
def test_pro_rad1_cv(self):
cephes.pro_rad1_cv(1,1,1,1,0)
def test_pro_rad2(self):
cephes.pro_rad2(1,1,1,0)
def test_pro_rad2_cv(self):
cephes.pro_rad2_cv(1,1,1,1,0)
def test_psi(self):
cephes.psi(1)
def test_radian(self):
assert_equal(cephes.radian(0,0,0),0)
def test_rgamma(self):
assert_equal(cephes.rgamma(1),1.0)
def test_round(self):
assert_equal(cephes.round(3.4),3.0)
assert_equal(cephes.round(-3.4),-3.0)
assert_equal(cephes.round(3.6),4.0)
assert_equal(cephes.round(-3.6),-4.0)
assert_equal(cephes.round(3.5),4.0)
assert_equal(cephes.round(-3.5),-4.0)
def test_shichi(self):
cephes.shichi(1)
def test_sici(self):
cephes.sici(1)
def test_sindg(self):
assert_equal(cephes.sindg(90),1.0)
def test_smirnov(self):
assert_equal(cephes.smirnov(1,.1),0.9)
def test_smirnovi(self):
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.4)),0.4)
assert_almost_equal(cephes.smirnov(1,cephes.smirnovi(1,0.6)),0.6)
def test_spence(self):
assert_equal(cephes.spence(1),0.0)
def test_stdtr(self):
assert_equal(cephes.stdtr(1,0),0.5)
def test_stdtridf(self):
cephes.stdtridf(0.7,1)
def test_stdtrit(self):
cephes.stdtrit(1,0.7)
def test_struve(self):
assert_equal(cephes.struve(0,0),0.0)
def test_tandg(self):
assert_equal(cephes.tandg(45),1.0)
def test_tklmbda(self):
assert_almost_equal(cephes.tklmbda(1,1),1.0)
def test_y0(self):
cephes.y0(1)
def test_y1(self):
cephes.y1(1)
def test_yn(self):
cephes.yn(1,1)
def test_yv(self):
cephes.yv(1,1)
def _check_yve(self):
cephes.yve(1,1)
def test_zeta(self):
cephes.zeta(2,2)
def test_zetac(self):
assert_equal(cephes.zetac(0),-1.5)
def test_wofz(self):
cephes.wofz(0)
class TestAiry(TestCase):
def test_airy(self):
#This tests the airy function to ensure 8 place accuracy in computation
x = airy(.99)
assert_array_almost_equal(x,array([0.13689066,-0.16050153,1.19815925,0.92046818]),8)
x = airy(.41)
assert_array_almost_equal(x,array([0.25238916,-.23480512,0.80686202,0.51053919]),8)
x = airy(-.36)
assert_array_almost_equal(x,array([0.44508477,-0.23186773,0.44939534,0.48105354]),8)
def test_airye(self):
a = airye(0.01)
b = airy(0.01)
b1 = [None]*4
for n in range(2):
b1[n] = b[n]*exp(2.0/3.0*0.01*sqrt(0.01))
for n in range(2,4):
b1[n] = b[n]*exp(-abs(real(2.0/3.0*0.01*sqrt(0.01))))
assert_array_almost_equal(a,b1,6)
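    # The scaling checked above is the standard one for the exponentially
    # scaled Airy functions: (Ai, Ai') are multiplied by exp((2/3)*z*sqrt(z))
    # and (Bi, Bi') by exp(-|Re((2/3)*z*sqrt(z))|).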
def test_bi_zeros(self):
bi = bi_zeros(2)
bia = (array([-1.17371322, -3.2710930]),
array([-2.29443968, -4.07315509]),
array([-0.45494438, 0.39652284]),
array([ 0.60195789 , -0.76031014]))
assert_array_almost_equal(bi,bia,4)
def test_ai_zeros(self):
ai = ai_zeros(1)
assert_array_almost_equal(ai,(array([-2.33810741]),
array([-1.01879297]),
array([ 0.5357]),
array([ 0.7012])),4)
class TestAssocLaguerre(TestCase):
def test_assoc_laguerre(self):
a1 = genlaguerre(11,1)
a2 = assoc_laguerre(.2,11,1)
assert_array_almost_equal(a2,a1(.2),8)
a2 = assoc_laguerre(1,11,1)
assert_array_almost_equal(a2,a1(1),8)
class TestBesselpoly(TestCase):
def test_besselpoly(self):
pass
class TestKelvin(TestCase):
def test_bei(self):
mbei = bei(2)
assert_almost_equal(mbei, 0.9722916273066613,5)#this may not be exact
def test_beip(self):
mbeip = beip(2)
assert_almost_equal(mbeip,0.91701361338403631,5)#this may not be exact
def test_ber(self):
mber = ber(2)
assert_almost_equal(mber,0.75173418271380821,5)#this may not be exact
def test_berp(self):
mberp = berp(2)
assert_almost_equal(mberp,-0.49306712470943909,5)#this may not be exact
def test_bei_zeros(self):
bi = bi_zeros(5)
assert_array_almost_equal(bi[0],array([-1.173713222709127,
-3.271093302836352,
-4.830737841662016,
-6.169852128310251,
-7.376762079367764]),11)
assert_array_almost_equal(bi[1],array([-2.294439682614122,
-4.073155089071828,
-5.512395729663599,
-6.781294445990305,
-7.940178689168587]),10)
assert_array_almost_equal(bi[2],array([-0.454944383639657,
0.396522836094465,
-0.367969161486959,
0.349499116831805,
-0.336026240133662]),11)
assert_array_almost_equal(bi[3],array([0.601957887976239,
-0.760310141492801,
0.836991012619261,
-0.88947990142654,
0.929983638568022]),11)
def test_beip_zeros(self):
bip = beip_zeros(5)
assert_array_almost_equal(bip,array([ 3.772673304934953,
8.280987849760042,
12.742147523633703,
17.193431752512542,
21.641143941167325]),4)
def test_ber_zeros(self):
ber = ber_zeros(5)
assert_array_almost_equal(ber,array([2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
def test_berp_zeros(self):
brp = berp_zeros(5)
assert_array_almost_equal(brp,array([6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
def test_kelvin(self):
mkelv = kelvin(2)
assert_array_almost_equal(mkelv,(ber(2)+bei(2)*1j,
ker(2)+kei(2)*1j,
berp(2)+beip(2)*1j,
kerp(2)+keip(2)*1j),8)
def test_kei(self):
mkei = kei(2)
assert_almost_equal(mkei,-0.20240006776470432,5)
def test_keip(self):
mkeip = keip(2)
assert_almost_equal(mkeip,0.21980790991960536,5)
def test_ker(self):
mker = ker(2)
assert_almost_equal(mker,-0.041664513991509472,5)
def test_kerp(self):
mkerp = kerp(2)
assert_almost_equal(mkerp,-0.10660096588105264,5)
def test_kei_zeros(self):
kei = kei_zeros(5)
assert_array_almost_equal(kei,array([ 3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
def test_keip_zeros(self):
keip = keip_zeros(5)
assert_array_almost_equal(keip,array([ 4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
# numbers come from 9.9 of A&S pg. 381
def test_kelvin_zeros(self):
tmp = kelvin_zeros(5)
berz,beiz,kerz,keiz,berpz,beipz,kerpz,keipz = tmp
assert_array_almost_equal(berz,array([ 2.84892,
7.23883,
11.67396,
16.11356,
20.55463]),4)
assert_array_almost_equal(beiz,array([ 5.02622,
9.45541,
13.89349,
18.33398,
22.77544]),4)
assert_array_almost_equal(kerz,array([ 1.71854,
6.12728,
10.56294,
15.00269,
19.44382]),4)
assert_array_almost_equal(keiz,array([ 3.91467,
8.34422,
12.78256,
17.22314,
21.66464]),4)
assert_array_almost_equal(berpz,array([ 6.03871,
10.51364,
14.96844,
19.41758,
23.86430]),4)
assert_array_almost_equal(beipz,array([ 3.77267,
# table from 1927 had 3.77320
# but this is more accurate
8.28099,
12.74215,
17.19343,
21.64114]),4)
assert_array_almost_equal(kerpz,array([ 2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
assert_array_almost_equal(keipz,array([ 4.93181,
9.40405,
13.85827,
18.30717,
22.75379]),4)
def test_ker_zeros(self):
ker = ker_zeros(5)
assert_array_almost_equal(ker,array([ 1.71854,
6.12728,
10.56294,
15.00269,
19.44381]),4)
def test_kerp_zeros(self):
kerp = kerp_zeros(5)
assert_array_almost_equal(kerp,array([ 2.66584,
7.17212,
11.63218,
16.08312,
20.53068]),4)
class TestBernoulli(TestCase):
def test_bernoulli(self):
brn = bernoulli(5)
assert_array_almost_equal(brn,array([1.0000,
-0.5000,
0.1667,
0.0000,
-0.0333,
0.0000]),4)
class TestBeta(TestCase):
def test_beta(self):
bet = beta(2,4)
betg = (gamma(2)*gamma(4))/gamma(6)
assert_almost_equal(bet,betg,8)
def test_betaln(self):
betln = betaln(2,4)
bet = log(abs(beta(2,4)))
assert_almost_equal(betln,bet,8)
def test_betainc(self):
btinc = betainc(1,1,.2)
assert_almost_equal(btinc,0.2,8)
def test_betaincinv(self):
y = betaincinv(2,4,.5)
comp = betainc(2,4,y)
assert_almost_equal(comp,.5,5)
class TestCheby(TestCase):
def test_chebyc(self):
C0 = chebyc(0)
C1 = chebyc(1)
C2 = chebyc(2)
C3 = chebyc(3)
C4 = chebyc(4)
C5 = chebyc(5)
assert_array_almost_equal(C0.c,[2],13)
assert_array_almost_equal(C1.c,[1,0],13)
assert_array_almost_equal(C2.c,[1,0,-2],13)
assert_array_almost_equal(C3.c,[1,0,-3,0],13)
assert_array_almost_equal(C4.c,[1,0,-4,0,2],13)
assert_array_almost_equal(C5.c,[1,0,-5,0,5,0],13)
def test_chebys(self):
S0 = chebys(0)
S1 = chebys(1)
S2 = chebys(2)
S3 = chebys(3)
S4 = chebys(4)
S5 = chebys(5)
assert_array_almost_equal(S0.c,[1],13)
assert_array_almost_equal(S1.c,[1,0],13)
assert_array_almost_equal(S2.c,[1,0,-1],13)
assert_array_almost_equal(S3.c,[1,0,-2,0],13)
assert_array_almost_equal(S4.c,[1,0,-3,0,1],13)
assert_array_almost_equal(S5.c,[1,0,-4,0,3,0],13)
def test_chebyt(self):
T0 = chebyt(0)
T1 = chebyt(1)
T2 = chebyt(2)
T3 = chebyt(3)
T4 = chebyt(4)
T5 = chebyt(5)
assert_array_almost_equal(T0.c,[1],13)
assert_array_almost_equal(T1.c,[1,0],13)
assert_array_almost_equal(T2.c,[2,0,-1],13)
assert_array_almost_equal(T3.c,[4,0,-3,0],13)
assert_array_almost_equal(T4.c,[8,0,-8,0,1],13)
assert_array_almost_equal(T5.c,[16,0,-20,0,5,0],13)
def test_chebyu(self):
U0 = chebyu(0)
U1 = chebyu(1)
U2 = chebyu(2)
U3 = chebyu(3)
U4 = chebyu(4)
U5 = chebyu(5)
assert_array_almost_equal(U0.c,[1],13)
assert_array_almost_equal(U1.c,[2,0],13)
assert_array_almost_equal(U2.c,[4,0,-1],13)
assert_array_almost_equal(U3.c,[8,0,-4,0],13)
assert_array_almost_equal(U4.c,[16,0,-12,0,1],13)
assert_array_almost_equal(U5.c,[32,0,-32,0,6,0],13)
class TestTrigonometric(TestCase):
def test_cbrt(self):
cb = cbrt(27)
cbrl = 27**(1.0/3.0)
assert_approx_equal(cb,cbrl)
def test_cbrtmore(self):
cb1 = cbrt(27.9)
cbrl1 = 27.9**(1.0/3.0)
assert_almost_equal(cb1,cbrl1,8)
def test_cosdg(self):
cdg = cosdg(90)
cdgrl = cos(pi/2.0)
assert_almost_equal(cdg,cdgrl,8)
def test_cosdgmore(self):
cdgm = cosdg(30)
cdgmrl = cos(pi/6.0)
assert_almost_equal(cdgm,cdgmrl,8)
def test_cosm1(self):
cs = (cosm1(0),cosm1(.3),cosm1(pi/10))
csrl = (cos(0)-1,cos(.3)-1,cos(pi/10)-1)
assert_array_almost_equal(cs,csrl,8)
def test_cotdg(self):
ct = cotdg(30)
ctrl = tan(pi/6.0)**(-1)
assert_almost_equal(ct,ctrl,8)
def test_cotdgmore(self):
ct1 = cotdg(45)
ctrl1 = tan(pi/4.0)**(-1)
assert_almost_equal(ct1,ctrl1,8)
def test_specialpoints(self):
assert_almost_equal(cotdg(45), 1.0, 14)
assert_almost_equal(cotdg(-45), -1.0, 14)
assert_almost_equal(cotdg(90), 0.0, 14)
assert_almost_equal(cotdg(-90), 0.0, 14)
assert_almost_equal(cotdg(135), -1.0, 14)
assert_almost_equal(cotdg(-135), 1.0, 14)
assert_almost_equal(cotdg(225), 1.0, 14)
assert_almost_equal(cotdg(-225), -1.0, 14)
assert_almost_equal(cotdg(270), 0.0, 14)
assert_almost_equal(cotdg(-270), 0.0, 14)
assert_almost_equal(cotdg(315), -1.0, 14)
assert_almost_equal(cotdg(-315), 1.0, 14)
assert_almost_equal(cotdg(765), 1.0, 14)
def test_sinc(self):
c = arange(-2,2,.1)
y = sinc(c)
yre = sin(pi*c)/(pi*c)
        yre[20] = 1.0  # c[20] == 0.0: patch the 0/0 with the limiting value sinc(0) = 1
assert_array_almost_equal(y, yre, 4)
def test_0(self):
x = 0.0
assert_equal(sinc(x),1.0)
def test_sindg(self):
sn = sindg(90)
assert_equal(sn,1.0)
def test_sindgmore(self):
snm = sindg(30)
snmrl = sin(pi/6.0)
assert_almost_equal(snm,snmrl,8)
snm1 = sindg(45)
snmrl1 = sin(pi/4.0)
assert_almost_equal(snm1,snmrl1,8)
class TestTandg(TestCase):
def test_tandg(self):
tn = tandg(30)
tnrl = tan(pi/6.0)
assert_almost_equal(tn,tnrl,8)
def test_tandgmore(self):
tnm = tandg(45)
tnmrl = tan(pi/4.0)
assert_almost_equal(tnm,tnmrl,8)
tnm1 = tandg(60)
tnmrl1 = tan(pi/3.0)
assert_almost_equal(tnm1,tnmrl1,8)
def test_specialpoints(self):
assert_almost_equal(tandg(0), 0.0, 14)
assert_almost_equal(tandg(45), 1.0, 14)
assert_almost_equal(tandg(-45), -1.0, 14)
assert_almost_equal(tandg(135), -1.0, 14)
assert_almost_equal(tandg(-135), 1.0, 14)
assert_almost_equal(tandg(180), 0.0, 14)
assert_almost_equal(tandg(-180), 0.0, 14)
assert_almost_equal(tandg(225), 1.0, 14)
assert_almost_equal(tandg(-225), -1.0, 14)
assert_almost_equal(tandg(315), -1.0, 14)
assert_almost_equal(tandg(-315), 1.0, 14)
class TestEllip(TestCase):
def test_ellipj_nan(self):
"""Regression test for #885."""
ellipj(0.5, np.nan)
def test_ellipj(self):
el = ellipj(0.2,0)
rel = [sin(0.2),cos(0.2),1.0,0.20]
assert_array_almost_equal(el,rel,13)
def test_ellipk(self):
elk = ellipk(.2)
assert_almost_equal(elk,1.659623598610528,11)
def test_ellipkinc(self):
elkinc = ellipkinc(pi/2,.2)
elk = ellipk(0.2)
assert_almost_equal(elkinc,elk,15)
alpha = 20*pi/180
phi = 45*pi/180
m = sin(alpha)**2
elkinc = ellipkinc(phi,m)
assert_almost_equal(elkinc,0.79398143,8)
# From pg. 614 of A & S
def test_ellipe(self):
ele = ellipe(.2)
assert_almost_equal(ele,1.4890350580958529,8)
def test_ellipeinc(self):
eleinc = ellipeinc(pi/2,.2)
ele = ellipe(0.2)
assert_almost_equal(eleinc,ele,14)
# pg 617 of A & S
alpha, phi = 52*pi/180,35*pi/180
m = sin(alpha)**2
eleinc = ellipeinc(phi,m)
assert_almost_equal(eleinc, 0.58823065, 8)
class TestErf(TestCase):
def test_erf(self):
er = erf(.25)
assert_almost_equal(er,0.2763263902,8)
def test_erf_zeros(self):
erz = erf_zeros(5)
erzr= array([1.45061616+1.88094300j,
2.24465928+2.61657514j,
2.83974105+3.17562810j,
3.33546074+3.64617438j,
3.76900557+4.06069723j])
assert_array_almost_equal(erz,erzr,4)
def test_erfcinv(self):
i = erfcinv(1)
assert_equal(i,0)
def test_erfinv(self):
i = erfinv(0)
assert_equal(i,0)
def test_errprint(self):
        a = errprint()
        b = 1-a          # a is the current state; 1-a inverts it
        c = errprint(b)  # set the new state; returns the previous state, i.e. a
        assert_equal(a,c)
        d = errprint(a)  # restore the original state; returns the previous state, i.e. b
        assert_equal(d,b)  # make sure the inverted state was returned
class TestEuler(TestCase):
def test_euler(self):
eu0 = euler(0)
eu1 = euler(1)
eu2 = euler(2) # just checking segfaults
assert_almost_equal(eu0[0],1,8)
assert_almost_equal(eu2[2],-1,8)
eu24 = euler(24)
mathworld = [1,1,5,61,1385,50521,2702765,199360981,
19391512145l,2404879675441l,
370371188237525l,69348874393137901l,
15514534163557086905l]
correct = zeros((25,),'d')
for k in range(0,13):
if (k % 2):
correct[2*k] = -float(mathworld[k])
else:
correct[2*k] = float(mathworld[k])
err = nan_to_num((eu24-correct)/correct)
errmax = max(err)
assert_almost_equal(errmax, 0.0, 14)
class TestExp(TestCase):
def test_exp2(self):
ex = exp2(2)
exrl = 2**2
assert_equal(ex,exrl)
def test_exp2more(self):
exm = exp2(2.5)
exmrl = 2**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_exp10(self):
ex = exp10(2)
exrl = 10**2
assert_approx_equal(ex,exrl)
def test_exp10more(self):
exm = exp10(2.5)
exmrl = 10**(2.5)
assert_almost_equal(exm,exmrl,8)
def test_expm1(self):
ex = (expm1(2),expm1(3),expm1(4))
exrl = (exp(2)-1,exp(3)-1,exp(4)-1)
assert_array_almost_equal(ex,exrl,8)
def test_expm1more(self):
ex1 = (expm1(2),expm1(2.1),expm1(2.2))
exrl1 = (exp(2)-1,exp(2.1)-1,exp(2.2)-1)
assert_array_almost_equal(ex1,exrl1,8)
class TestFresnel(TestCase):
def test_fresnel(self):
frs = array(fresnel(.5))
assert_array_almost_equal(frs,array([0.064732432859999287, 0.49234422587144644]),8)
# values from pg 329 Table 7.11 of A & S
# slightly corrected in 4th decimal place
def test_fresnel_zeros(self):
szo, czo = fresnel_zeros(5)
assert_array_almost_equal(szo,
array([ 2.0093+0.2885j,
2.8335+0.2443j,
3.4675+0.2185j,
4.0026+0.2009j,
4.4742+0.1877j]),3)
assert_array_almost_equal(czo,
array([ 1.7437+0.3057j,
2.6515+0.2529j,
3.3204+0.2240j,
3.8757+0.2047j,
4.3611+0.1907j]),3)
vals1 = fresnel(szo)[0]
vals2 = fresnel(czo)[1]
assert_array_almost_equal(vals1,0,14)
assert_array_almost_equal(vals2,0,14)
def test_fresnelc_zeros(self):
szo, czo = fresnel_zeros(6)
frc = fresnelc_zeros(6)
assert_array_almost_equal(frc,czo,12)
def test_fresnels_zeros(self):
szo, czo = fresnel_zeros(5)
frs = fresnels_zeros(5)
assert_array_almost_equal(frs,szo,12)
class TestGamma(TestCase):
def test_gamma(self):
gam = gamma(5)
assert_equal(gam,24.0)
def test_gammaln(self):
gamln = gammaln(3)
lngam = log(gamma(3))
assert_almost_equal(gamln,lngam,8)
def test_gammainc(self):
gama = gammainc(.5,.5)
assert_almost_equal(gama,.7,1)
def test_gammaincc(self):
gicc = gammaincc(.5,.5)
greal = 1 - gammainc(.5,.5)
assert_almost_equal(gicc,greal,8)
def test_gammainccinv(self):
gccinv = gammainccinv(.5,.5)
gcinv = gammaincinv(.5,.5)
assert_almost_equal(gccinv,gcinv,8)
def test_gammaincinv(self):
y = gammaincinv(.4,.4)
x = gammainc(.4,y)
assert_almost_equal(x,0.4,1)
y = gammainc(10, 0.05)
x = gammaincinv(10, 2.5715803516000736e-20)
assert_almost_equal(0.05, x, decimal=10)
assert_almost_equal(y, 2.5715803516000736e-20, decimal=10)
x = gammaincinv(50, 8.20754777388471303050299243573393e-18)
assert_almost_equal(11.0, x, decimal=10)
def test_rgamma(self):
rgam = rgamma(8)
rlgam = 1/gamma(8)
assert_almost_equal(rgam,rlgam,8)
class TestHankel(TestCase):
    def test_negv_hankel1(self):
assert_almost_equal(hankel1(-3,2), -hankel1(3,2), 14)
def test_hankel1(self):
hank1 = hankel1(1,.1)
hankrl = (jv(1,.1)+yv(1,.1)*1j)
assert_almost_equal(hank1,hankrl,8)
    def test_negv_hankel1e(self):
assert_almost_equal(hankel1e(-3,2), -hankel1e(3,2), 14)
def test_hankel1e(self):
hank1e = hankel1e(1,.1)
hankrle = hankel1(1,.1)*exp(-.1j)
assert_almost_equal(hank1e,hankrle,8)
    def test_negv_hankel2(self):
assert_almost_equal(hankel2(-3,2), -hankel2(3,2), 14)
def test_hankel2(self):
hank2 = hankel2(1,.1)
hankrl2 = (jv(1,.1)-yv(1,.1)*1j)
assert_almost_equal(hank2,hankrl2,8)
    def test_negv_hankel2e(self):
assert_almost_equal(hankel2e(-3,2), -hankel2e(3,2), 14)
    def test_hankel2e(self):
        hank2e = hankel2e(1,.1)
        hankrl2e = hankel2(1,.1)*exp(.1j)
assert_almost_equal(hank2e,hankrl2e,8)
class TestHermite(TestCase):
def test_hermite(self):
H0 = hermite(0)
H1 = hermite(1)
H2 = hermite(2)
H3 = hermite(3)
H4 = hermite(4)
H5 = hermite(5)
assert_array_almost_equal(H0.c,[1],13)
assert_array_almost_equal(H1.c,[2,0],13)
assert_array_almost_equal(H2.c,[4,0,-2],13)
assert_array_almost_equal(H3.c,[8,0,-12,0],13)
assert_array_almost_equal(H4.c,[16,0,-48,0,12],12)
assert_array_almost_equal(H5.c,[32,0,-160,0,120,0],12)
def test_hermitenorm(self):
# He_n(x) = 2**(-n/2) H_n(x/sqrt(2))
psub = poly1d([1.0/sqrt(2),0])
H0 = hermitenorm(0)
H1 = hermitenorm(1)
H2 = hermitenorm(2)
H3 = hermitenorm(3)
H4 = hermitenorm(4)
H5 = hermitenorm(5)
he0 = hermite(0)(psub)
he1 = hermite(1)(psub) / sqrt(2)
he2 = hermite(2)(psub) / 2.0
he3 = hermite(3)(psub) / (2*sqrt(2))
he4 = hermite(4)(psub) / 4.0
he5 = hermite(5)(psub) / (4.0*sqrt(2))
assert_array_almost_equal(H0.c,he0.c,13)
assert_array_almost_equal(H1.c,he1.c,13)
assert_array_almost_equal(H2.c,he2.c,13)
assert_array_almost_equal(H3.c,he3.c,13)
assert_array_almost_equal(H4.c,he4.c,13)
assert_array_almost_equal(H5.c,he5.c,13)
_gam = cephes.gamma
class TestGegenbauer(TestCase):
def test_gegenbauer(self):
        a = 5*rand()-0.5
        if a == 0: a = -0.2  # avoid the degenerate alpha = 0 case
Ca0 = gegenbauer(0,a)
Ca1 = gegenbauer(1,a)
Ca2 = gegenbauer(2,a)
Ca3 = gegenbauer(3,a)
Ca4 = gegenbauer(4,a)
Ca5 = gegenbauer(5,a)
assert_array_almost_equal(Ca0.c,array([1]),13)
assert_array_almost_equal(Ca1.c,array([2*a,0]),13)
assert_array_almost_equal(Ca2.c,array([2*a*(a+1),0,-a]),13)
assert_array_almost_equal(Ca3.c,array([4*poch(a,3),0,-6*a*(a+1),
0])/3.0,11)
assert_array_almost_equal(Ca4.c,array([4*poch(a,4),0,-12*poch(a,3),
0,3*a*(a+1)])/6.0,11)
assert_array_almost_equal(Ca5.c,array([4*poch(a,5),0,-20*poch(a,4),
0,15*poch(a,3),0])/15.0,11)
class TestHyper(TestCase):
def test_h1vp(self):
h1 = h1vp(1,.1)
h1real = (jvp(1,.1)+yvp(1,.1)*1j)
assert_almost_equal(h1,h1real,8)
def test_h2vp(self):
h2 = h2vp(1,.1)
h2real = (jvp(1,.1)-yvp(1,.1)*1j)
assert_almost_equal(h2,h2real,8)
def test_hyp0f1(self):
pass
def test_hyp1f1(self):
hyp1 = hyp1f1(.1,.1,.3)
assert_almost_equal(hyp1, 1.3498588075760032,7)
# test contributed by Moritz Deger (2008-05-29)
# http://projects.scipy.org/scipy/scipy/ticket/659
# reference data obtained from mathematica [ a, b, x, m(a,b,x)]:
# produced with test_hyp1f1.nb
ref_data = array([[ -8.38132975e+00, -1.28436461e+01, -2.91081397e+01, 1.04178330e+04],
[ 2.91076882e+00, -6.35234333e+00, -1.27083993e+01, 6.68132725e+00],
[ -1.42938258e+01, 1.80869131e-01, 1.90038728e+01, 1.01385897e+05],
[ 5.84069088e+00, 1.33187908e+01, 2.91290106e+01, 1.59469411e+08],
[ -2.70433202e+01, -1.16274873e+01, -2.89582384e+01, 1.39900152e+24],
[ 4.26344966e+00, -2.32701773e+01, 1.91635759e+01, 6.13816915e+21],
[ 1.20514340e+01, -3.40260240e+00, 7.26832235e+00, 1.17696112e+13],
[ 2.77372955e+01, -1.99424687e+00, 3.61332246e+00, 3.07419615e+13],
[ 1.50310939e+01, -2.91198675e+01, -1.53581080e+01, -3.79166033e+02],
[ 1.43995827e+01, 9.84311196e+00, 1.93204553e+01, 2.55836264e+10],
[ -4.08759686e+00, 1.34437025e+01, -1.42072843e+01, 1.70778449e+01],
[ 8.05595738e+00, -1.31019838e+01, 1.52180721e+01, 3.06233294e+21],
[ 1.81815804e+01, -1.42908793e+01, 9.57868793e+00, -2.84771348e+20],
[ -2.49671396e+01, 1.25082843e+01, -1.71562286e+01, 2.36290426e+07],
[ 2.67277673e+01, 1.70315414e+01, 6.12701450e+00, 7.77917232e+03],
[ 2.49565476e+01, 2.91694684e+01, 6.29622660e+00, 2.35300027e+02],
[ 6.11924542e+00, -1.59943768e+00, 9.57009289e+00, 1.32906326e+11],
[ -1.47863653e+01, 2.41691301e+01, -1.89981821e+01, 2.73064953e+03],
[ 2.24070483e+01, -2.93647433e+00, 8.19281432e+00, -6.42000372e+17],
[ 8.04042600e-01, 1.82710085e+01, -1.97814534e+01, 5.48372441e-01],
[ 1.39590390e+01, 1.97318686e+01, 2.37606635e+00, 5.51923681e+00],
[ -4.66640483e+00, -2.00237930e+01, 7.40365095e+00, 4.50310752e+00],
[ 2.76821999e+01, -6.36563968e+00, 1.11533984e+01, -9.28725179e+23],
[ -2.56764457e+01, 1.24544906e+00, 1.06407572e+01, 1.25922076e+01],
[ 3.20447808e+00, 1.30874383e+01, 2.26098014e+01, 2.03202059e+04],
[ -1.24809647e+01, 4.15137113e+00, -2.92265700e+01, 2.39621411e+08],
[ 2.14778108e+01, -2.35162960e+00, -1.13758664e+01, 4.46882152e-01],
[ -9.85469168e+00, -3.28157680e+00, 1.67447548e+01, -1.07342390e+07],
[ 1.08122310e+01, -2.47353236e+01, -1.15622349e+01, -2.91733796e+03],
[ -2.67933347e+01, -3.39100709e+00, 2.56006986e+01, -5.29275382e+09],
[ -8.60066776e+00, -8.02200924e+00, 1.07231926e+01, 1.33548320e+06],
[ -1.01724238e-01, -1.18479709e+01, -2.55407104e+01, 1.55436570e+00],
[ -3.93356771e+00, 2.11106818e+01, -2.57598485e+01, 2.13467840e+01],
[ 3.74750503e+00, 1.55687633e+01, -2.92841720e+01, 1.43873509e-02],
[ 6.99726781e+00, 2.69855571e+01, -1.63707771e+01, 3.08098673e-02],
[ -2.31996011e+01, 3.47631054e+00, 9.75119815e-01, 1.79971073e-02],
[ 2.38951044e+01, -2.91460190e+01, -2.50774708e+00, 9.56934814e+00],
[ 1.52730825e+01, 5.77062507e+00, 1.21922003e+01, 1.32345307e+09],
[ 1.74673917e+01, 1.89723426e+01, 4.94903250e+00, 9.90859484e+01],
[ 1.88971241e+01, 2.86255413e+01, 5.52360109e-01, 1.44165360e+00],
[ 1.02002319e+01, -1.66855152e+01, -2.55426235e+01, 6.56481554e+02],
[ -1.79474153e+01, 1.22210200e+01, -1.84058212e+01, 8.24041812e+05],
[ -1.36147103e+01, 1.32365492e+00, -7.22375200e+00, 9.92446491e+05],
[ 7.57407832e+00, 2.59738234e+01, -1.34139168e+01, 3.64037761e-02],
[ 2.21110169e+00, 1.28012666e+01, 1.62529102e+01, 1.33433085e+02],
[ -2.64297569e+01, -1.63176658e+01, -1.11642006e+01, -2.44797251e+13],
[ -2.46622944e+01, -3.02147372e+00, 8.29159315e+00, -3.21799070e+05],
[ -1.37215095e+01, -1.96680183e+01, 2.91940118e+01, 3.21457520e+12],
[ -5.45566105e+00, 2.81292086e+01, 1.72548215e-01, 9.66973000e-01],
[ -1.55751298e+00, -8.65703373e+00, 2.68622026e+01, -3.17190834e+16],
[ 2.45393609e+01, -2.70571903e+01, 1.96815505e+01, 1.80708004e+37],
[ 5.77482829e+00, 1.53203143e+01, 2.50534322e+01, 1.14304242e+06],
[ -1.02626819e+01, 2.36887658e+01, -2.32152102e+01, 7.28965646e+02],
[ -1.30833446e+00, -1.28310210e+01, 1.87275544e+01, -9.33487904e+12],
[ 5.83024676e+00, -1.49279672e+01, 2.44957538e+01, -7.61083070e+27],
[ -2.03130747e+01, 2.59641715e+01, -2.06174328e+01, 4.54744859e+04],
[ 1.97684551e+01, -2.21410519e+01, -2.26728740e+01, 3.53113026e+06],
[ 2.73673444e+01, 2.64491725e+01, 1.57599882e+01, 1.07385118e+07],
[ 5.73287971e+00, 1.21111904e+01, 1.33080171e+01, 2.63220467e+03],
[ -2.82751072e+01, 2.08605881e+01, 9.09838900e+00, -6.60957033e-07],
[ 1.87270691e+01, -1.74437016e+01, 1.52413599e+01, 6.59572851e+27],
[ 6.60681457e+00, -2.69449855e+00, 9.78972047e+00, -2.38587870e+12],
[ 1.20895561e+01, -2.51355765e+01, 2.30096101e+01, 7.58739886e+32],
[ -2.44682278e+01, 2.10673441e+01, -1.36705538e+01, 4.54213550e+04],
[ -4.50665152e+00, 3.72292059e+00, -4.83403707e+00, 2.68938214e+01],
[ -7.46540049e+00, -1.08422222e+01, -1.72203805e+01, -2.09402162e+02],
[ -2.00307551e+01, -7.50604431e+00, -2.78640020e+01, 4.15985444e+19],
[ 1.99890876e+01, 2.20677419e+01, -2.51301778e+01, 1.23840297e-09],
[ 2.03183823e+01, -7.66942559e+00, 2.10340070e+01, 1.46285095e+31],
[ -2.90315825e+00, -2.55785967e+01, -9.58779316e+00, 2.65714264e-01],
[ 2.73960829e+01, -1.80097203e+01, -2.03070131e+00, 2.52908999e+02],
[ -2.11708058e+01, -2.70304032e+01, 2.48257944e+01, 3.09027527e+08],
[ 2.21959758e+01, 4.00258675e+00, -1.62853977e+01, -9.16280090e-09],
[ 1.61661840e+01, -2.26845150e+01, 2.17226940e+01, -8.24774394e+33],
[ -3.35030306e+00, 1.32670581e+00, 9.39711214e+00, -1.47303163e+01],
[ 7.23720726e+00, -2.29763909e+01, 2.34709682e+01, -9.20711735e+29],
[ 2.71013568e+01, 1.61951087e+01, -7.11388906e-01, 2.98750911e-01],
[ 8.40057933e+00, -7.49665220e+00, 2.95587388e+01, 6.59465635e+29],
[ -1.51603423e+01, 1.94032322e+01, -7.60044357e+00, 1.05186941e+02],
[ -8.83788031e+00, -2.72018313e+01, 1.88269907e+00, 1.81687019e+00],
[ -1.87283712e+01, 5.87479570e+00, -1.91210203e+01, 2.52235612e+08],
[ -5.61338513e-01, 2.69490237e+01, 1.16660111e-01, 9.97567783e-01],
[ -5.44354025e+00, -1.26721408e+01, -4.66831036e+00, 1.06660735e-01],
[ -2.18846497e+00, 2.33299566e+01, 9.62564397e+00, 3.03842061e-01],
[ 6.65661299e+00, -2.39048713e+01, 1.04191807e+01, 4.73700451e+13],
[ -2.57298921e+01, -2.60811296e+01, 2.74398110e+01, -5.32566307e+11],
[ -1.11431826e+01, -1.59420160e+01, -1.84880553e+01, -1.01514747e+02],
[ 6.50301931e+00, 2.59859051e+01, -2.33270137e+01, 1.22760500e-02],
[ -1.94987891e+01, -2.62123262e+01, 3.90323225e+00, 1.71658894e+01],
[ 7.26164601e+00, -1.41469402e+01, 2.81499763e+01, -2.50068329e+31],
[ -1.52424040e+01, 2.99719005e+01, -2.85753678e+01, 1.31906693e+04],
[ 5.24149291e+00, -1.72807223e+01, 2.22129493e+01, 2.50748475e+25],
[ 3.63207230e-01, -9.54120862e-02, -2.83874044e+01, 9.43854939e-01],
[ -2.11326457e+00, -1.25707023e+01, 1.17172130e+00, 1.20812698e+00],
[ 2.48513582e+00, 1.03652647e+01, -1.84625148e+01, 6.47910997e-02],
[ 2.65395942e+01, 2.74794672e+01, 1.29413428e+01, 2.89306132e+05],
[ -9.49445460e+00, 1.59930921e+01, -1.49596331e+01, 3.27574841e+02],
[ -5.89173945e+00, 9.96742426e+00, 2.60318889e+01, -3.15842908e-01],
[ -1.15387239e+01, -2.21433107e+01, -2.17686413e+01, 1.56724718e-01],
[ -5.30592244e+00, -2.42752190e+01, 1.29734035e+00, 1.31985534e+00]])
for a,b,c,expected in ref_data:
result = hyp1f1(a,b,c)
            assert abs(expected - result)/abs(expected) < 1e-4
def test_hyp1f2(self):
pass
def test_hyp2f0(self):
pass
def test_hyp2f1(self):
# a collection of special cases taken from AMS 55
values = [[0.5, 1, 1.5, 0.2**2, 0.5/0.2*log((1+0.2)/(1-0.2))],
[0.5, 1, 1.5, -0.2**2, 1./0.2*arctan(0.2)],
[1, 1, 2, 0.2, -1/0.2*log(1-0.2)],
[3, 3.5, 1.5, 0.2**2,
0.5/0.2/(-5)*((1+0.2)**(-5)-(1-0.2)**(-5))],
[-3, 3, 0.5, sin(0.2)**2, cos(2*3*0.2)],
[3, 4, 8, 1, gamma(8)*gamma(8-4-3)/gamma(8-3)/gamma(8-4)],
[3, 2, 3-2+1, -1, 1./2**3*sqrt(pi)*
gamma(1+3-2)/gamma(1+0.5*3-2)/gamma(0.5+0.5*3)],
[5, 2, 5-2+1, -1, 1./2**5*sqrt(pi)*
gamma(1+5-2)/gamma(1+0.5*5-2)/gamma(0.5+0.5*5)],
[4, 0.5+4, 1.5-2*4, -1./3, (8./9)**(-2*4)*gamma(4./3)*
gamma(1.5-2*4)/gamma(3./2)/gamma(4./3-2*4)],
# and some others
# ticket #424
[1.5, -0.5, 1.0, -10.0, 4.1300097765277476484],
]
for i, (a, b, c, x, v) in enumerate(values):
cv = hyp2f1(a, b, c, x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_hyp3f0(self):
pass
def test_hyperu(self):
val1 = hyperu(1,0.1,100)
assert_almost_equal(val1,0.0098153,7)
a,b = [0.3,0.6,1.2,-2.7],[1.5,3.2,-0.4,-3.2]
a,b = asarray(a), asarray(b)
z = 0.5
hypu = hyperu(a,b,z)
hprl = (pi/sin(pi*b))*(hyp1f1(a,b,z)/ \
(gamma(1+a-b)*gamma(b))- \
z**(1-b)*hyp1f1(1+a-b,2-b,z) \
/(gamma(a)*gamma(2-b)))
assert_array_almost_equal(hypu,hprl,12)
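# The reference value hprl above is the standard confluent hypergeometric
# identity (cf. A&S 13.1.3):
#   U(a,b,z) = (pi/sin(pi*b)) * [ M(a,b,z)/(Gamma(1+a-b)*Gamma(b))
#                                 - z**(1-b)*M(1+a-b,2-b,z)/(Gamma(a)*Gamma(2-b)) ]
# where M = hyp1f1 is Kummer's function.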
class TestBessel(TestCase):
def test_itj0y0(self):
it0 = array(itj0y0(.2))
assert_array_almost_equal(it0,array([0.19933433254006822, -0.34570883800412566]),8)
def test_it2j0y0(self):
it2 = array(it2j0y0(.2))
assert_array_almost_equal(it2,array([0.0049937546274601858, -0.43423067011231614]),8)
    def test_negv_iv(self):
assert_equal(iv(3,2), iv(-3,2))
def test_j0(self):
oz = j0(.1)
ozr = jn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_j1(self):
o1 = j1(.1)
o1r = jn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_jn(self):
jnnr = jn(1,.2)
assert_almost_equal(jnnr,0.099500832639235995,8)
    def test_negv_jv(self):
assert_almost_equal(jv(-3,2), -jv(3,2), 14)
def test_jv(self):
values = [[0, 0.1, 0.99750156206604002],
[2./3, 1e-8, 0.3239028506761532e-5],
[2./3, 1e-10, 0.1503423854873779e-6],
[3.1, 1e-10, 0.1711956265409013e-32],
[2./3, 4.0, -0.2325440850267039],
]
for i, (v, x, y) in enumerate(values):
yc = jv(v, x)
assert_almost_equal(yc, y, 8, err_msg='test #%d' % i)
    def test_negv_jve(self):
assert_almost_equal(jve(-3,2), -jve(3,2), 14)
def test_jve(self):
jvexp = jve(1,.2)
assert_almost_equal(jvexp,0.099500832639235995,8)
jvexp1 = jve(1,.2+1j)
z = .2+1j
jvexpr = jv(1,z)*exp(-abs(z.imag))
assert_almost_equal(jvexp1,jvexpr,8)
def test_jn_zeros(self):
jn0 = jn_zeros(0,5)
jn1 = jn_zeros(1,5)
assert_array_almost_equal(jn0,array([ 2.4048255577,
5.5200781103,
8.6537279129,
11.7915344391,
14.9309177086]),4)
assert_array_almost_equal(jn1,array([ 3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),4)
jn102 = jn_zeros(102,5)
assert_tol_equal(jn102, array([110.89174935992040343,
117.83464175788308398,
123.70194191713507279,
129.02417238949092824,
134.00114761868422559]), rtol=1e-13)
jn301 = jn_zeros(301,5)
assert_tol_equal(jn301, array([313.59097866698830153,
323.21549776096288280,
331.22338738656748796,
338.39676338872084500,
345.03284233056064157]), rtol=1e-13)
def test_jn_zeros_slow(self):
jn0 = jn_zeros(0, 300)
assert_tol_equal(jn0[260-1], 816.02884495068867280, rtol=1e-13)
assert_tol_equal(jn0[280-1], 878.86068707124422606, rtol=1e-13)
assert_tol_equal(jn0[300-1], 941.69253065317954064, rtol=1e-13)
jn10 = jn_zeros(10, 300)
assert_tol_equal(jn10[260-1], 831.67668514305631151, rtol=1e-13)
assert_tol_equal(jn10[280-1], 894.51275095371316931, rtol=1e-13)
assert_tol_equal(jn10[300-1], 957.34826370866539775, rtol=1e-13)
jn3010 = jn_zeros(3010,5)
assert_tol_equal(jn3010, array([3036.86590780927,
3057.06598526482,
3073.66360690272,
3088.37736494778,
3101.86438139042]), rtol=1e-8)
def test_jnjnp_zeros(self):
pass
#jnjp = jnjnp(3)
#assert_array_almost_equal(jnjp,(array([
        # specfun's jdzo does not appear to be working properly: the outputs
        # do not seem to correlate with the inputs.
def test_jnp_zeros(self):
jnp = jnp_zeros(1,5)
assert_array_almost_equal(jnp, array([ 1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),4)
jnp = jnp_zeros(443,5)
assert_tol_equal(jvp(443, jnp), 0, atol=1e-15)
def test_jnyn_zeros(self):
jnz = jnyn_zeros(1,5)
assert_array_almost_equal(jnz,(array([ 3.83171,
7.01559,
10.17347,
13.32369,
16.47063]),
array([ 1.84118,
5.33144,
8.53632,
11.70600,
14.86359]),
array([ 2.19714,
5.42968,
8.59601,
11.74915,
14.89744]),
array([ 3.68302,
6.94150,
10.12340,
13.28576,
16.44006])),5)
def test_jvp(self):
jvprim = jvp(2,2)
jv0 = (jv(1,2)-jv(3,2))/2
assert_almost_equal(jvprim,jv0,10)
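    # test_jvp checks the standard derivative recurrence
    #   J_v'(z) = (J_{v-1}(z) - J_{v+1}(z)) / 2
    # here at v = 2, z = 2.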
def test_k0(self):
ozk = k0(.1)
ozkr = kv(0,.1)
assert_almost_equal(ozk,ozkr,8)
def test_k0e(self):
ozke = k0e(.1)
ozker = kve(0,.1)
assert_almost_equal(ozke,ozker,8)
def test_k1(self):
o1k = k1(.1)
o1kr = kv(1,.1)
assert_almost_equal(o1k,o1kr,8)
def test_k1e(self):
o1ke = k1e(.1)
o1ker = kve(1,.1)
assert_almost_equal(o1ke,o1ker,8)
def test_jacobi(self):
a = 5*rand() - 1
b = 5*rand() - 1
P0 = jacobi(0,a,b)
P1 = jacobi(1,a,b)
P2 = jacobi(2,a,b)
P3 = jacobi(3,a,b)
assert_array_almost_equal(P0.c,[1],13)
assert_array_almost_equal(P1.c,array([a+b+2,a-b])/2.0,13)
cp = [(a+b+3)*(a+b+4), 4*(a+b+3)*(a+2), 4*(a+1)*(a+2)]
p2c = [cp[0],cp[1]-2*cp[0],cp[2]-cp[1]+cp[0]]
assert_array_almost_equal(P2.c,array(p2c)/8.0,13)
cp = [(a+b+4)*(a+b+5)*(a+b+6),6*(a+b+4)*(a+b+5)*(a+3),
12*(a+b+4)*(a+2)*(a+3),8*(a+1)*(a+2)*(a+3)]
p3c = [cp[0],cp[1]-3*cp[0],cp[2]-2*cp[1]+3*cp[0],cp[3]-cp[2]+cp[1]-cp[0]]
assert_array_almost_equal(P3.c,array(p3c)/48.0,13)
def test_kn(self):
kn1 = kn(0,.2)
assert_almost_equal(kn1,1.7527038555281462,8)
    def test_negv_kv(self):
assert_equal(kv(3.0, 2.2), kv(-3.0, 2.2))
def test_kv0(self):
kv0 = kv(0,.2)
assert_almost_equal(kv0, 1.7527038555281462, 10)
def test_kv1(self):
kv1 = kv(1,0.2)
assert_almost_equal(kv1, 4.775972543220472, 10)
def test_kv2(self):
kv2 = kv(2,0.2)
assert_almost_equal(kv2, 49.51242928773287, 10)
    def test_negv_kve(self):
assert_equal(kve(3.0, 2.2), kve(-3.0, 2.2))
def test_kve(self):
kve1 = kve(0,.2)
kv1 = kv(0,.2)*exp(.2)
assert_almost_equal(kve1,kv1,8)
z = .2+1j
kve2 = kve(0,z)
kv2 = kv(0,z)*exp(z)
assert_almost_equal(kve2,kv2,8)
def test_kvp_v0n1(self):
z = 2.2
assert_almost_equal(-kv(1,z), kvp(0,z, n=1), 10)
def test_kvp_n1(self):
v = 3.
z = 2.2
xc = -kv(v+1,z) + v/z*kv(v,z)
x = kvp(v,z, n=1)
assert_almost_equal(xc, x, 10) #this function (kvp) is broken
def test_kvp_n2(self):
v = 3.
z = 2.2
xc = (z**2+v**2-v)/z**2 * kv(v,z) + kv(v+1,z)/z
x = kvp(v, z, n=2)
assert_almost_equal(xc, x, 10)
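    # The two kvp tests above check derivative recurrences for the modified
    # Bessel function K, as encoded in the xc expressions:
    #   K_v'(z)  = -K_{v+1}(z) + (v/z)*K_v(z)
    #   K_v''(z) = ((z**2 + v**2 - v)/z**2)*K_v(z) + K_{v+1}(z)/z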
def test_y0(self):
oz = y0(.1)
ozr = yn(0,.1)
assert_almost_equal(oz,ozr,8)
def test_y1(self):
o1 = y1(.1)
o1r = yn(1,.1)
assert_almost_equal(o1,o1r,8)
def test_y0_zeros(self):
yo,ypo = y0_zeros(2)
zo,zpo = y0_zeros(2,complex=1)
        zall = r_[yo,zo]
        allval = r_[ypo,zpo]
        assert_array_almost_equal(abs(yv(0.0,zall)),0.0,11)
        assert_array_almost_equal(abs(yv(1,zall)-allval),0.0,11)
def test_y1_zeros(self):
y1 = y1_zeros(1)
assert_array_almost_equal(y1,(array([2.19714]),array([0.52079])),5)
def test_y1p_zeros(self):
y1p = y1p_zeros(1,complex=1)
assert_array_almost_equal(y1p,(array([ 0.5768+0.904j]), array([-0.7635+0.5892j])),3)
def test_yn_zeros(self):
an = yn_zeros(4,2)
assert_array_almost_equal(an,array([ 5.64515, 9.36162]),5)
an = yn_zeros(443,5)
assert_tol_equal(an, [450.13573091578090314, 463.05692376675001542,
472.80651546418663566, 481.27353184725625838,
488.98055964441374646], rtol=1e-15)
def test_ynp_zeros(self):
ao = ynp_zeros(0,2)
assert_array_almost_equal(ao,array([ 2.19714133, 5.42968104]),6)
ao = ynp_zeros(43,5)
assert_tol_equal(yvp(43, ao), 0, atol=1e-15)
ao = ynp_zeros(443,5)
assert_tol_equal(yvp(443, ao), 0, atol=1e-9)
@dec.knownfailureif(True,
"cephes/yv is not eps accurate for large orders on "
"all platforms, and has nan/inf issues")
def test_ynp_zeros_large_order(self):
ao = ynp_zeros(443,5)
assert_tol_equal(yvp(443, ao), 0, atol=1e-15)
def test_yn(self):
yn2n = yn(1,.2)
assert_almost_equal(yn2n,-3.3238249881118471,8)
    def test_negv_yv(self):
assert_almost_equal(yv(-3,2), -yv(3,2), 14)
def test_yv(self):
yv2 = yv(1,.2)
assert_almost_equal(yv2,-3.3238249881118471,8)
    def test_negv_yve(self):
assert_almost_equal(yve(-3,2), -yve(3,2), 14)
def test_yve(self):
yve2 = yve(1,.2)
assert_almost_equal(yve2,-3.3238249881118471,8)
yve2r = yv(1,.2+1j)*exp(-1)
yve22 = yve(1,.2+1j)
assert_almost_equal(yve22,yve2r,8)
def test_yvp(self):
yvpr = (yv(1,.2) - yv(3,.2))/2.0
yvp1 = yvp(2,.2)
assert_array_almost_equal(yvp1,yvpr,10)
def _cephes_vs_amos_points(self):
"""Yield points at which to compare Cephes implementation to AMOS"""
# check several points, including large-amplitude ones
for v in [-120, -100.3, -20., -10., -1., -.5,
0., 1., 12.49, 120., 301]:
for z in [-1300, -11, -10, -1, 1., 10., 200.5, 401., 600.5,
700.6, 1300, 10003]:
yield v, z
# check half-integers; these are problematic points at least
# for cephes/iv
for v in 0.5 + arange(-60, 60):
yield v, 3.5
def check_cephes_vs_amos(self, f1, f2, rtol=1e-11, atol=0, skip=None):
for v, z in self._cephes_vs_amos_points():
if skip is not None and skip(v, z):
continue
c1, c2, c3 = f1(v, z), f1(v,z+0j), f2(int(v), z)
if np.isinf(c1):
assert np.abs(c2) >= 1e300, (v, z)
elif np.isnan(c1):
assert c2.imag != 0, (v, z)
else:
assert_tol_equal(c1, c2, err_msg=(v, z), rtol=rtol, atol=atol)
if v == int(v):
assert_tol_equal(c3, c2, err_msg=(v, z),
rtol=rtol, atol=atol)
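    # check_cephes_vs_amos evaluates f1 at real and complex arguments (the
    # real path typically goes through cephes, the complex path through AMOS)
    # and, for integer orders, also compares against the integer-order routine
    # f2; infinities and nans on the real path are cross-checked against the
    # complex result rather than compared directly.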
def test_jv_cephes_vs_amos(self):
self.check_cephes_vs_amos(jv, jn, rtol=1e-10, atol=1e-305)
@dec.knownfailureif(True,
"cephes/yv is not eps accurate for large orders on "
"all platforms, and has nan/inf issues")
def test_yv_cephes_vs_amos(self):
self.check_cephes_vs_amos(yv, yn, rtol=1e-11, atol=1e-305)
def test_yv_cephes_vs_amos_only_small_orders(self):
skipper = lambda v, z: (abs(v) > 50)
self.check_cephes_vs_amos(yv, yn, rtol=1e-11, atol=1e-305, skip=skipper)
def test_iv_cephes_vs_amos(self):
self.check_cephes_vs_amos(iv, iv, rtol=1e-12, atol=1e-305)
@dec.slow
def test_iv_cephes_vs_amos_mass_test(self):
N = 1000000
np.random.seed(1)
v = np.random.pareto(0.5, N) * (-1)**np.random.randint(2, size=N)
x = np.random.pareto(0.2, N) * (-1)**np.random.randint(2, size=N)
imsk = (np.random.randint(8, size=N) == 0)
        v[imsk] = v[imsk].astype(int)
c1 = iv(v, x)
c2 = iv(v, x+0j)
dc = abs(c1/c2 - 1)
dc[np.isnan(dc)] = 0
k = np.argmax(dc)
# Most error apparently comes from AMOS and not our implementation;
# there are some problems near integer orders there
assert dc[k] < 1e-9, (iv(v[k], x[k]), iv(v[k], x[k]+0j))
def test_kv_cephes_vs_amos(self):
#self.check_cephes_vs_amos(kv, kn, rtol=1e-9, atol=1e-305)
self.check_cephes_vs_amos(kv, kv, rtol=1e-9, atol=1e-305)
def test_ticket_623(self):
assert_tol_equal(jv(3, 4), 0.43017147387562193)
assert_tol_equal(jv(301, 1300), 0.0183487151115275)
assert_tol_equal(jv(301, 1296.0682), -0.0224174325312048)
def test_ticket_853(self):
"""Negative-order Bessels"""
# cephes
assert_tol_equal(jv(-1, 1 ), -0.4400505857449335)
assert_tol_equal(jv(-2, 1 ), 0.1149034849319005)
assert_tol_equal(yv(-1, 1 ), 0.7812128213002887)
assert_tol_equal(yv(-2, 1 ), -1.650682606816255)
assert_tol_equal(iv(-1, 1 ), 0.5651591039924851)
assert_tol_equal(iv(-2, 1 ), 0.1357476697670383)
assert_tol_equal(kv(-1, 1 ), 0.6019072301972347)
assert_tol_equal(kv(-2, 1 ), 1.624838898635178)
assert_tol_equal(jv(-0.5, 1 ), 0.43109886801837607952)
assert_tol_equal(yv(-0.5, 1 ), 0.6713967071418031)
assert_tol_equal(iv(-0.5, 1 ), 1.231200214592967)
assert_tol_equal(kv(-0.5, 1 ), 0.4610685044478945)
# amos
assert_tol_equal(jv(-1, 1+0j), -0.4400505857449335)
assert_tol_equal(jv(-2, 1+0j), 0.1149034849319005)
assert_tol_equal(yv(-1, 1+0j), 0.7812128213002887)
assert_tol_equal(yv(-2, 1+0j), -1.650682606816255)
assert_tol_equal(iv(-1, 1+0j), 0.5651591039924851)
assert_tol_equal(iv(-2, 1+0j), 0.1357476697670383)
assert_tol_equal(kv(-1, 1+0j), 0.6019072301972347)
assert_tol_equal(kv(-2, 1+0j), 1.624838898635178)
assert_tol_equal(jv(-0.5, 1+0j), 0.43109886801837607952)
assert_tol_equal(jv(-0.5, 1+1j), 0.2628946385649065-0.827050182040562j)
assert_tol_equal(yv(-0.5, 1+0j), 0.6713967071418031)
assert_tol_equal(yv(-0.5, 1+1j), 0.967901282890131+0.0602046062142816j)
assert_tol_equal(iv(-0.5, 1+0j), 1.231200214592967)
assert_tol_equal(iv(-0.5, 1+1j), 0.77070737376928+0.39891821043561j)
assert_tol_equal(kv(-0.5, 1+0j), 0.4610685044478945)
assert_tol_equal(kv(-0.5, 1+1j), 0.06868578341999-0.38157825981268j)
assert_tol_equal(jve(-0.5,1+0.3j), jv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(yve(-0.5,1+0.3j), yv(-0.5, 1+0.3j)*exp(-0.3))
assert_tol_equal(ive(-0.5,0.3+1j), iv(-0.5, 0.3+1j)*exp(-0.3))
assert_tol_equal(kve(-0.5,0.3+1j), kv(-0.5, 0.3+1j)*exp(0.3+1j))
assert_tol_equal(hankel1(-0.5, 1+1j), jv(-0.5, 1+1j) + 1j*yv(-0.5,1+1j))
assert_tol_equal(hankel2(-0.5, 1+1j), jv(-0.5, 1+1j) - 1j*yv(-0.5,1+1j))
def test_ticket_854(self):
"""Real-valued Bessel domains"""
assert isnan(jv(0.5, -1))
assert isnan(iv(0.5, -1))
assert isnan(yv(0.5, -1))
assert isnan(yv(1, -1))
assert isnan(kv(0.5, -1))
assert isnan(kv(1, -1))
assert isnan(jve(0.5, -1))
assert isnan(ive(0.5, -1))
assert isnan(yve(0.5, -1))
assert isnan(yve(1, -1))
assert isnan(kve(0.5, -1))
assert isnan(kve(1, -1))
assert isnan(airye(-1)[0:2]).all(), airye(-1)
assert not isnan(airye(-1)[2:4]).any(), airye(-1)
def test_ticket_503(self):
"""Real-valued Bessel I overflow"""
assert_tol_equal(iv(1, 700), 1.528500390233901e302)
assert_tol_equal(iv(1000, 1120), 1.301564549405821e301)
def test_iv_hyperg_poles(self):
assert_tol_equal(iv(-0.5, 1), 1.231200214592967)
def iv_series(self, v, z, n=200):
k = arange(0, n).astype(float_)
r = (v+2*k)*log(.5*z) - gammaln(k+1) - gammaln(v+k+1)
r[isnan(r)] = inf
r = exp(r)
err = abs(r).max() * finfo(float_).eps * n + abs(r[-1])*10
return r.sum(), err
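    # iv_series sums the ascending series for I_v (e.g. A&S 9.6.10),
    #   I_v(z) = sum_{k>=0} (z/2)**(v+2*k) / (k! * Gamma(v+k+1)),
    # evaluated in log space for stability, and returns the sum together with
    # a rough truncation/roundoff error estimate.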
def test_i0_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(0, z)
assert_tol_equal(i0(z), value, atol=err, err_msg=z)
def test_i1_series(self):
for z in [1., 10., 200.5]:
value, err = self.iv_series(1, z)
assert_tol_equal(i1(z), value, atol=err, err_msg=z)
def test_iv_series(self):
for v in [-20., -10., -1., 0., 1., 12.49, 120.]:
for z in [1., 10., 200.5, -1+2j]:
value, err = self.iv_series(v, z)
assert_tol_equal(iv(v, z), value, atol=err, err_msg=(v, z))
def test_i0(self):
values = [[0.0, 1.0],
[1e-10, 1.0],
[0.1, 0.9071009258],
[0.5, 0.6450352706],
[1.0, 0.4657596077],
[2.5, 0.2700464416],
[5.0, 0.1835408126],
[20.0, 0.0897803119],
]
for i, (x, v) in enumerate(values):
cv = i0(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i0e(self):
oize = i0e(.1)
oizer = ive(0,.1)
assert_almost_equal(oize,oizer,8)
def test_i1(self):
values = [[0.0, 0.0],
[1e-10, 0.4999999999500000e-10],
[0.1, 0.0452984468],
[0.5, 0.1564208032],
[1.0, 0.2079104154],
[5.0, 0.1639722669],
[20.0, 0.0875062222],
]
for i, (x, v) in enumerate(values):
cv = i1(x) * exp(-x)
assert_almost_equal(cv, v, 8, err_msg='test #%d' % i)
def test_i1e(self):
oi1e = i1e(.1)
oi1er = ive(1,.1)
assert_almost_equal(oi1e,oi1er,8)
def test_iti0k0(self):
iti0 = array(iti0k0(5))
assert_array_almost_equal(iti0,array([31.848667776169801, 1.5673873907283657]),5)
def test_it2i0k0(self):
it2k = it2i0k0(.1)
assert_array_almost_equal(it2k,array([0.0012503906973464409, 3.3309450354686687]),6)
def test_iv(self):
iv1 = iv(0,.1)*exp(-.1)
assert_almost_equal(iv1,0.90710092578230106,10)
    def test_negv_ive(self):
assert_equal(ive(3,2), ive(-3,2))
def test_ive(self):
ive1 = ive(0,.1)
iv1 = iv(0,.1)*exp(-.1)
assert_almost_equal(ive1,iv1,10)
def test_ivp0(self):
assert_almost_equal(iv(1,2), ivp(0,2), 10)
def test_ivp(self):
y=(iv(0,2)+iv(2,2))/2
x = ivp(1,2)
assert_almost_equal(x,y,10)
class TestLaguerre(TestCase):
def test_laguerre(self):
lag0 = laguerre(0)
lag1 = laguerre(1)
lag2 = laguerre(2)
lag3 = laguerre(3)
lag4 = laguerre(4)
lag5 = laguerre(5)
assert_array_almost_equal(lag0.c,[1],13)
assert_array_almost_equal(lag1.c,[-1,1],13)
assert_array_almost_equal(lag2.c,array([1,-4,2])/2.0,13)
assert_array_almost_equal(lag3.c,array([-1,9,-18,6])/6.0,13)
assert_array_almost_equal(lag4.c,array([1,-16,72,-96,24])/24.0,13)
assert_array_almost_equal(lag5.c,array([-1,25,-200,600,-600,120])/120.0,13)
def test_genlaguerre(self):
k = 5*rand()-0.9
lag0 = genlaguerre(0,k)
lag1 = genlaguerre(1,k)
lag2 = genlaguerre(2,k)
lag3 = genlaguerre(3,k)
assert_equal(lag0.c,[1])
assert_equal(lag1.c,[-1,k+1])
assert_almost_equal(lag2.c,array([1,-2*(k+2),(k+1.)*(k+2.)])/2.0)
assert_almost_equal(lag3.c,array([-1,3*(k+3),-3*(k+2)*(k+3),(k+1)*(k+2)*(k+3)])/6.0)
    # Base polynomials come from Abramowitz and Stegun
class TestLegendre(TestCase):
def test_legendre(self):
leg0 = legendre(0)
leg1 = legendre(1)
leg2 = legendre(2)
leg3 = legendre(3)
leg4 = legendre(4)
leg5 = legendre(5)
assert_equal(leg0.c,[1])
assert_equal(leg1.c,[1,0])
assert_equal(leg2.c,array([3,0,-1])/2.0)
assert_almost_equal(leg3.c,array([5,0,-3,0])/2.0)
assert_almost_equal(leg4.c,array([35,0,-30,0,3])/8.0)
assert_almost_equal(leg5.c,array([63,0,-70,0,15,0])/8.0)
class TestLambda(TestCase):
def test_lmbda(self):
lam = lmbda(1,.1)
lamr = (array([jn(0,.1), 2*jn(1,.1)/.1]),
array([jvp(0,.1), -2*jv(1,.1)/.01 + 2*jvp(1,.1)/.1]))
assert_array_almost_equal(lam,lamr,8)
class TestLog1p(TestCase):
def test_log1p(self):
l1p = (log1p(10),log1p(11),log1p(12))
l1prl = (log(11),log(12),log(13))
assert_array_almost_equal(l1p,l1prl,8)
def test_log1pmore(self):
l1pm = (log1p(1),log1p(1.1),log1p(1.2))
l1pmrl = (log(2),log(2.1),log(2.2))
assert_array_almost_equal(l1pm,l1pmrl,8)
class TestLegendreFunctions(TestCase):
def test_lpmn(self):
lp = lpmn(0,2,.5)
assert_array_almost_equal(lp,(array([ [ 1.00000 ,
0.50000,
-0.12500]]),
array([ [ 0.00000 ,
1.00000 ,
1.50000]])),4)
def test_lpn(self):
lpnf = lpn(2,.5)
assert_array_almost_equal(lpnf,(array( [ 1.00000 ,
0.50000,
-0.12500]),
array( [ 0.00000 ,
1.00000 ,
1.50000])),4)
def test_lpmv(self):
lp = lpmv(0,2,.5)
assert_almost_equal(lp,-0.125,3)
def test_lqmn(self):
        lqmnf = lqmn(0,2,.5)
lqf = lqn(2,.5)
assert_array_almost_equal(lqmnf[0][0],lqf[0],4)
assert_array_almost_equal(lqmnf[1][0],lqf[1],4)
def test_lqmn_shape(self):
a, b = lqmn(4, 4, 1.1)
assert_equal(a.shape, (5, 5))
assert_equal(b.shape, (5, 5))
a, b = lqmn(4, 0, 1.1)
assert_equal(a.shape, (5, 1))
assert_equal(b.shape, (5, 1))
def test_lqn(self):
lqf = lqn(2,.5)
assert_array_almost_equal(lqf,(array([ 0.5493, -0.7253, -0.8187]),
array([ 1.3333, 1.216 , -0.8427])),4)
class TestMathieu(TestCase):
def test_mathieu_a(self):
pass
def test_mathieu_even_coef(self):
mc = mathieu_even_coef(2,5)
        # Broken: Q is not defined, and the proper reporting order cannot be
        # figured out.
def test_mathieu_odd_coef(self):
pass
#same problem as above
class TestFresnelIntegral(TestCase):
def test_modfresnelp(self):
pass
def test_modfresnelm(self):
pass
class TestOblCvSeq(TestCase):
def test_obl_cv_seq(self):
obl = obl_cv_seq(0,3,1)
assert_array_almost_equal(obl,array([ -0.348602,
1.393206,
5.486800,
11.492120]),5)
class TestParabolicCylinder(TestCase):
def test_pbdn_seq(self):
pb = pbdn_seq(1,.1)
assert_array_almost_equal(pb,(array([ 0.9975,
0.0998]),
array([-0.0499,
0.9925])),4)
    def test_pbdv(self):
        pbv = pbdv(1,.2)
        derrl = 1/2.0*(.2)*pbdv(1,.2)[0] - pbdv(0,.2)[0]  # recurrence value; computed but never asserted
def test_pbdv_seq(self):
pbn = pbdn_seq(1,.1)
pbv = pbdv_seq(1,.1)
assert_array_almost_equal(pbv,(real(pbn[0]),real(pbn[1])),4)
def test_pbdv_points(self):
# simple case
eta = np.linspace(-10, 10, 5)
z = 2**(eta/2)*np.sqrt(np.pi)/gamma(.5-.5*eta)
assert_tol_equal(pbdv(eta, 0.)[0], z, rtol=1e-14, atol=1e-14)
# some points
assert_tol_equal(pbdv(10.34, 20.44)[0], 1.3731383034455e-32, rtol=1e-12)
assert_tol_equal(pbdv(-9.53, 3.44)[0], 3.166735001119246e-8, rtol=1e-12)
def test_pbdv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = pbdv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (pbdv(eta, x + eps)[0] - pbdv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
def test_pbvv_gradient(self):
x = np.linspace(-4, 4, 8)[:,None]
eta = np.linspace(-10, 10, 5)[None,:]
p = pbvv(eta, x)
eps = 1e-7 + 1e-7*abs(x)
dp = (pbvv(eta, x + eps)[0] - pbvv(eta, x - eps)[0]) / eps / 2.
assert_tol_equal(p[1], dp, rtol=1e-6, atol=1e-6)
class TestPolygamma(TestCase):
# from Table 6.2 (pg. 271) of A&S
def test_polygamma(self):
poly2 = polygamma(2,1)
poly3 = polygamma(3,1)
assert_almost_equal(poly2,-2.4041138063,10)
assert_almost_equal(poly3,6.4939394023,10)
class TestProCvSeq(TestCase):
def test_pro_cv_seq(self):
prol = pro_cv_seq(0,3,1)
assert_array_almost_equal(prol,array([ 0.319000,
2.593084,
6.533471,
12.514462]),5)
class TestPsi(TestCase):
def test_psi(self):
ps = psi(1)
assert_almost_equal(ps,-0.57721566490153287,8)
class TestRadian(TestCase):
def test_radian(self):
rad = radian(90,0,0)
assert_almost_equal(rad,pi/2.0,5)
def test_radianmore(self):
rad1 = radian(90,1,60)
assert_almost_equal(rad1,pi/2+0.0005816135199345904,5)
class TestRiccati(TestCase):
def test_riccati_jn(self):
jnrl = (sph_jn(1,.2)[0]*.2,sph_jn(1,.2)[0]+sph_jn(1,.2)[1]*.2)
ricjn = riccati_jn(1,.2)
assert_array_almost_equal(ricjn,jnrl,8)
def test_riccati_yn(self):
ynrl = (sph_yn(1,.2)[0]*.2,sph_yn(1,.2)[0]+sph_yn(1,.2)[1]*.2)
ricyn = riccati_yn(1,.2)
assert_array_almost_equal(ricyn,ynrl,8)
class TestRound(TestCase):
def test_round(self):
rnd = map(int,(round(10.1),round(10.4),round(10.5),round(10.6)))
# Note: According to the documentation, scipy.special.round is
# supposed to round to the nearest even number if the fractional
# part is exactly 0.5. On some platforms, this does not appear
# to work and thus this test may fail. However, this unit test is
# correctly written.
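        # Under round-half-to-even, for example, round(10.5) -> 10 while
        # round(11.5) -> 12.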
rndrl = (10,10,10,11)
assert_array_equal(rnd,rndrl)
class _test_sh_legendre(TestCase):
def test_sh_legendre(self):
# P*_n(x) = P_n(2x-1)
psub = poly1d([2,-1])
Ps0 = sh_legendre(0)
Ps1 = sh_legendre(1)
Ps2 = sh_legendre(2)
Ps3 = sh_legendre(3)
Ps4 = sh_legendre(4)
Ps5 = sh_legendre(5)
pse0 = legendre(0)(psub)
pse1 = legendre(1)(psub)
pse2 = legendre(2)(psub)
pse3 = legendre(3)(psub)
pse4 = legendre(4)(psub)
pse5 = legendre(5)(psub)
assert_array_almost_equal(Ps0.c,pse0.c,13)
assert_array_almost_equal(Ps1.c,pse1.c,13)
assert_array_almost_equal(Ps2.c,pse2.c,13)
assert_array_almost_equal(Ps3.c,pse3.c,13)
assert_array_almost_equal(Ps4.c,pse4.c,12)
assert_array_almost_equal(Ps5.c,pse5.c,12)
class _test_sh_chebyt(TestCase):
def test_sh_chebyt(self):
# T*_n(x) = T_n(2x-1)
psub = poly1d([2,-1])
Ts0 = sh_chebyt(0)
Ts1 = sh_chebyt(1)
Ts2 = sh_chebyt(2)
Ts3 = sh_chebyt(3)
Ts4 = sh_chebyt(4)
Ts5 = sh_chebyt(5)
tse0 = chebyt(0)(psub)
tse1 = chebyt(1)(psub)
tse2 = chebyt(2)(psub)
tse3 = chebyt(3)(psub)
tse4 = chebyt(4)(psub)
tse5 = chebyt(5)(psub)
assert_array_almost_equal(Ts0.c,tse0.c,13)
assert_array_almost_equal(Ts1.c,tse1.c,13)
assert_array_almost_equal(Ts2.c,tse2.c,13)
assert_array_almost_equal(Ts3.c,tse3.c,13)
assert_array_almost_equal(Ts4.c,tse4.c,12)
assert_array_almost_equal(Ts5.c,tse5.c,12)
class _test_sh_chebyu(TestCase):
def test_sh_chebyu(self):
# U*_n(x) = U_n(2x-1)
psub = poly1d([2,-1])
Us0 = sh_chebyu(0)
Us1 = sh_chebyu(1)
Us2 = sh_chebyu(2)
Us3 = sh_chebyu(3)
Us4 = sh_chebyu(4)
Us5 = sh_chebyu(5)
use0 = chebyu(0)(psub)
use1 = chebyu(1)(psub)
use2 = chebyu(2)(psub)
use3 = chebyu(3)(psub)
use4 = chebyu(4)(psub)
use5 = chebyu(5)(psub)
assert_array_almost_equal(Us0.c,use0.c,13)
assert_array_almost_equal(Us1.c,use1.c,13)
assert_array_almost_equal(Us2.c,use2.c,13)
assert_array_almost_equal(Us3.c,use3.c,13)
assert_array_almost_equal(Us4.c,use4.c,12)
assert_array_almost_equal(Us5.c,use5.c,11)
class _test_sh_jacobi(TestCase):
def test_sh_jacobi(self):
# G^(p,q)_n(x) = n! gamma(n+p)/gamma(2*n+p) * P^(p-q,q-1)_n(2*x-1)
conv = lambda n,p: _gam(n+1)*_gam(n+p)/_gam(2*n+p)
psub = poly1d([2,-1])
q = 4*rand()
p = q-1 + 2*rand()
#print "shifted jacobi p,q = ", p, q
G0 = sh_jacobi(0,p,q)
G1 = sh_jacobi(1,p,q)
G2 = sh_jacobi(2,p,q)
G3 = sh_jacobi(3,p,q)
G4 = sh_jacobi(4,p,q)
G5 = sh_jacobi(5,p,q)
ge0 = jacobi(0,p-q,q-1)(psub) * conv(0,p)
ge1 = jacobi(1,p-q,q-1)(psub) * conv(1,p)
ge2 = jacobi(2,p-q,q-1)(psub) * conv(2,p)
ge3 = jacobi(3,p-q,q-1)(psub) * conv(3,p)
ge4 = jacobi(4,p-q,q-1)(psub) * conv(4,p)
ge5 = jacobi(5,p-q,q-1)(psub) * conv(5,p)
assert_array_almost_equal(G0.c,ge0.c,13)
assert_array_almost_equal(G1.c,ge1.c,13)
assert_array_almost_equal(G2.c,ge2.c,13)
assert_array_almost_equal(G3.c,ge3.c,13)
assert_array_almost_equal(G4.c,ge4.c,13)
assert_array_almost_equal(G5.c,ge5.c,13)
class TestSpherical(TestCase):
def test_sph_harm(self):
pass
def test_sph_in(self):
i1n = sph_in(1,.2)
inp0 = (i1n[0][1])
inp1 = (i1n[0][0] - 2.0/0.2 * i1n[0][1])
assert_array_almost_equal(i1n[0],array([1.0066800127054699381,
0.066933714568029540839]),12)
assert_array_almost_equal(i1n[1],[inp0,inp1],12)
def test_sph_inkn(self):
spikn = r_[sph_in(1,.2)+sph_kn(1,.2)]
inkn = r_[sph_inkn(1,.2)]
assert_array_almost_equal(inkn,spikn,10)
def test_sph_jn(self):
s1 = sph_jn(2,.2)
s10 = -s1[0][1]
s11 = s1[0][0]-2.0/0.2*s1[0][1]
s12 = s1[0][1]-3.0/0.2*s1[0][2]
assert_array_almost_equal(s1[0],[0.99334665397530607731,
0.066400380670322230863,
0.0026590560795273856680],12)
assert_array_almost_equal(s1[1],[s10,s11,s12],12)
def test_sph_jnyn(self):
jnyn = r_[sph_jn(1,.2) + sph_yn(1,.2)] # tuple addition
jnyn1 = r_[sph_jnyn(1,.2)]
assert_array_almost_equal(jnyn1,jnyn,9)
def test_sph_kn(self):
kn = sph_kn(2,.2)
kn0 = -kn[0][1]
kn1 = -kn[0][0]-2.0/0.2*kn[0][1]
kn2 = -kn[0][1]-3.0/0.2*kn[0][2]
assert_array_almost_equal(kn[0],[6.4302962978445670140,
38.581777787067402086,
585.15696310385559829],12)
assert_array_almost_equal(kn[1],[kn0,kn1,kn2],9)
def test_sph_yn(self):
sy1 = sph_yn(2,.2)[0][2]
sy2 = sph_yn(0,.2)[0][0]
        sphpy = (sph_yn(1,.2)[0][0]-2*sph_yn(2,.2)[0][2])/3  # correct derivative value
        assert_almost_equal(sy1,-377.52483,5)  # previous values in the system
        assert_almost_equal(sy2,-4.9003329,5)
        sy3 = sph_yn(1,.2)[1][1]
        assert_almost_equal(sy3,sphpy,4)  # compare the computed derivative against the correct value
class TestStruve(object):
def _series(self, v, z, n=100):
"""Compute Struve function & error estimate from its power series."""
k = arange(0, n)
r = (-1)**k * (.5*z)**(2*k+v+1)/gamma(k+1.5)/gamma(k+v+1.5)
err = abs(r).max() * finfo(float_).eps * n
return r.sum(), err
def test_vs_series(self):
"""Check Struve function versus its power series"""
for v in [-20, -10, -7.99, -3.4, -1, 0, 1, 3.4, 12.49, 16]:
for z in [1, 10, 19, 21, 30]:
value, err = self._series(v, z)
                assert_tol_equal(struve(v, z), value, rtol=0, atol=err)
def test_some_values(self):
assert_tol_equal(struve(-7.99, 21), 0.0467547614113, rtol=1e-7)
assert_tol_equal(struve(-8.01, 21), 0.0398716951023, rtol=1e-8)
assert_tol_equal(struve(-3.0, 200), 0.0142134427432, rtol=1e-12)
assert_tol_equal(struve(-8.0, -41), 0.0192469727846, rtol=1e-11)
assert_equal(struve(-12, -41), -struve(-12, 41))
assert_equal(struve(+12, -41), -struve(+12, 41))
assert_equal(struve(-11, -41), +struve(-11, 41))
assert_equal(struve(+11, -41), +struve(+11, 41))
assert isnan(struve(-7.1, -1))
assert isnan(struve(-10.1, -1))
def test_regression_679(self):
"""Regression test for #679"""
assert_tol_equal(struve(-1.0, 20 - 1e-8), struve(-1.0, 20 + 1e-8))
assert_tol_equal(struve(-2.0, 20 - 1e-8), struve(-2.0, 20 + 1e-8))
assert_tol_equal(struve(-4.3, 20 - 1e-8), struve(-4.3, 20 + 1e-8))
if __name__ == "__main__":
run_module_suite()
|
wpoely86/easybuild-framework
|
refs/heads/develop
|
easybuild/toolchains/intelcuda.py
|
1
|
##
# Copyright 2013-2016 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
EasyBuild support for an Intel + CUDA compiler toolchain.
:author: Ake Sandgren (HPC2N)
"""
from easybuild.toolchains.iimpic import Iimpic
from easybuild.toolchains.fft.intelfftw import IntelFFTW
from easybuild.toolchains.linalg.intelmkl import IntelMKL
class Intelcuda(Iimpic, IntelMKL, IntelFFTW):
"""Compiler toolchain with Intel compilers (icc/ifort), Intel MPI,
Intel Math Kernel Library (MKL), Intel FFTW wrappers and CUDA"""
NAME = 'intelcuda'
SUBTOOLCHAIN = Iimpic.NAME
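# A hypothetical sketch of the composition pattern above (names illustrative,
# not a real toolchain): the first base supplies compilers/MPI/CUDA, the
# mixins contribute MKL and the FFTW wrappers, and SUBTOOLCHAIN names the
# toolchain this one extends.
#
#     class Examplecuda(Iimpic, IntelMKL, IntelFFTW):
#         NAME = 'examplecuda'
#         SUBTOOLCHAIN = Iimpic.NAME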
|
Celedhrim/persomov
|
refs/heads/master
|
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvp.py
|
18
|
from __future__ import unicode_literals
from .common import InfoExtractor
class TvpIE(InfoExtractor):
IE_NAME = 'tvp.pl'
_VALID_URL = r'https?://www\.tvp\.pl/.*?wideo/(?P<date>\d+)/(?P<id>\d+)'
_TEST = {
'url': 'http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238',
'md5': '148408967a6a468953c0a75cbdaf0d7a',
'info_dict': {
'id': '12878238',
'ext': 'wmv',
'title': '31.10.2013 - Odcinek 2',
'description': '31.10.2013 - Odcinek 2',
},
'skip': 'Download has to use same server IP as extraction. Therefore, a good (load-balancing) DNS resolver will make the download fail.'
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
json_url = 'http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % video_id
params = self._download_json(
json_url, video_id, "Downloading video metadata")
video_url = params['video_url']
return {
'id': video_id,
'title': self._og_search_title(webpage),
'ext': 'wmv',
'url': video_url,
'description': self._og_search_description(webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}
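# For orientation, a minimal sketch of the InfoExtractor contract used above
# (hypothetical names, not a real extractor): youtube-dl picks the extractor
# whose _VALID_URL matches, then calls _real_extract, which must return a
# dict carrying at least 'id', 'title' and 'url'.
#
#     class ExampleIE(InfoExtractor):
#         _VALID_URL = r'https?://example\.com/video/(?P<id>\d+)'
#
#         def _real_extract(self, url):
#             video_id = self._match_id(url)
#             return {
#                 'id': video_id,
#                 'title': 'Example video %s' % video_id,
#                 'url': 'http://example.com/media/%s.mp4' % video_id,
#             }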
|
uranusjr/django
|
refs/heads/master
|
django/db/backends/utils.py
|
5
|
import datetime
import decimal
import functools
import hashlib
import logging
from time import time
from django.conf import settings
from django.db.utils import NotSupportedError
from django.utils.encoding import force_bytes
from django.utils.timezone import utc
logger = logging.getLogger('django.db.backends')
class CursorWrapper:
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(['fetchone', 'fetchmany', 'fetchall', 'nextset'])
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
with self.db.wrap_database_errors:
yield from self.cursor
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
# Close instead of passing through to avoid backend-specific behavior
# (#17671). Catch errors liberally because errors in cleanup code
# aren't useful.
try:
self.close()
except self.db.Database.Error:
pass
# The following methods cannot be implemented in __getattr__, because the
# code must run when the method is invoked, not just when it is accessed.
def callproc(self, procname, params=None, kparams=None):
# Keyword parameters for callproc aren't supported in PEP 249, but the
# database driver may support them (e.g. cx_Oracle).
if kparams is not None and not self.db.features.supports_callproc_kwargs:
raise NotSupportedError(
'Keyword parameters for callproc are not supported on this '
'database backend.'
)
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None and kparams is None:
return self.cursor.callproc(procname)
elif kparams is None:
return self.cursor.callproc(procname, params)
else:
params = params or ()
return self.cursor.callproc(procname, params, kparams)
def execute(self, sql, params=None):
return self._execute_with_wrappers(sql, params, many=False, executor=self._execute)
def executemany(self, sql, param_list):
return self._execute_with_wrappers(sql, param_list, many=True, executor=self._executemany)
def _execute_with_wrappers(self, sql, params, many, executor):
context = {'connection': self.db, 'cursor': self}
for wrapper in reversed(self.db.execute_wrappers):
executor = functools.partial(wrapper, executor)
return executor(sql, params, many, context)
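    # Sketch of a caller-supplied wrapper consumed by the loop above (the
    # wrapper itself is hypothetical; `connection.execute_wrapper` is the
    # public entry point since Django 2.0). Wrappers are applied in reverse
    # so the first-installed one ends up outermost, and each receives
    # (execute, sql, params, many, context):
    #
    #     def log_queries(execute, sql, params, many, context):
    #         print(sql, params)
    #         return execute(sql, params, many, context)
    #
    #     with connection.execute_wrapper(log_queries):
    #         ...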
def _execute(self, sql, params, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def _executemany(self, sql, param_list, *ignored_wrapper_args):
self.db.validate_no_broken_transaction()
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
class CursorDebugWrapper(CursorWrapper):
# XXX callproc isn't instrumented at this time.
def execute(self, sql, params=None):
start = time()
try:
return super().execute(sql, params)
finally:
stop = time()
duration = stop - start
sql = self.db.ops.last_executed_query(self.cursor, sql, params)
self.db.queries_log.append({
'sql': sql,
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, params,
extra={'duration': duration, 'sql': sql, 'params': params}
)
def executemany(self, sql, param_list):
start = time()
try:
return super().executemany(sql, param_list)
finally:
stop = time()
duration = stop - start
try:
times = len(param_list)
except TypeError: # param_list could be an iterator
times = '?'
self.db.queries_log.append({
'sql': '%s times: %s' % (times, sql),
'time': "%.3f" % duration,
})
logger.debug(
'(%.3f) %s; args=%s', duration, sql, param_list,
extra={'duration': duration, 'sql': sql, 'params': param_list}
)
###############################################
# Converters from database (string) to Python #
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # return None if s is null
def typecast_time(s): # does NOT store time zone information
if not s:
return None
hour, minutes, seconds = s.split(':')
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
return datetime.time(int(hour), int(minutes), int(seconds), int((microseconds + '000000')[:6]))
def typecast_timestamp(s): # does NOT store time zone information
# "2005-07-29 15:48:00.590358-05"
# "2005-07-29 09:56:00-05"
if not s:
return None
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently it's ignored.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
elif '+' in t:
t, tz = t.split('+', 1)
tz = '+' + tz
else:
tz = ''
dates = d.split('-')
times = t.split(':')
seconds = times[2]
if '.' in seconds: # check whether seconds have a fractional part
seconds, microseconds = seconds.split('.')
else:
microseconds = '0'
tzinfo = utc if settings.USE_TZ else None
return datetime.datetime(
int(dates[0]), int(dates[1]), int(dates[2]),
int(times[0]), int(times[1]), int(seconds),
int((microseconds + '000000')[:6]), tzinfo
)
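# Illustrative round-trips for the converters above (assuming USE_TZ=False):
#     >>> typecast_time('09:56:00')
#     datetime.time(9, 56)
#     >>> typecast_timestamp('2005-07-29 15:48:00.590358-05')
#     datetime.datetime(2005, 7, 29, 15, 48, 0, 590358)
# The timezone suffix is split off but discarded, as noted above.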
###############################################
# Converters from Python to database (string) #
###############################################
def rev_typecast_decimal(d):
if d is None:
return None
return str(d)
def split_identifier(identifier):
"""
Split a SQL identifier into a two element tuple of (namespace, name).
    The identifier could be a table, column, or sequence name that might be prefixed
by a namespace.
"""
try:
namespace, name = identifier.split('"."')
except ValueError:
namespace, name = '', identifier
return namespace.strip('"'), name.strip('"')
def truncate_name(identifier, length=None, hash_len=4):
"""
Shorten a SQL identifier to a repeatable mangled version with the given
length.
If a quote stripped name contains a namespace, e.g. USERNAME"."TABLE,
truncate the table portion only.
"""
namespace, name = split_identifier(identifier)
if length is None or len(name) <= length:
return identifier
digest = hashlib.md5(force_bytes(name)).hexdigest()[:hash_len]
return '%s%s%s' % ('%s"."' % namespace if namespace else '', name[:length - hash_len], digest)
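# Illustration of the scheme above (the digest is a placeholder here, since
# it is the first hash_len hex chars of md5(name)):
#     truncate_name('a_very_long_table_name', length=10)
#     -> 'a_very' + <4-char digest>
# For a namespaced name such as USERNAME"."TABLE only the table portion is
# shortened; the namespace survives intact.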
def format_number(value, max_digits, decimal_places):
"""
Format a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
return None
if isinstance(value, decimal.Decimal):
context = decimal.getcontext().copy()
if max_digits is not None:
context.prec = max_digits
if decimal_places is not None:
value = value.quantize(decimal.Decimal(".1") ** decimal_places, context=context)
else:
context.traps[decimal.Rounded] = 1
value = context.create_decimal(value)
return "{:f}".format(value)
if decimal_places is not None:
return "%.*f" % (decimal_places, value)
return "{:f}".format(value)
def strip_quotes(table_name):
"""
Strip quotes off of quoted table names to make them safe for use in index
names, sequence names, etc. For example '"USER"."TABLE"' (an Oracle naming
scheme) becomes 'USER"."TABLE'.
"""
has_quotes = table_name.startswith('"') and table_name.endswith('"')
return table_name[1:-1] if has_quotes else table_name
|
MER-GROUP/intellij-community
|
refs/heads/master
|
python/lib/Lib/site-packages/django/conf/locale/th/formats.py
|
433
|
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j F Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j F Y, G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j F'
SHORT_DATE_FORMAT = 'j M Y'
SHORT_DATETIME_FORMAT = 'j M Y, G:i:s'
# FIRST_DAY_OF_WEEK =
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# DATE_INPUT_FORMATS =
# TIME_INPUT_FORMATS =
# DATETIME_INPUT_FORMATS =
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
# NUMBER_GROUPING =
|
ericfc/django
|
refs/heads/master
|
tests/admin_scripts/app_raising_warning/models.py
|
391
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core import checks
from django.db import models
class ModelRaisingMessages(models.Model):
@classmethod
def check(self, **kwargs):
return [
checks.Warning(
'A warning',
hint=None,
),
]
|
hackcyprus/jobber
|
refs/heads/master
|
jobber/script.py
|
1
|
"""
jobber.script
~~~~~~~~~~~~~
Script utilities.
"""
import sys
from jobber.factory import create_app
from jobber.database import db
def run(main, *args):
"""Runs the script in an application context and manages the session cycle.
:param main: A function to run.
:param *args: Positional arguments to the `main` function.
"""
app = create_app(__name__)
with app.app_context():
# Create a new session for this script and commit/rollback accordingly.
session = db.session
try:
args += (session,)
main(*args)
session.commit()
except:
if session.is_active:
session.rollback()
raise
finally:
session.remove()
def die(reason):
"""Prints `reason` and kills script with exit code 1.
:param reason: Reason phrase.
"""
print red(reason)
sys.exit(1)
def prompt(message, yesno=False):
"""Prompts the user for a value.
:param message: A string to display at the prompt.
:param yesno: A flag indicating whether the user should reply y/n.
"""
if yesno:
message = u"{} [y/N]".format(message)
value = raw_input(u"{}: ".format(message))
return value.lower() == 'y' if yesno else value
def termcolor(code):
"""Decorator that wraps text with `code` for colored terminal output."""
def wrapper(text):
return u"\033[{}m{}\033[0m".format(code, text)
return wrapper
red = termcolor('31')
green = termcolor('32')
yellow = termcolor('33')
blue = termcolor('34')
magenta = termcolor('35')
cyan = termcolor('36')
white = termcolor('37')
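# Example usage of the helpers above (raw ANSI escapes; they only render as
# colors in a capable terminal):
#     >>> green(u'ok')
#     u'\x1b[32mok\x1b[0m'
#     die('bad input')  # prints the reason in red, then exits with status 1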
|
sarojaerabelli/HVGS
|
refs/heads/master
|
CareerTinderServer/CareerTinder/migrations/0007_merge_20160918_0247.py
|
1
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-09-18 06:47
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('CareerTinder', '0005_relations_status'),
('CareerTinder', '0006_auto_20160918_0224'),
]
operations = [
]
|
low-sky/spectral-cube
|
refs/heads/master
|
spectral_cube/__init__.py
|
4
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from ._astropy_init import __version__, test
from pkg_resources import get_distribution, DistributionNotFound
from .spectral_cube import (SpectralCube, VaryingResolutionSpectralCube)
from .dask_spectral_cube import (DaskSpectralCube, DaskVaryingResolutionSpectralCube)
from .stokes_spectral_cube import StokesSpectralCube
from .masks import (MaskBase, InvertedMask, CompositeMask,
BooleanArrayMask, LazyMask, LazyComparisonMask,
FunctionMask)
from .lower_dimensional_structures import (OneDSpectrum, Projection, Slice)
# Import the following sub-packages to make sure the I/O functions are registered
from .io import casa_image
del casa_image
from .io import class_lmv
del class_lmv
from .io import fits
del fits
__all__ = ['SpectralCube', 'VaryingResolutionSpectralCube',
'DaskSpectralCube', 'DaskVaryingResolutionSpectralCube',
'StokesSpectralCube', 'CompositeMask', 'LazyComparisonMask',
'LazyMask', 'BooleanArrayMask', 'FunctionMask',
'OneDSpectrum', 'Projection', 'Slice'
]
|
zhantyzgz/polaris
|
refs/heads/master
|
run.py
|
1
|
from core import bot
bot.start()
|
vlukes/sfepy
|
refs/heads/work
|
sfepy/discrete/common/extmods/setup.py
|
4
|
#!/usr/bin/env python
def configuration(parent_package='', top_path=None):
import os.path as op
from numpy.distutils.misc_util import Configuration
from sfepy import Config
site_config = Config()
system = site_config.system()
os_flag = {'posix' : 0, 'windows' : 1}[system]
auto_dir = op.dirname(__file__)
auto_name = op.split(auto_dir)[-1]
config = Configuration(auto_name, parent_package, top_path)
inline = 'inline' if system == 'posix' else '__inline'
defines = [('SFEPY_PLATFORM', os_flag),
('inline', inline)]
if '-DDEBUG_FMF' in site_config.debug_flags():
defines.append(('DEBUG_FMF', None))
if '-DDEBUG_MESH' in site_config.debug_flags():
defines.append(('DEBUG_MESH', None))
common_src = ['fmfield.c', 'refmaps.c', 'geommech.c', 'common_python.c']
config.add_library('sfepy_common',
sources=common_src,
extra_compiler_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir, site_config.python_include()],
macros=[('SFEPY_PLATFORM', os_flag),
('inline', inline)])
src = ['_fmfield.pyx']
config.add_extension('_fmfield',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['mappings.pyx']
config.add_extension('mappings',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['assemble.pyx']
config.add_extension('assemble',
sources=src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['cmesh.pyx', 'geomtrans.c', 'mesh.c', 'meshutils.c', 'sort.c',
'common_python.c']
config.add_extension('cmesh',
sources=src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['crefcoors.pyx', 'refcoors.c', 'geomtrans.c', 'mesh.c']
config.add_extension('crefcoors',
sources=src,
libraries=['sfepy_common'],
depends=common_src,
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
src = ['_geommech.pyx']
config.add_extension('_geommech',
sources=src,
libraries=['sfepy_common'],
extra_compile_args=site_config.compile_flags(),
extra_link_args=site_config.link_flags(),
include_dirs=[auto_dir],
define_macros=defines)
# Include *.pxd files in distribution tarball and install them along
# with the extension modules.
pxd_files = ['cmesh.pxd', 'mappings.pxd', 'types.pxd',
'_fmfield.pxd', '_geommech.pxd']
config.add_data_files(('', pxd_files))
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
openplans/planbox
|
refs/heads/staging
|
src/custom_domains/middleware.py
|
4
|
from django.conf import settings
from django.http import Http404
from custom_domains.models import DomainMapping, DefaultDomainMapping
class CustomDomainResolvingMiddleware(object):
def process_request(self, request):
# Get the domain. If it's one of our explicitly known domains, then
# proceed as normal.
domain = request.META.get('HTTP_HOST', None)
if domain is None or domain in settings.KNOWN_HOSTS:
request.domain_mapping = DefaultDomainMapping(domain)
request.actual_path_info = request.path_info
return
# If the domain is implicit, check that it's valid.
try:
mapping = DomainMapping.objects.get(domain=domain)
except DomainMapping.DoesNotExist:
raise Http404
# Finally, stick the valid mapping on the request, and reassign the
# path_info attribute.
request.domain_mapping = mapping
request.actual_path_info = request.path_info
request.path_info = '/'.join([
mapping.root_path.rstrip('/'),
request.actual_path_info.lstrip('/')
])
return
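# Illustration with hypothetical values: given a DomainMapping of
# domain='blog.example.org', root_path='/orgs/acme', a request for
# http://blog.example.org/projects/1/ is dispatched as if its path were
# '/orgs/acme/projects/1/', while request.actual_path_info preserves the
# original '/projects/1/'.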
|
daviwesley/Empire
|
refs/heads/master
|
lib/modules/situational_awareness/network/sharefinder.py
|
10
|
from lib.common import helpers
class Module:
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'Invoke-ShareFinder',
'Author': ['@harmj0y'],
'Description': ('Finds shares on machines in the domain.'),
'Background' : True,
'OutputExtension' : None,
'NeedsAdmin' : False,
'OpsecSafe' : True,
'MinPSVersion' : '2',
'Comments': [
'https://github.com/PowerShellEmpire/PowerTools/tree/master/PowerView'
]
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
'Description' : 'Agent to run module on.',
'Required' : True,
'Value' : ''
},
'Hosts' : {
'Description' : 'Hosts to enumerate.',
'Required' : False,
'Value' : ''
},
'HostList' : {
'Description' : 'Hostlist to enumerate.',
'Required' : False,
'Value' : ''
},
'HostFilter' : {
'Description' : 'Host filter name to query AD for, wildcards accepted.',
'Required' : False,
'Value' : ''
},
'NoPing' : {
'Description' : 'Don\'t ping each host to ensure it\'s up before enumerating.',
'Required' : False,
'Value' : ''
},
'CheckShareAccess' : {
'Description' : 'Switch. Only display found shares that the local user has access to.',
'Required' : False,
'Value' : ''
},
'Delay' : {
'Description' : 'Delay between enumerating hosts, defaults to 0.',
'Required' : False,
'Value' : ''
},
'Domain' : {
'Description' : 'Domain to enumerate for hosts.',
'Required' : False,
'Value' : ''
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self):
# read in the common module source code
moduleSource = self.mainMenu.installPath + "/data/module_source/situational_awareness/network/Invoke-ShareFinder.ps1"
try:
f = open(moduleSource, 'r')
except:
print helpers.color("[!] Could not read module source path at: " + str(moduleSource))
return ""
moduleCode = f.read()
f.close()
script = moduleCode
script += "Invoke-ShareFinder "
for option,values in self.options.iteritems():
if option.lower() != "agent":
if values['Value'] and values['Value'] != '':
if values['Value'].lower() == "true":
# if we're just adding a switch
script += " -" + str(option)
else:
script += " -" + str(option) + " " + str(values['Value'])
script += '| Out-String | %{$_ + \"`n\"};"`nInvoke-ShareFinder completed"'
return script
|
ualikhansars/Gwent
|
refs/heads/master
|
lib/python2.7/site-packages/django/http/utils.py
|
372
|
"""
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def conditional_content_removal(request, response):
"""
Removes the content of responses for HEAD requests, 1xx, 204 and 304
responses. Ensures compliance with RFC 2616, section 4.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
|
OliverCole/ZeroNet
|
refs/heads/master
|
plugins/PeerDb/PeerDbPlugin.py
|
1
|
import time
import sqlite3
import random
import atexit
import gevent
from Plugin import PluginManager
@PluginManager.registerTo("ContentDb")
class ContentDbPlugin(object):
def __init__(self, *args, **kwargs):
atexit.register(self.saveAllPeers)
super(ContentDbPlugin, self).__init__(*args, **kwargs)
def getSchema(self):
schema = super(ContentDbPlugin, self).getSchema()
schema["tables"]["peer"] = {
"cols": [
["site_id", "INTEGER REFERENCES site (site_id) ON DELETE CASCADE"],
["address", "TEXT NOT NULL"],
["port", "INTEGER NOT NULL"],
["hashfield", "BLOB"],
["reputation", "INTEGER NOT NULL"],
["time_added", "INTEGER NOT NULL"],
["time_found", "INTEGER NOT NULL"]
],
"indexes": [
"CREATE UNIQUE INDEX peer_key ON peer (site_id, address, port)"
],
"schema_changed": 2
}
return schema
def loadPeers(self, site):
s = time.time()
site_id = self.site_ids.get(site.address)
res = self.execute("SELECT * FROM peer WHERE site_id = :site_id", {"site_id": site_id})
num = 0
num_hashfield = 0
for row in res:
peer = site.addPeer(str(row["address"]), row["port"])
            if not peer:  # Already exists
continue
if row["hashfield"]:
peer.hashfield.replaceFromString(row["hashfield"])
num_hashfield += 1
peer.time_added = row["time_added"]
peer.time_found = row["time_found"]
peer.reputation = row["reputation"]
if row["address"].endswith(".onion"):
                peer.reputation = peer.reputation / 2 - 1  # Onion peers are less likely to be working
num += 1
if num_hashfield:
site.content_manager.has_optional_files = True
site.log.debug("%s peers (%s with hashfield) loaded in %.3fs" % (num, num_hashfield, time.time() - s))
def iteratePeers(self, site):
site_id = self.site_ids.get(site.address)
for key, peer in site.peers.iteritems():
address, port = key.rsplit(":", 1)
if peer.has_hashfield:
hashfield = sqlite3.Binary(peer.hashfield.tostring())
else:
hashfield = ""
yield (site_id, address, port, hashfield, peer.reputation, int(peer.time_added), int(peer.time_found))
def savePeers(self, site, spawn=False):
if spawn:
            # Save peers every hour (+ some random seconds, so that not every site updates at the same time)
gevent.spawn_later(60 * 60 + random.randint(0, 60), self.savePeers, site, spawn=True)
if not site.peers:
site.log.debug("Peers not saved: No peers found")
return
s = time.time()
site_id = self.site_ids.get(site.address)
cur = self.getCursor()
cur.execute("BEGIN")
try:
cur.execute("DELETE FROM peer WHERE site_id = :site_id", {"site_id": site_id})
cur.cursor.executemany(
"INSERT INTO peer (site_id, address, port, hashfield, reputation, time_added, time_found) VALUES (?, ?, ?, ?, ?, ?, ?)",
self.iteratePeers(site)
)
except Exception as err:
site.log.error("Save peer error: %s" % err)
finally:
cur.execute("END")
site.log.debug("Peers saved in %.3fs" % (time.time() - s))
def initSite(self, site):
super(ContentDbPlugin, self).initSite(site)
gevent.spawn_later(0.5, self.loadPeers, site)
gevent.spawn_later(60*60, self.savePeers, site, spawn=True)
def saveAllPeers(self):
for site in self.sites.values():
try:
self.savePeers(site)
except Exception, err:
site.log.error("Save peer error: %s" % err)
|
carlmw/oscar-wager
|
refs/heads/master
|
django/db/backends/mysql/creation.py
|
311
|
from django.db.backends.creation import BaseDatabaseCreation
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated MySQL column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
data_types = {
'AutoField': 'integer AUTO_INCREMENT',
'BooleanField': 'bool',
'CharField': 'varchar(%(max_length)s)',
'CommaSeparatedIntegerField': 'varchar(%(max_length)s)',
'DateField': 'date',
'DateTimeField': 'datetime',
'DecimalField': 'numeric(%(max_digits)s, %(decimal_places)s)',
'FileField': 'varchar(%(max_length)s)',
'FilePathField': 'varchar(%(max_length)s)',
'FloatField': 'double precision',
'IntegerField': 'integer',
'BigIntegerField': 'bigint',
'IPAddressField': 'char(15)',
'NullBooleanField': 'bool',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer UNSIGNED',
'PositiveSmallIntegerField': 'smallint UNSIGNED',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'longtext',
'TimeField': 'time',
}
def sql_table_creation_suffix(self):
suffix = []
if self.connection.settings_dict['TEST_CHARSET']:
suffix.append('CHARACTER SET %s' % self.connection.settings_dict['TEST_CHARSET'])
if self.connection.settings_dict['TEST_COLLATION']:
suffix.append('COLLATE %s' % self.connection.settings_dict['TEST_COLLATION'])
return ' '.join(suffix)
def sql_for_inline_foreign_key_references(self, field, known_models, style):
"All inline references are pending under MySQL"
return [], True
def sql_for_inline_many_to_many_references(self, model, field, style):
from django.db import models
opts = model._meta
qn = self.connection.ops.quote_name
table_output = [
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_column_name())),
style.SQL_COLTYPE(models.ForeignKey(model).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL')),
' %s %s %s,' %
(style.SQL_FIELD(qn(field.m2m_reverse_name())),
style.SQL_COLTYPE(models.ForeignKey(field.rel.to).db_type(connection=self.connection)),
style.SQL_KEYWORD('NOT NULL'))
]
deferred = [
(field.m2m_db_table(), field.m2m_column_name(), opts.db_table,
opts.pk.column),
(field.m2m_db_table(), field.m2m_reverse_name(),
field.rel.to._meta.db_table, field.rel.to._meta.pk.column)
]
return table_output, deferred
|
zygmuntz/pybrain
|
refs/heads/master
|
examples/rl/environments/cartpole/cart_reinf.py
|
30
|
#!/usr/bin/env python
#########################################################################
# Reinforcement Learning with REINFORCE on the CartPoleEnvironment
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
#########################################################################
__author__ = "Thomas Rueckstiess, Frank Sehnke"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.cartpole import CartPoleEnvironment, BalanceTask
from pybrain.rl.agents import LearningAgent
from pybrain.rl.learners import Reinforce
from pybrain.rl.experiments import EpisodicExperiment
batch=50 #number of samples per learning step
prnts=4 #number of learning steps after results are printed
epis=4000/batch/prnts #number of rollouts
numbExp=10 #number of experiments
et = ExTools(batch, prnts, kind = "learner") #tool for printing and plotting
for runs in range(numbExp):
# create environment
env = CartPoleEnvironment()
# create task
task = BalanceTask(env, 200, desiredValue=None)
# create controller network
net = buildNetwork(4, 1, bias=False)
# create agent with controller and learner (and its options)
agent = LearningAgent(net, Reinforce())
et.agent = agent
# create the experiment
experiment = EpisodicExperiment(task, agent)
#Do the experiment
for updates in range(epis):
for i in range(prnts):
experiment.doEpisodes(batch)
state, action, reward = agent.learner.dataset.getSequence(agent.learner.dataset.getNumSequences()-1)
et.printResults(reward.sum(), runs, updates)
et.addExps()
et.showExps()
|
sh4t/Sick-Beard
|
refs/heads/development
|
bs4/builder/_html5lib.py
|
423
|
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
    def fragmentClass(self):
        # Imported here to avoid a circular import at module load time.
        from bs4 import BeautifulSoup
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
def __setitem__(self, name, value):
"set attr", name, value
self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, basestring):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if not isinstance(child, basestring) and child.parent is not None:
node.element.extract()
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, basestring):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
if insertBefore:
text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(text, insertBefore)
else:
self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
append_after = new_parent.element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
first_child.previous_element = new_parents_last_descendant
first_child.previous_sibling = new_parents_last_child
# Fix the last child's next_element and next_sibling
last_child = to_append[-1]
last_child.next_element = new_parents_last_descendant_next_element
last_child.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
|
CSC301H-Fall2013/JuakStore
|
refs/heads/master
|
site-packages/django/contrib/gis/sitemaps/kml.py
|
482
|
from django.core import urlresolvers
from django.contrib.sitemaps import Sitemap
from django.contrib.gis.db.models.fields import GeometryField
from django.db import models
class KMLSitemap(Sitemap):
"""
A minimal hook to produce KML sitemaps.
"""
geo_format = 'kml'
def __init__(self, locations=None):
# If no locations specified, then we try to build for
# every model in installed applications.
self.locations = self._build_kml_sources(locations)
def _build_kml_sources(self, sources):
"""
Goes through the given sources and returns a 3-tuple of
the application label, module name, and field name of every
GeometryField encountered in the sources.
        If no sources are provided, then all installed models are examined.
"""
kml_sources = []
if sources is None:
sources = models.get_models()
for source in sources:
if isinstance(source, models.base.ModelBase):
for field in source._meta.fields:
if isinstance(field, GeometryField):
kml_sources.append((source._meta.app_label,
source._meta.module_name,
field.name))
elif isinstance(source, (list, tuple)):
if len(source) != 3:
raise ValueError('Must specify a 3-tuple of (app_label, module_name, field_name).')
kml_sources.append(source)
else:
raise TypeError('KML Sources must be a model or a 3-tuple.')
return kml_sources
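    # Illustrative inputs (hypothetical app/model/field names):
    #     KMLSitemap()                                # every installed model
    #     KMLSitemap([City])                          # a model class
    #     KMLSitemap([('world', 'city', 'point')])    # explicit 3-tuple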
def get_urls(self, page=1, site=None):
"""
        This method is overridden so the appropriate `geo_format` attribute
is placed on each URL element.
"""
urls = Sitemap.get_urls(self, page=page, site=site)
for url in urls: url['geo_format'] = self.geo_format
return urls
def items(self):
return self.locations
def location(self, obj):
return urlresolvers.reverse('django.contrib.gis.sitemaps.views.%s' % self.geo_format,
kwargs={'label' : obj[0],
'model' : obj[1],
'field_name': obj[2],
}
)
class KMZSitemap(KMLSitemap):
geo_format = 'kmz'
|
duhzecca/cinder
|
refs/heads/master
|
cinder/volume/drivers/lenovo/lenovo_fc.py
|
14
|
# Copyright 2014 Objectif Libre
# Copyright 2015 DotHill Systems
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from cinder.volume.drivers.dothill import dothill_fc
from cinder.volume.drivers.lenovo import lenovo_common
class LenovoFCDriver(dothill_fc.DotHillFCDriver):
"""OpenStack Fibre Channel cinder drivers for Lenovo Storage arrays.
Version history:
1.0 - Inheriting from DotHill cinder drivers.
"""
VERSION = "1.0"
def __init__(self, *args, **kwargs):
super(LenovoFCDriver, self).__init__(*args, **kwargs)
self.configuration.append_config_values(lenovo_common.common_opt)
def _init_common(self):
return lenovo_common.LenovoCommon(self.configuration)
|
white-lab/pyproteome
|
refs/heads/master
|
pyproteome/motifs/neighborhood.py
|
1
|
from matplotlib import pyplot as plt
from scipy import stats
from . import motif, plogo
def enriched_neighborhood(
data,
f,
residues,
nmer_length=7,
count_cutoff=2,
mods=None,
):
'''
Calculates the hypergeometric enrichment value for the number of
adjacent residues within a given window around all modification sites
in a data set.
Parameters
----------
data : :class:`pyproteome.data_sets.data_set.DataSet`
f : dict or list of dict
residues : list of str
nmer_length : int, optional
count_cutoff : int, optional
mods : str or list of str
Returns
-------
f : :class:`matplotlib.figure.Figure`
ax : :class:`matplotlib.axes.Axes`
pval : float
P-value, calculated with :class:`scipy.stats.hypergeom`.
K : int
Number of sequences with # residues > count_cutoff in background list.
N : int
Size of the background list of sequences.
k : int
Number of sequences with # residues > count_cutoff in foreground list.
n : int
Size of the foreground list of sequences.
'''
if mods is None:
mods = [(None, 'Phospho')]
background = motif.generate_n_mers(
data['Sequence'],
mods=mods,
n=nmer_length,
all_matches=False,
)
foreground = motif.generate_n_mers(
data.filter(f)['Sequence'],
mods=mods,
n=nmer_length,
all_matches=False,
)
N = len(background)
K = len([
i
for i in background
if sum(i.count(j) for j in residues) >= count_cutoff
])
n = len(foreground)
k = len([
i
for i in foreground
if sum(i.count(j) for j in residues) >= count_cutoff
])
pval = stats.hypergeom(
N,
K,
n,
).sf(
min([k, n]) - 1
)
fig, ax = plt.subplots(figsize=(4, 4))
if background:
ax.hist(
[
sum(i.count(j) for j in residues)
for i in background
],
density=True,
alpha=0.5,
color='green',
bins=range(0, nmer_length, 1),
label='background',
)
if foreground:
ax.hist(
[
sum(i.count(j) for j in residues)
for i in foreground
],
density=True,
alpha=0.7,
color='orange',
bins=range(0, nmer_length, 1),
label=plogo.format_title(f=f),
)
ax.legend()
ax.set_ylabel('Frequency')
return fig, ax, pval, K, N, k, n
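# Stand-alone sketch of the enrichment test performed above (toy numbers, not
# from any data set): out of N=100 background n-mers, K=20 pass the residue
# cutoff; the foreground holds n=10 n-mers of which k=6 pass.
#
#     from scipy import stats
#     pval = stats.hypergeom(100, 20, 10).sf(6 - 1)
#
# sf(k - 1) is P(X >= k): the chance of an enrichment at least this strong
# arising by chance.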
|
andela-ifageyinbo/django
|
refs/heads/master
|
tests/auth_tests/settings.py
|
331
|
import os
from django.utils._os import upath
AUTH_MIDDLEWARE_CLASSES = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
]
AUTH_TEMPLATES = [{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(os.path.dirname(upath(__file__)), 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
}]
|
bioasp/meneco
|
refs/heads/master
|
meneco/__init__.py
|
1
|
# Copyright (c) 2012, Sven Thiele <sthiele78@gmail.com>
#
# This file is part of meneco.
#
# meneco is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# meneco is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with meneco. If not, see <http://www.gnu.org/licenses/>.
# -*- coding: utf-8 -*-
from meneco.meneco import run_meneco
|
amuzhou/imbox
|
refs/heads/master
|
imbox/utils.py
|
4
|
from __future__ import unicode_literals
from six import PY3
import logging
logger = logging.getLogger(__name__)
if PY3:
def str_encode(value='', encoding=None, errors='strict'):
logger.debug("Encode str {} with and errors {}".format(value, encoding, errors))
return str(value, encoding, errors)
def str_decode(value='', encoding=None, errors='strict'):
return bytes(value, encoding, errors).decode('utf-8')
else:
def str_encode(string='', encoding=None, errors='strict'):
return unicode(string, encoding, errors)
def str_decode(value='', encoding=None, errors='strict'):
return value.decode(encoding, errors)
|
brianjgeiger/osf.io
|
refs/heads/develop
|
scripts/analytics/institution_summary.py
|
15
|
import django
django.setup()
import pytz
import logging
from dateutil.parser import parse
from datetime import datetime, timedelta
from django.db.models import Q
from django.utils import timezone
from framework.encryption import ensure_bytes
from osf.models import Institution
from website.app import init_app
from scripts.analytics.base import SummaryAnalytics
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class InstitutionSummary(SummaryAnalytics):
@property
def collection_name(self):
return 'institution_summary'
def get_events(self, date):
super(InstitutionSummary, self).get_events(date)
institutions = Institution.objects.all()
counts = []
# Convert to a datetime at midnight for queries and the timestamp
timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=pytz.UTC)
query_datetime = timestamp_datetime + timedelta(days=1)
daily_query = Q(created__gte=timestamp_datetime)
public_query = Q(is_public=True)
private_query = Q(is_public=False)
# `embargoed` used private status to determine embargoes, but old registrations could be private and unapproved registrations can also be private
# `embargoed_v2` uses future embargo end dates on root
embargo_v2_query = Q(root__embargo__end_date__gt=query_datetime)
for institution in institutions:
node_qs = institution.nodes.filter(is_deleted=False, created__lt=query_datetime).exclude(type='osf.registration')
registration_qs = institution.nodes.filter(is_deleted=False, created__lt=query_datetime, type='osf.registration')
count = {
'institution': {
'id': ensure_bytes(institution._id),
'name': ensure_bytes(institution.name),
},
'users': {
'total': institution.osfuser_set.filter(is_active=True).count(),
'total_daily': institution.osfuser_set.filter(date_confirmed__gte=timestamp_datetime, date_confirmed__lt=query_datetime).count(),
},
'nodes': {
'total': node_qs.count(),
'public': node_qs.filter(public_query).count(),
'private': node_qs.filter(private_query).count(),
'total_daily': node_qs.filter(daily_query).count(),
'public_daily': node_qs.filter(public_query & daily_query).count(),
'private_daily': node_qs.filter(private_query & daily_query).count(),
},
# Projects use get_roots to remove children
'projects': {
'total': node_qs.get_roots().count(),
'public': node_qs.filter(public_query).get_roots().count(),
'private': node_qs.filter(private_query).get_roots().count(),
'total_daily': node_qs.filter(daily_query).get_roots().count(),
'public_daily': node_qs.filter(public_query & daily_query).get_roots().count(),
'private_daily': node_qs.filter(private_query & daily_query).get_roots().count(),
},
'registered_nodes': {
'total': registration_qs.count(),
'public': registration_qs.filter(public_query).count(),
'embargoed': registration_qs.filter(private_query).count(),
'embargoed_v2': registration_qs.filter(private_query & embargo_v2_query).count(),
'total_daily': registration_qs.filter(daily_query).count(),
'public_daily': registration_qs.filter(public_query & daily_query).count(),
'embargoed_daily': registration_qs.filter(private_query & daily_query).count(),
'embargoed_v2_daily': registration_qs.filter(private_query & daily_query & embargo_v2_query).count(),
},
'registered_projects': {
'total': registration_qs.get_roots().count(),
'public': registration_qs.filter(public_query).get_roots().count(),
'embargoed': registration_qs.filter(private_query).get_roots().count(),
'embargoed_v2': registration_qs.filter(private_query & embargo_v2_query).get_roots().count(),
'total_daily': registration_qs.filter(daily_query).get_roots().count(),
'public_daily': registration_qs.filter(public_query & daily_query).get_roots().count(),
'embargoed_daily': registration_qs.filter(private_query & daily_query).get_roots().count(),
'embargoed_v2_daily': registration_qs.filter(private_query & daily_query & embargo_v2_query).get_roots().count(),
},
'keen': {
'timestamp': timestamp_datetime.isoformat()
}
}
logger.info(
'{} Nodes counted. Nodes: {}, Projects: {}, Registered Nodes: {}, Registered Projects: {}'.format(
count['institution']['name'],
count['nodes']['total'],
count['projects']['total'],
count['registered_nodes']['total'],
count['registered_projects']['total']
)
)
counts.append(count)
return counts
def get_class():
return InstitutionSummary
if __name__ == '__main__':
init_app()
institution_summary = InstitutionSummary()
args = institution_summary.parse_args()
yesterday = args.yesterday
if yesterday:
date = (timezone.now() - timedelta(days=1)).date()
else:
date = parse(args.date).date() if args.date else None
events = institution_summary.get_events(date)
institution_summary.send_events(events)
|
ryanahall/django
|
refs/heads/master
|
django/contrib/redirects/apps.py
|
590
|
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class RedirectsConfig(AppConfig):
name = 'django.contrib.redirects'
verbose_name = _("Redirects")
|
richardfrey86/progress-bars-assignment
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/MSVSUserFile.py
|
2710
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio user preferences file writer."""
import os
import re
import socket # for gethostname
import gyp.common
import gyp.easy_xml as easy_xml
#------------------------------------------------------------------------------
def _FindCommandInPath(command):
"""If there are no slashes in the command given, this function
searches the PATH env to find the given command, and converts it
to an absolute path. We have to do this because MSVS is looking
for an actual file to launch a debugger on, not just a command
line. Note that this happens at GYP time, so anything needing to
be built needs to have a full path."""
if '/' in command or '\\' in command:
# If the command already has path elements (either relative or
# absolute), then assume it is constructed properly.
return command
else:
# Search through the path list and find an existing file that
# we can access.
paths = os.environ.get('PATH','').split(os.pathsep)
for path in paths:
item = os.path.join(path, command)
if os.path.isfile(item) and os.access(item, os.X_OK):
return item
return command
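# (On Python 3.3+ the lookup above is roughly `shutil.which(command) or
# command`; this file targets Python 2, hence the manual PATH walk.)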
def _QuoteWin32CommandLineArgs(args):
new_args = []
for arg in args:
# Replace all double-quotes with double-double-quotes to escape
# them for cmd shell, and then quote the whole thing if there
# are any.
if arg.find('"') != -1:
arg = '""'.join(arg.split('"'))
arg = '"%s"' % arg
# Otherwise, if there are any spaces, quote the whole arg.
elif re.search(r'[ \t\n]', arg):
arg = '"%s"' % arg
new_args.append(arg)
return new_args
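# Illustration of the quoting rules above:
#     >>> _QuoteWin32CommandLineArgs(['run.exe', 'a b', 'say "hi"'])
#     ['run.exe', '"a b"', '"say ""hi"""']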
class Writer(object):
"""Visual Studio XML user user file writer."""
def __init__(self, user_file_path, version, name):
"""Initializes the user file.
Args:
user_file_path: Path to the user file.
version: Version info.
name: Name of the user file.
"""
self.user_file_path = user_file_path
self.version = version
self.name = name
self.configurations = {}
def AddConfig(self, name):
"""Adds a configuration to the project.
Args:
name: Configuration name.
"""
self.configurations[name] = ['Configuration', {'Name': name}]
def AddDebugSettings(self, config_name, command, environment = {},
working_directory=""):
"""Adds a DebugSettings node to the user file for a particular config.
Args:
command: command line to run. First element in the list is the
executable. All elements of the command will be quoted if
necessary.
      config_name: name of the configuration the settings apply to.
      environment: dict of environment variables for the debuggee. (optional)
      working_directory: working directory for the command. (optional)
"""
command = _QuoteWin32CommandLineArgs(command)
abs_command = _FindCommandInPath(command[0])
if environment and isinstance(environment, dict):
env_list = ['%s="%s"' % (key, val)
for (key,val) in environment.iteritems()]
environment = ' '.join(env_list)
else:
environment = ''
n_cmd = ['DebugSettings',
{'Command': abs_command,
'WorkingDirectory': working_directory,
'CommandArguments': " ".join(command[1:]),
'RemoteMachine': socket.gethostname(),
'Environment': environment,
'EnvironmentMerge': 'true',
# Currently these are all "dummy" values that we're just setting
# in the default manner that MSVS does it. We could use some of
# these to add additional capabilities, I suppose, but they might
# not have parity with other platforms then.
'Attach': 'false',
'DebuggerType': '3', # 'auto' debugger
'Remote': '1',
'RemoteCommand': '',
'HttpUrl': '',
'PDBPath': '',
'SQLDebugging': '',
'DebuggerFlavor': '0',
'MPIRunCommand': '',
'MPIRunArguments': '',
'MPIRunWorkingDirectory': '',
'ApplicationCommand': '',
'ApplicationArguments': '',
'ShimCommand': '',
'MPIAcceptMode': '',
'MPIAcceptFilter': ''
}]
# Find the config, and add it if it doesn't exist.
if config_name not in self.configurations:
self.AddConfig(config_name)
# Add the DebugSettings onto the appropriate config.
self.configurations[config_name].append(n_cmd)
def WriteIfChanged(self):
"""Writes the user file."""
configs = ['Configurations']
for config, spec in sorted(self.configurations.iteritems()):
configs.append(spec)
content = ['VisualStudioUserFile',
{'Version': self.version.ProjectVersion(),
'Name': self.name
},
configs]
easy_xml.WriteXmlIfChanged(content, self.user_file_path,
encoding="Windows-1252")
|
ghostrong/py-leveldb
|
refs/heads/master
|
test/test.py
|
42
|
#!/usr/bin/python
# Copyright (c) Arni Mar Jonsson.
# See LICENSE for details.
import sys, string, unittest, itertools
class TestLevelDB(unittest.TestCase):
def setUp(self):
# import local leveldb
import leveldb as _leveldb
self.leveldb = _leveldb
dir(self.leveldb)
# Python2/3 compat
if hasattr(string, 'lowercase'):
self.lowercase = string.lowercase
self.uppercase = string.uppercase
else:
self.lowercase = string.ascii_lowercase
self.uppercase = string.ascii_uppercase
# comparator
if sys.version_info[0] < 3:
def my_comparison(a, b):
return cmp(a, b)
else:
def my_comparison(a, b):
if a < b:
return -1
elif a > b:
return 1
else:
return 0
self.comparator = 'bytewise'
if True:
self.comparator = ('bytewise', my_comparison)
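            # The tuple pairs a comparator name with the compare function;
            # the function must return -1, 0 or 1, like Python 2's cmp().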
# repair/destroy previous database, if any
self.name = 'db_a'
#self.leveldb.RepairDB(self.name, comparator = self.comparator)
self.leveldb.DestroyDB(self.name)
def _open_options(self, create_if_missing = True, error_if_exists = False):
v = {
            'create_if_missing': create_if_missing,
'error_if_exists': error_if_exists,
'paranoid_checks': False,
'block_cache_size': 8 * (2 << 20),
'write_buffer_size': 2 * (2 << 20),
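            # 2 << 20 is 2 MiB, so the cache is 16 MiB and the write buffer 4 MiB.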
'block_size': 4096,
'max_open_files': 1000,
'block_restart_interval': 16,
'comparator': self.comparator
}
return v
def _open(self, *args, **kwargs):
options = self._open_options(*args, **kwargs)
db = self.leveldb.LevelDB(self.name, **options)
dir(db)
return db
def testIteratorNone(self):
options = self._open_options()
db = self.leveldb.LevelDB(self.name, **options)
for s in 'abcdef':
db.Put(self._s(s), self._s(s))
kv_ = [(self._s('a'), self._s('a')), (self._s('b'), self._s('b')), (self._s('c'), self._s('c')), (self._s('d'), self._s('d')), (self._s('e'), self._s('e')), (self._s('f'), self._s('f'))]
kv = list(db.RangeIter(key_from = None, key_to = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter(key_to = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter(key_from = None))
self.assertEqual(kv, kv_)
kv = list(db.RangeIter())
self.assertEqual(kv, kv_)
def testIteratorCrash(self):
options = self._open_options()
db = self.leveldb.LevelDB(self.name, **options)
db.Put(self._s('a'), self._s('b'))
i = db.RangeIter(include_value = False, reverse = True)
dir(i)
del self.leveldb
def _s(self, s):
if sys.version_info[0] >= 3:
return bytearray(s, encoding = 'latin1')
else:
return s
def _join(self, i):
return self._s('').join(i)
# NOTE: modeled after test 'Snapshot'
def testSnapshotBasic(self):
db = self._open()
db.Put(self._s('foo'), self._s('v1'))
s1 = db.CreateSnapshot()
dir(s1)
db.Put(self._s('foo'), self._s('v2'))
s2 = db.CreateSnapshot()
db.Put(self._s('foo'), self._s('v3'))
s3 = db.CreateSnapshot()
db.Put(self._s('foo'), self._s('v4'))
self.assertEqual(s1.Get(self._s('foo')), self._s('v1'))
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(s3.Get(self._s('foo')), self._s('v3'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s3
self.assertEqual(s1.Get(self._s('foo')), self._s('v1'))
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s1
self.assertEqual(s2.Get(self._s('foo')), self._s('v2'))
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# TBD: close properly
del s2
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
# re-open
del db
db = self._open()
self.assertEqual(db.Get(self._s('foo')), self._s('v4'))
def ClearDB(self, db):
for k in list(db.RangeIter(include_value = False, reverse = True)):
db.Delete(k)
def ClearDB_batch(self, db):
b = self.leveldb.WriteBatch()
dir(b)
for k in db.RangeIter(include_value = False, reverse = True):
b.Delete(k)
db.Write(b)
def CountDB(self, db):
return sum(1 for i in db.RangeIter(reverse = True))
def _insert_lowercase(self, db):
b = self.leveldb.WriteBatch()
for c in self.lowercase:
b.Put(self._s(c), self._s('hello'))
db.Write(b)
def _insert_uppercase_batch(self, db):
b = self.leveldb.WriteBatch()
for c in self.uppercase:
b.Put(self._s(c), self._s('hello'))
db.Write(b)
def _test_uppercase_get(self, db):
for k in self.uppercase:
v = db.Get(self._s(k))
self.assertEqual(v, self._s('hello'))
self.assertTrue(k in self.uppercase)
def _test_uppercase_iter(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('J'), self._s('M')))
self.assertEqual(s, self._s('JKLM'))
s = self._join(k for k, v in db.RangeIter(self._s('S')))
self.assertEqual(s, self._s('STUVWXYZ'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('E')))
self.assertEqual(s, self._s('ABCDE'))
def _test_uppercase_iter_rev(self, db):
# inside range
s = self._join(k for k, v in db.RangeIter(self._s('J'), self._s('M'), reverse = True))
self.assertEqual(s, self._s('MLKJ'))
# partly outside range
s = self._join(k for k, v in db.RangeIter(self._s('Z'), self._s(chr(ord('Z') + 1)), reverse = True))
self.assertEqual(s, self._s('Z'))
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('A') - 1)), self._s('A'), reverse = True))
self.assertEqual(s, self._s('A'))
# wholly outside range
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('Z') + 1)), self._s(chr(ord('Z') + 2)), reverse = True))
self.assertEqual(s, self._s(''))
s = self._join(k for k, v in db.RangeIter(self._s(chr(ord('A') - 2)), self._s(chr(ord('A') - 1)), reverse = True))
self.assertEqual(s, self._s(''))
# lower limit
s = self._join(k for k, v in db.RangeIter(self._s('S'), reverse = True))
self.assertEqual(s, self._s('ZYXWVUTS'))
# upper limit
s = self._join(k for k, v in db.RangeIter(key_to = self._s('E'), reverse = True))
self.assertEqual(s, self._s('EDCBA'))
def _test_lowercase_iter(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('j'), self._s('m')))
self.assertEqual(s, self._s('jklm'))
s = self._join(k for k, v in db.RangeIter(self._s('s')))
self.assertEqual(s, self._s('stuvwxyz'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('e')))
self.assertEqual(s, self._s('abcde'))
    def _test_lowercase_iter_rev(self, db):
s = self._join(k for k, v in db.RangeIter(self._s('j'), self._s('m'), reverse = True))
self.assertEqual(s, self._s('mlkj'))
s = self._join(k for k, v in db.RangeIter(self._s('s'), reverse = True))
self.assertEqual(s, self._s('zyxwvuts'))
s = self._join(k for k, v in db.RangeIter(key_to = self._s('e'), reverse = True))
self.assertEqual(s, self._s('edcba'))
def _test_lowercase_get(self, db):
for k in self.lowercase:
v = db.Get(self._s(k))
self.assertEqual(v, self._s('hello'))
self.assertTrue(k in self.lowercase)
def testIterationBasic(self):
db = self._open()
self._insert_lowercase(db)
self.assertEqual(self.CountDB(db), 26)
self._test_lowercase_iter(db)
        self._test_lowercase_iter_rev(db)
self._test_lowercase_get(db)
self.ClearDB_batch(db)
self._insert_uppercase_batch(db)
self._test_uppercase_iter(db)
self._test_uppercase_iter_rev(db)
self._test_uppercase_get(db)
self.assertEqual(self.CountDB(db), 26)
def testCompact(self):
db = self._open()
s = self._s('foo' * 10)
for i in itertools.count():
db.Put(self._s('%i' % i), s)
if i > 10000:
break
db.CompactRange(self._s('1000'), self._s('10000'))
db.CompactRange(start = self._s('1000'))
db.CompactRange(end = self._s('1000'))
db.CompactRange(start = self._s('1000'), end = None)
db.CompactRange(start = None, end = self._s('1000'))
db.CompactRange()
    # tried to reproduce http://code.google.com/p/leveldb/issues/detail?id=44
def testMe(self):
db = self._open()
db.Put(self._s('key1'), self._s('val1'))
del db
db = self._open()
db.Delete(self._s('key2'))
db.Delete(self._s('key1'))
del db
db = self._open()
db.Delete(self._s('key2'))
del db
db = self._open()
db.Put(self._s('key3'), self._s('val1'))
del db
db = self._open()
del db
db = self._open()
v = list(db.RangeIter())
self.assertEqual(v, [(self._s('key3'), self._s('val1'))])
if __name__ == '__main__':
unittest.main()
|
taohungyang/cloud-custodian
|
refs/heads/master
|
tests/test_elasticache.py
|
1
|
# Copyright 2016-2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest, TestConfig as Config
class TestElastiCacheCluster(BaseTest):
def test_elasticache_security_group(self):
session_factory = self.replay_flight_data("test_elasticache_security_group")
p = self.load_policy(
{
"name": "elasticache-cluster-simple",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(
sorted([r["CacheClusterId"] for r in resources]),
["myec-001", "myec-002", "myec-003"],
)
def test_elasticache_subnet_filter(self):
session_factory = self.replay_flight_data(
"test_elasticache_subnet_group_filter"
)
p = self.load_policy(
{
"name": "elasticache-cluster-simple",
"resource": "cache-cluster",
"filters": [
{"type": "subnet", "key": "MapPublicIpOnLaunch", "value": False}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(
sorted([r["CacheClusterId"] for r in resources]),
["myec-001", "myec-002", "myec-003"],
)
def test_elasticache_cluster_simple(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_simple")
p = self.load_policy(
{"name": "elasticache-cluster-simple", "resource": "cache-cluster"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
def test_elasticache_cluster_simple_filter(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_simple")
p = self.load_policy(
{
"name": "elasticache-cluster-simple-filter",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
def test_elasticache_sharded_snapshot_copy_tags(self):
factory = self.replay_flight_data("test_elasticache_sharded_copy_cluster_tags")
client = factory().client("elasticache")
snap_tags = {
t["Key"]: t["Value"]
for t in client.list_tags_for_resource(
ResourceName="arn:aws:elasticache:us-east-2:644160558196:snapshot:zero-bytes"
)[
"TagList"
]
}
self.assertEqual(snap_tags, {"App": "MegaCache"})
p = self.load_policy(
{
"name": "test-copy-cluster-tags",
"resource": "cache-snapshot",
"actions": [
{
"type": "copy-cluster-tags",
"tags": ["App", "Env", "Zone", "Color"],
}
],
},
config=Config.empty(region="us-east-2"),
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
self.assertEqual(resources[0]["SnapshotName"], "zero-bytes")
arn = p.resource_manager.get_arns(resources)[0]
snap_tags = {
t["Key"]: t["Value"]
for t in client.list_tags_for_resource(ResourceName=arn)["TagList"]
}
self.assertEqual(
snap_tags, {"App": "MegaCache", "Color": "Blue", "Env": "Dev", "Zone": "12"}
)
def test_elasticache_snapshot_copy_cluster_tags(self):
session_factory = self.replay_flight_data("test_elasticache_copy_cluster_tags")
client = session_factory().client("elasticache")
results = client.list_tags_for_resource(
ResourceName="arn:aws:elasticache:us-east-1:644160558196:snapshot:myec-backup"
)[
"TagList"
]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags, {})
policy = self.load_policy(
{
"name": "test-copy-cluster-tags",
"resource": "cache-snapshot",
"actions": [{"type": "copy-cluster-tags", "tags": ["tagkey"]}],
},
Config.empty(region="us-east-1"),
session_factory=session_factory,
)
resources = policy.run()
arn = policy.resource_manager.generate_arn(resources[0]["SnapshotName"])
results = client.list_tags_for_resource(ResourceName=arn)["TagList"]
tags = {t["Key"]: t["Value"] for t in results}
self.assertEqual(tags["tagkey"], "tagval")
def test_elasticache_cluster_available(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_available")
p = self.load_policy(
{
"name": "elasticache-cluster-available",
"resource": "cache-cluster",
"filters": [
{"type": "value", "key": "CacheClusterStatus", "value": "available"}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
self.assertEqual(resources[0]["CacheClusterStatus"], "available")
def test_elasticache_cluster_mark(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_mark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-cluster-mark",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
"actions": [{"type": "mark-for-op", "days": 30, "op": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
arn = p.resource_manager.generate_arn(resources[0]["CacheClusterId"])
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("maid_status" in tag_map)
def test_elasticache_cluster_unmark(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_unmark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-cluster-unmark",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
"actions": [{"type": "unmark"}],
},
session_factory=session_factory,
)
resources = p.run()
arn = p.resource_manager.generate_arn(resources[0]["CacheClusterId"])
self.assertEqual(len(resources), 3)
tags = client.list_tags_for_resource(ResourceName=arn)
self.assertFalse("maid_status" in tags)
def test_elasticache_cluster_delete(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_delete")
p = self.load_policy(
{
"name": "elasticache-cluster-delete",
"resource": "cache-cluster",
"filters": [{"type": "value", "key": "Engine", "value": "redis"}],
"actions": [{"type": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
def test_elasticache_cluster_snapshot(self):
session_factory = self.replay_flight_data("test_elasticache_cluster_snapshot")
p = self.load_policy(
{
"name": "elasticache-cluster-snapshot",
"resource": "cache-cluster",
"actions": [{"type": "snapshot"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 3)
class TestElastiCacheSubnetGroup(BaseTest):
def test_elasticache_subnet_group(self):
session_factory = self.replay_flight_data("test_elasticache_subnet_group")
p = self.load_policy(
{"name": "elasticache-subnet-group", "resource": "cache-subnet-group"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
class TestElastiCacheSnapshot(BaseTest):
def test_elasticache_snapshot(self):
session_factory = self.replay_flight_data("test_elasticache_snapshot")
p = self.load_policy(
{"name": "elasticache-snapshot", "resource": "cache-snapshot"},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 4)
def test_elasticache_snapshot_age_filter(self):
factory = self.replay_flight_data("test_elasticache_snapshot")
p = self.load_policy(
{
"name": "elasticache-snapshot-age-filter",
"resource": "cache-snapshot",
"filters": [{"type": "age", "days": 2, "op": "gt"}],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 4)
def test_elasticache_snapshot_mark(self):
session_factory = self.replay_flight_data("test_elasticache_snapshot_mark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-snapshot-mark",
"resource": "cache-snapshot",
"filters": [
{
"type": "value",
"key": "SnapshotName",
"value": "backup-myec-001-2017-06-23",
}
],
"actions": [{"type": "mark-for-op", "days": 30, "op": "delete"}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
arn = p.resource_manager.generate_arn(resources[0]["SnapshotName"])
self.assertEqual(len(resources), 1)
tags = client.list_tags_for_resource(ResourceName=arn)
tag_map = {t["Key"]: t["Value"] for t in tags["TagList"]}
self.assertTrue("maid_status" in tag_map)
def test_elasticache_snapshot_unmark(self):
session_factory = self.replay_flight_data("test_elasticache_snapshot_unmark")
client = session_factory().client("elasticache")
p = self.load_policy(
{
"name": "elasticache-snapshot-unmark",
"resource": "cache-snapshot",
"filters": [
{
"type": "value",
"key": "SnapshotName",
"value": "backup-myec-001-2017-06-23",
}
],
"actions": [{"type": "unmark"}],
},
session_factory=session_factory,
)
resources = p.run()
arn = p.resource_manager.generate_arn(resources[0]["SnapshotName"])
self.assertEqual(len(resources), 1)
tags = client.list_tags_for_resource(ResourceName=arn)
self.assertFalse("maid_status" in tags)
def test_elasticache_snapshot_delete(self):
factory = self.replay_flight_data("test_elasticache_snapshot_delete")
p = self.load_policy(
{
"name": "elasticache-snapshot-delete",
"resource": "cache-snapshot",
"actions": ["delete"],
},
session_factory=factory,
)
resources = p.run()
self.assertEqual(len(resources), 4)
class TestModifyVpcSecurityGroupsAction(BaseTest):
def test_elasticache_remove_matched_security_groups(self):
#
# Test conditions:
        # - running 2 Elasticache replication groups in default VPC with 3 clusters each
# - translates to 6 clusters
# - a default security group with id 'sg-7a3fcb13' exists
# - security group named PROD-ONLY-Test-Security-Group exists in VPC and is attached to
# one replication group
# - translates to 3 clusters marked non-compliant
#
# Results in 6 clusters with default Security Group attached
session_factory = self.replay_flight_data(
"test_elasticache_remove_matched_security_groups"
)
client = session_factory().client("elasticache", region_name="ca-central-1")
p = self.load_policy(
{
"name": "elasticache-remove-matched-security-groups",
"resource": "cache-cluster",
"filters": [
{
"type": "security-group",
"key": "GroupName",
"value": "(.*PROD-ONLY.*)",
"op": "regex",
}
],
"actions": [
{
"type": "modify-security-groups",
"remove": "matched",
"isolation-group": "sg-7a3fcb13",
}
],
},
session_factory=session_factory,
)
clean_p = self.load_policy(
{
"name": "elasticache-verifyremove-matched-security-groups",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
},
session_factory=session_factory,
)
resources = p.run()
waiter = client.get_waiter("replication_group_available")
waiter.wait()
clean_resources = clean_p.run()
# clusters autoscale across AZs, so they get -001, -002, etc appended
self.assertIn("sg-test-base", resources[0]["CacheClusterId"])
self.assertEqual(len(resources), 3)
self.assertEqual(len(resources[0]["SecurityGroups"]), 1)
# show that it was indeed a replacement of security groups
self.assertEqual(len(clean_resources[0]["SecurityGroups"]), 1)
self.assertEqual(len(clean_resources), 6)
def test_elasticache_add_security_group(self):
# Test conditions:
# - running Elasticache replication group in default VPC with 3 clusters
# - a default security group with id 'sg-7a3fcb13' exists
# - security group named PROD-ONLY-Test-Security-Group exists in VPC and is not attached
# - translates to 3 clusters marked to get new group attached
#
# Results in 3 clusters with default Security Group and PROD-ONLY-Test-Security-Group
session_factory = self.replay_flight_data("test_elasticache_add_security_group")
client = session_factory().client("elasticache", region_name="ca-central-1")
p = self.load_policy(
{
"name": "add-sg-to-prod-elasticache",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"}
],
"actions": [{"type": "modify-security-groups", "add": "sg-6360920a"}],
},
session_factory=session_factory,
)
clean_p = self.load_policy(
{
"name": "validate-add-sg-to-prod-elasticache",
"resource": "cache-cluster",
"filters": [
{"type": "security-group", "key": "GroupName", "value": "default"},
{
"type": "security-group",
"key": "GroupName",
"value": "PROD-ONLY-Test-Security-Group",
},
],
},
session_factory=session_factory,
)
resources = p.run()
waiter = client.get_waiter("replication_group_available")
waiter.wait()
clean_resources = clean_p.run()
self.assertEqual(len(resources), 3)
self.assertIn("sg-test-base", resources[0]["CacheClusterId"])
self.assertEqual(len(resources[0]["SecurityGroups"]), 1)
self.assertEqual(len(clean_resources[0]["SecurityGroups"]), 2)
self.assertEqual(len(clean_resources), 3)
|
nhomar/odoo-mirror
|
refs/heads/8.0
|
doc/_themes/odoodoc/github.py
|
34
|
import inspect
import importlib
import os.path
from urlparse import urlunsplit
import sphinx
def setup(app):
app.add_config_value('github_user', None, 'env')
app.add_config_value('github_project', None, 'env')
app.connect('html-page-context', add_doc_link)
def linkcode_resolve(domain, info):
""" Resolves provided object to corresponding github URL
"""
# TODO: js?
if domain != 'py':
return None
if not (app.config.github_user and app.config.github_project):
return None
module, fullname = info['module'], info['fullname']
# TODO: attributes/properties don't have modules, maybe try to look
# them up based on their cached host object?
if not module:
return None
obj = importlib.import_module(module)
for item in fullname.split('.'):
obj = getattr(obj, item, None)
if obj is None:
return None
# get original from decorated methods
try: obj = getattr(obj, '_orig')
except AttributeError: pass
try:
obj_source_path = inspect.getsourcefile(obj)
_, line = inspect.getsourcelines(obj)
except (TypeError, IOError):
# obj doesn't have a module, or something
return None
import openerp
project_root = os.path.join(os.path.dirname(openerp.__file__), '..')
return make_github_link(
app,
os.path.relpath(obj_source_path, project_root),
line)
app.config.linkcode_resolve = linkcode_resolve
def make_github_link(app, path, line=None, mode="blob"):
config = app.config
urlpath = "/{user}/{project}/{mode}/{branch}/{path}".format(
user=config.github_user,
project=config.github_project,
branch=config.version or 'master',
path=path,
mode=mode,
)
return urlunsplit((
'https',
'github.com',
urlpath,
'',
'' if line is None else 'L%d' % line
))
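# Illustrative result (hypothetical config values github_user='odoo',
# github_project='odoo', version='8.0'):
#   make_github_link(app, 'openerp/models.py', 42)
#   -> 'https://github.com/odoo/odoo/blob/8.0/openerp/models.py#L42'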
def add_doc_link(app, pagename, templatename, context, doctree):
""" Add github_link function linking to the current page on github """
    if not (app.config.github_user and app.config.github_project):
return
def github_doc_link(mode='blob'):
""" returns the github URL for the current page
:param str mode: 'edit' for edition view
"""
return make_github_link(
app,
'doc/%s%s' % (pagename, app.config.source_suffix),
mode=mode)
context['github_link'] = github_doc_link
|
ZetDude/KALEVBOT
|
refs/heads/master
|
cogs/fun.py
|
1
|
"""Fun commands that don't do anything really productive
night, thank, shipname, shipcount, ship, hug, pecan, fortune"""
# -*- coding: utf-8 -*-
import pickle
import random
import sqlite3 as lite
import subprocess
import discord
from discord.ext import commands
from lib import shipname_module as improved_shipname, customconverter as cconv, obot
def search(values, search_for):
"Finds all the values in dict `values` where `search_for` is somewhere in the key"
    found_values = [] # Initialize an empty list that will be the final list.
    for k in values: # Iterate through every key in the given dictionary.
        if str(search_for) in str(k): # If the string we are looking for is in the key.
            found_values.append([k, values[k]])
            # Append the key and its value to the final list. The value is kept
            # as-is so callers can sort and compare the counts numerically.
    return found_values # Return the final list.
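# Illustrative use of search() (hypothetical data):
#   search({'1:2': 3, '4:5': 6}, 2) -> [['1:2', 3]]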
def remove_duplicates(values):
"Return the list `values` with duplicates removed"
# I'm going to be honest, I just found this on StackOverflow so I have no idea how it works.
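    # How it works: seen_add(x) always returns None (falsy), so the `or`
    # clause records x as seen while the membership test keeps only the
    # first occurrence, preserving the original order.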
seen = set()
seen_add = seen.add
values = [x for x in values if not (x in seen or seen_add(x))]
return values
class FunCog():
"fun fun fun fun fun fun"
def __init__(self, bot):
self.bot = bot
type(self).__name__ = "Fun"
@commands.command(name='night', aliases=['n', 'goodnight', 'nacht', 'öö', 'ööd', 'oyasumi',
'\u304a\u3084\u3059\u307f'],
help=(r"Wish someone a good night using a super cute kaomoji! ^_^"),
brief="Wish someone a good night.")
async def night(self, ctx, *, target_user=None):
"""Wish a good night to `target_user`, with a kaomoji emoticon in front.
`target_user` is anything pertaining to the target user or member that
lib.customconverter.HybridConverter can detect.
`target_user` defaults to None and can be left blank.
`target_user` can also be the argument "-list", in which case the bot returns all the
kaomoji emoticons associated with this command.
"""
# Define the list of kaomoji emoticons the bot will be using. Because of discord formatting
# special characters are escaped with a \.
kaomoji = [r"お(^o^)や(^O^)す(^。^)みぃ(^-^)ノ゙",
r" .。.:\*・゚☆Goodヾ(\*´Д`(\*゚ω゚\* )Night☆.。.:\*・゚",
r" – =͟͟͞ (¦3[▓▓])",
r" 。・:\*:・゚★,。・=^∇^\*=,。・:\*:・゚☆",
r"☆~\*.(UωU\*)おやすみぃ…\*~☆",
r"|・ω・`)おやすみぃ♪", ]
selected_kaomoji = random.choice(kaomoji)
if target_user is None: # If the user does not supply a target user...
await ctx.send(f"{selected_kaomoji} Good night!") # Return a generic response.
elif target_user == "-list": # -list flag...
await ctx.send("\n".join(kaomoji)) # Join together all the kaomoji and send them.
else: # If the target user is actually given.
try:
target_user = await cconv.HybridConverter().convert(ctx, target_user)
await ctx.send(f"{selected_kaomoji} Good night, {target_user.name}!")
except commands.BadArgument: # HybridConverter fails...
# Fall back to just using the inputted string with no conversion.
await ctx.send(f"{selected_kaomoji} Good night, {target_user}!")
@commands.command(name='thank', aliases=['thanks', 'arigato', 'arigatou', 'arigatoo',
'merci', 'arigatō', 'danke', 'aitah', 'aitäh',
'\u3042\u308a\u304c\u3068\u3046'],
help=(r"Thank someone using a super cute kaomoji! ^_^"),
brief="Thank someone.")
async def thank(self, ctx, *, target_user=None):
"""Thank `target_user`, with a kaomoji emoticon in front.
`target_user` is anything pertaining to the target user or member that
lib.customconverter.HybridConverter can detect.
`target_user` defaults to None and can be left blank.
`target_user` can also be the argument "-list", in which case the bot returns all the
kaomoji emoticons associated with this command.
"""
# The list of kaomoji emoticons the bot will be using. Because of discord formatting special
# characters are escaped with a \.
kaomoji = [r"♪(・ω・)ノ",
r"(\*ゝω・)ノ",
r"゚・:,。★\(^-^ )♪ありがとう♪( ^-^)/★,。・:・゚",
r"(★^O^★)",
r"☆\*:.。. o(≧▽≦)o .。.:\*☆",
r"(ノ^_^)ノ",
r"(ノ゚▽゚)ノ",
r"(ノ´ヮ´)ノ\*:・゚✧",
r"(\*^3^)/\~☆",
r"<(\_ \_\*)> アリガトォ",
r"ありがとぅございますっっヽ(●´∀\`)人(´∀\`●)ノ",
r"ありがとうございましたm(\*-ω-)m",
r"+。:.゚ヽ(\*´∀)ノ゚.:。+゚ァリガトゥ"
]
selected_kaomoji = random.choice(kaomoji)
if target_user is None: # If the user does not supply a target user.
await ctx.send(f"{selected_kaomoji} Thank you!") # Return a generic response.
elif target_user == "-list": # -list flag
await ctx.send("\n".join(kaomoji)) # Join together all the kaomoji and send them.
else: # If the target user is actually given.
try:
target_user = await cconv.HybridConverter().convert(ctx, target_user)
if target_user == ctx.bot.user: # If the user's target is the bot itself...
# "u2764" is the black heart unicode character
await ctx.send(f"You're welcome, {ctx.author.name}! \\\u2764")
elif target_user == ctx.author: # If the user attempts to thank themself... sass.
await ctx.send(f"Why would I need to thank you, {ctx.author.name}?")
else: # If no special cases were found...
await ctx.send(f"{selected_kaomoji} Thank you, {target_user.name}!")
except commands.BadArgument: # HybridConverter fails...
# Fall back to just using the inputted string with no conversion
await ctx.send(f"{selected_kaomoji} Thank you, {target_user}!")
@commands.command(name='shipname', aliases=['name'],
help="Create the shipname of two people.")
async def shipname(self, ctx, name1, name2):
"""Uses pecan's shipname module to create the shipname of two names.
`name1` is the first name.
        `name2` is the second name.
"""
# Request a shipname from pecan's shipname module™ using names from arguments.
names_shipname = improved_shipname.shipname(name1, name2) # I don't know how it works.
await ctx.send(f"{ctx.author.name}, I shall call it \"**{names_shipname}**\"!")
@commands.command(name='shipcount', aliases=['count'],
help="Get amount of ships created between people",
usage="[users...] OR -top")
async def shipcount(self, ctx, *ships_in):
"""Show all the people someone has been shipped with when given one person, or the amount
of ships between certain people when given multiple.
`ships_in` is the people/person to get info of.
`ships_in` can also be the argument "-top", in which case only the top 10 most shipped pairs
will be shown."""
shipfile = obot.SHIPFILE # File where all shipping information is stored.
ships = [] # This list will contain the user(s) we want to get information about.
for i in ships_in: # Convert all the given member to actual users.
if i == "-top": # skip the -top flag.
continue
ships.append(await cconv.HybridConverter().convert(ctx, i))
ships = remove_duplicates(ships)
# Format the IDs into a format: 'id1:id2:id3...'.
# This format is needed as this is how ship information is stored in the shipfile.
ships_format = ':'.join([str(x.id) for x in ships])
try:
# Open the shipfile and unpickle it. The returning format is a dictionary.
# -> {'id1:id2:id3...': count}
with open(shipfile, "rb") as opened_file:
lines = pickle.load(opened_file)
except FileNotFoundError:
await ctx.send(f"I couldn't find the shipping file ({shipfile})")
return
except pickle.UnpicklingError:
await ctx.send("Shipping data file is corrupt, cannot fetch data.")
return
if not ships: # No arguments... default to author.
ships = [ctx.author]
if len(ships) == 1: # Find all the ships that user is contained in.
return_message = ""
if "-top" in ships_in: # -top flag is given...
# The data dict is turned into a list, and is sorted by the count, then reversed
# so that the biggest are in the beginning, and then only the first 10 are fetched.
mentions = list(reversed(sorted(list(lines.items()), key=lambda a: a[1])))[:10]
else: # no flag is given...
# All the lines that contain the target are fetched
mentions = search(lines, ships[0].id)
mentions = reversed(sorted(mentions, key=lambda a: a[1]))
for k, j in mentions: # Iterate through all fetched lines.
usern = []
# take the 'id1:id2:id3...' format and split it into the IDs it is composed from.
for i in k.split(":"):
try:
# Convert the ID which is stored into an user.
found_user = ctx.bot.get_user(int(i))
if found_user is None: # No server shared with target user.
# NOTE: The function get_user_info() works regardless of the target
# sharing servers with the bot, however, it is terribly slow.
found_user = await ctx.bot.get_user_info(i)
usern.append(found_user.name)
except discord.NotFound: # User doesn't exist on discord...?
usern.append(i) # Fall back to just showing the ID
times_message = "time" if j == 1 else "times"
return_message += f"{' x '.join(usern)}: shipped {j} {times_message}\n"
# example -> "User1 x User2: shipped 3 times"
if not return_message: # no results found...
return_message = (f"{ships[0].name}, you haven't been shipped with anybody yet, "
f"but I still love you!")
await ctx.send(f"```\n{return_message}\n```")
return
else: # The user gives multple users as arguments...
# Find how many times those specific users have been shipped before.
occ = lines.get(ships_format, 0)
times_message = "time" if j == 1 else "times"
await ctx.send(f"{ctx.author}, they have been shipped {occ} {times_message} before")
@commands.command(name='ship', aliases=['otp'],
help="Ship someone with someone else.",
brief="Ship someone with someone else. uwu")
async def ship(self, ctx, *ships: cconv.HybridConverter):
shipfile = obot.SHIPFILE # File where all the shipping information is stored.
        if ctx.message.author in ships: # User attempts to ship themself
await ctx.send((f"{ctx.message.author.name}, "
"I don't think you can ship yourself with someone"))
return
ships = remove_duplicates(ships)
if len(ships) < 2:
await ctx.send(f"{ctx.message.author.name}, mention at least two people in the message")
return
ships_names = [x.name for x in ships]
# Format the IDs into a format: 'id1:id2:id3...'.
# This format is needed as this is how ship information is stored in the shipfile.
# The list is sorted by ID for consistency between runs.
ships_format = ":".join(sorted([str(x.id) for x in ships], key=int))
try:
with open(shipfile, "rb") as opened_file:
# Open the shipfile and unpickle it. The returning format is a dictionary.
# -> {'id1:id2:id3...': count}
lines = pickle.loads(opened_file.read())
except FileNotFoundError:
lines = {}
with open(shipfile, 'w'):
await ctx.send("Created new ship file")
except pickle.UnpicklingError:
await ctx.send("Ship file is corrupt, cannot fetch data.")
return
occ = lines.get(ships_format, 0) # Times the target users have already been shipped.
times_message = "time" + ("" if occ == 1 else "s")
lines[ships_format] = occ + 1 # Increase count by one
with open(shipfile, 'wb') as opened_file: # Write the new data
pickle.dump(lines, opened_file)
shipname = ""
if len(ships) == 2: # If there are two names, we can make a shipname
# Request a shipname from pecan's shipname module™
final = improved_shipname.shipname(*ships_names)
shipname = "I shall call it \"**" + final + "**\""
await ctx.send((f"{ctx.message.author.name} totally ships {' and '.join(ships_names)}"
f"\nThey have been shipped {occ} {times_message} before"
f"\n{shipname}"))
@commands.command(name='hug', aliases=['\U0001f917'],
help="Give someone a hug!")
async def hug(self, ctx, *target_users):
"""Hug target user, and count how many times you have hugged people in total
TODO: Make hugs server-based
`target_users` are the users to hug (or just 1 user).
`target_users` can also be the argument "-top <num>", in which case the top <num> people
with the highest amount of hugs given will be returned.
"""
target_users = list(target_users)
con = lite.connect("important/data.db") # Database where hug data is stored
if target_users[0] == "-top": # If the first argument given is the flag -top...
try: # The second argument is how many people to fetch.
fetch_amount = int(target_users[1])
                if fetch_amount < 0:
                    await ctx.send(f"That's less than zero, {ctx.author.name}.")
                    return
except ValueError:
await ctx.send(f"That's not an integer, {ctx.author}.")
return
except IndexError: # If an amount isn't given, default to 5
fetch_amount = 5
with con:
try:
# Order all entries by amount, descending, then get the first `fetch_amount`
cur = con.cursor()
cur.execute("SELECT * FROM Hug ORDER BY Hugs DESC")
rows = cur.fetchall()[:fetch_amount]
combine = f"```\nTOP {fetch_amount} HUGGERS:\n---------\n"
for row in rows:
# Convert the ID to an user.
target_user = ctx.bot.get_user(row[0])
if target_user is None: # No server shared with target.
try:
# NOTE: The function get_user_info() works regardless of the target
# sharing servers with the bot, however, it is terribly slow.
target_user = await ctx.bot.get_user_info(row[0])
except discord.NotFound: # User doesn't exist on Discord.
target_user = None # Give up and default to None.
                    combine += target_user.name if target_user is not None else str(row[0])
combine += " - " + str(row[1]) + "\n"
combine += "\n```"
except lite.OperationalError as err: # sql error...
if str(err) == "no such table: Hug": # No table exists...
# Create a new one and inform the user
cur.execute("CREATE TABLE Hug(id INT NOT NULL UNIQUE, Hugs INT);")
await ctx.send("No hug data was recorded, created file now.")
else: # If actual users are given.
targets = []
for i in target_users: # Go through all the targets...
try: # and try to convert them using HybridConverter...
converted_member = await cconv.HybridConverter().convert(ctx, i)
except commands.BadArgument: # but if that fails...
converted_member = "*" + i + "*" # default to the string that the user gave.
targets.append(converted_member)
targets = remove_duplicates(targets)
# If the list contains just the author or nobody
if [ctx.author] == targets or not targets:
await ctx.send(f"Who are you going to hug, {ctx.author.name}? Yourself?")
return
if ctx.author in targets: # Remove the user from the list of targets.
targets.remove(ctx.author)
with con:
try: # Get the data of the author from the database
cur = con.cursor()
cur.execute(
"SELECT COALESCE(Hugs, 0) FROM Hug WHERE id = ?", (ctx.author.id, ))
row = cur.fetchone()
hugs = 0 if row is None else row[0]
except lite.OperationalError as err:
if str(err) == "no such table: Hug":
cur.execute(
"CREATE TABLE Hug(id INT NOT NULL UNIQUE, Hugs INT);")
await ctx.send("Created new hugs database table.")
hugs = 0
times_message = "hug" + ("" if hugs == 1 else "s")
# Create a second list which is just a copy of the targets
mentions_without_bot = list(targets)
            for user in mentions_without_bot[:]:
                # Iterate over a copy so removals don't skip any elements.
if isinstance(user, str): # Get rid of everything that isn't an user.
mentions_without_bot.remove(user)
elif user.bot: # Get rid of bots.
mentions_without_bot.remove(user)
hugs += len(mentions_without_bot) # Increase the hug tally of the author.
# Update database.
cur.execute("INSERT OR IGNORE INTO Hug VALUES(?, ?)", (ctx.author.id, hugs))
cur.execute("UPDATE Hug SET Hugs=? WHERE id=?", (hugs, ctx.author.id))
if ctx.bot.user.id in [x.id for x in targets if not isinstance(x, str)]:
# If the bot itself is in the targets list.
if len(targets) > 1: # If other users are hugged alongside it.
# Join all other targets.
                receivers_without_self = list(targets)
                receivers_without_self.remove(ctx.bot.user)
                receivers = " and ".join([x.name if not isinstance(
                    x, str) else x for x in receivers_without_self])
                combine = (f"{ctx.author.name} gave {receivers} a hug, and I hug you back! "
f"\U0001f917 (+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
else: # Only the bot is hugged.
combine = (f"I hug you back, {ctx.author.name}! "
f"\U0001f917 (+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
elif targets:
# Join all targets.
recievers = " and ".join(
[x.name if not isinstance(x, str) else x for x in targets])
combine = (f"{ctx.author.name} gave {recievers} a hug! "
f"(+{len(mentions_without_bot)}; {hugs} "
f"{times_message} in total)")
        else: # I don't know if this clause is ever executed but I'm too scared to remove it.
combine = (f"{ctx.author.name}, you've hit the else clause on line 381 of fun.py, "
f"please report it to someone.")
await ctx.send(combine)
@commands.command(name='pecan', aliases=['p'],
help="Random quote from pecan.")
async def pecan(self, ctx, *, input_text=None):
"""Get a random or certain line from the old IRC chat logs of pecan.
`input_text` is the integer code of the line to fetch. Lookup is 1-indexed.
`input_text` can also be left empty, in which case it defaults to None and just gives a
random line.
`input_text` can also be a string, in which case that string is searched for in the corpus,
and a random line containing that string is returned.
"""
try:
with open(obot.PECAN_CORPUS, "r") as opened_file:
data = opened_file.read().splitlines() # Get all the lines of the file
if input_text is None: # No argument given
num = random.choice(range(len(data))) # Get a random number.
quote = data[num] # Get the quote corresponding to that number
await ctx.send(f"{num + 1}: `{quote}`")
else: # An argument is given
                try: # Test if it is the number of a certain line
                    num = int(input_text)
                    if num < 0:
                        await ctx.send("baka! number is negative!")
                        return
                    elif num == 0:
                        await ctx.send("baka! file is 1-indexed!")
                        return
                    num = num - 1 # The file is 1-indexed, the list is 0-indexed.
                    quote = data[num]
                    await ctx.send(f"{num + 1}: `{quote}`")
except IndexError:
await ctx.send(f"baka! number is over {len(data)}!")
return
except ValueError: # Not an int
# Find all entries where target string is included.
if input_text.startswith('"') and input_text.endswith('"'):
input_text = input_text[1:-1]
found_entries = []
for j, i in enumerate(data):
if input_text.lower() in i.lower(): # case-insensitive
found_entries.append((j, i))
if not found_entries: # No entries found...
await ctx.send(f"{ctx.author.name}, nothing contains `{input_text}`")
return
response = random.choice(found_entries) # pick a random valid entry.
await ctx.send((f"`{input_text}` (total {len(found_entries)}) - "
f"{response[0]+1}: `{response[1]}`"))
# example -> `pecan` (total 40) - 1813: `I might meet the other pecan.`
except FileNotFoundError:
await ctx.send(f"{ctx.author.name}, no pecan corpus file is included or it is "
f"configured incorrectly. Download it at "
f"<http://97.107.129.215/pecan.txt>")
@commands.command(name='fortune', aliases=['f'],
help="Unix fortune.")
async def fortune(self, ctx):
"Return a random unix fortune line."
fortune_msg = subprocess.check_output("fortune").decode("utf-8")
fortune_msg = fortune_msg[:1988] + "\u2026" if len(fortune_msg) > 1990 else fortune_msg
await ctx.send("```\n" + fortune_msg + "\n```")
@shipname.error
async def shipname_error(self, ctx, error):
if isinstance(error, commands.MissingRequiredArgument):
await ctx.send(f"{ctx.author.name}, please use two names as arguments")
@shipcount.error
@ship.error
async def ship_error(self, ctx, error):
if isinstance(error, commands.BadArgument):
await ctx.send(f"{ctx.author.name}, {error.args[0]}")
def setup(bot):
bot.add_cog(FunCog(bot))
|
maartenq/ansible
|
refs/heads/devel
|
lib/ansible/modules/cloud/vmware/vca_fw.py
|
104
|
#!/usr/bin/python
# Copyright: (c) 2015, VMware, Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: vca_fw
short_description: add or remove firewall rules in a gateway in a vca
description:
- Adds or removes firewall rules from a gateway in a vca environment
version_added: "2.0"
author:
- Peter Sprygada (@privateip)
options:
fw_rules:
description:
      - A list of firewall rules to be added to the gateway; please see examples for valid entries
    required: True
extends_documentation_fragment: vca.documentation
'''
EXAMPLES = '''
#Add a set of firewall rules
- hosts: localhost
connection: local
tasks:
- vca_fw:
instance_id: 'b15ff1e5-1024-4f55-889f-ea0209726282'
vdc_name: 'benz_ansible'
state: 'absent'
fw_rules:
- description: "ben testing"
source_ip: "Any"
dest_ip: 192.0.2.23
- description: "ben testing 2"
source_ip: 192.0.2.50
source_port: "Any"
dest_port: "22"
dest_ip: 192.0.2.101
is_enable: "true"
enable_logging: "false"
protocol: "Tcp"
policy: "allow"
'''
try:
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import FirewallRuleType
from pyvcloud.schema.vcd.v1_5.schemas.vcloud.networkType import ProtocolsType
except ImportError:
# normally set a flag here but it will be caught when testing for
# the existence of pyvcloud (see module_utils/vca.py). This just
# protects against generating an exception at runtime
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vca import VcaError, vca_argument_spec, vca_login
VALID_PROTO = ['Tcp', 'Udp', 'Icmp', 'Other', 'Any']
VALID_RULE_KEYS = ['policy', 'is_enable', 'enable_logging', 'description',
'dest_ip', 'dest_port', 'source_ip', 'source_port',
'protocol']
def protocol_to_tuple(protocol):
return (protocol.get_Tcp(),
protocol.get_Udp(),
protocol.get_Icmp(),
protocol.get_Other(),
protocol.get_Any())
def protocol_to_string(protocol):
protocol = protocol_to_tuple(protocol)
if protocol[0] is True:
return 'Tcp'
elif protocol[1] is True:
return 'Udp'
elif protocol[2] is True:
return 'Icmp'
elif protocol[3] is True:
return 'Other'
elif protocol[4] is True:
return 'Any'
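# Illustrative round trip (assuming the generated getters return the stored
# flags): protocol_to_string(protocol_to_type('Tcp')) == 'Tcp'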
def protocol_to_type(protocol):
try:
protocols = ProtocolsType()
setattr(protocols, protocol, True)
return protocols
except AttributeError:
raise VcaError("The value in protocol is not valid")
def validate_fw_rules(fw_rules):
for rule in fw_rules:
for k in rule.keys():
if k not in VALID_RULE_KEYS:
raise VcaError("%s is not a valid key in fw rules, please "
"check above.." % k, valid_keys=VALID_RULE_KEYS)
rule['dest_port'] = str(rule.get('dest_port', 'Any')).lower()
rule['dest_ip'] = rule.get('dest_ip', 'Any').lower()
rule['source_port'] = str(rule.get('source_port', 'Any')).lower()
rule['source_ip'] = rule.get('source_ip', 'Any').lower()
rule['protocol'] = rule.get('protocol', 'Any').lower()
rule['policy'] = rule.get('policy', 'allow').lower()
rule['is_enable'] = rule.get('is_enable', True)
rule['enable_logging'] = rule.get('enable_logging', False)
rule['description'] = rule.get('description', 'rule added by Ansible')
return fw_rules
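# Illustrative normalization (hypothetical input):
#   validate_fw_rules([{'dest_ip': '192.0.2.23'}]) fills in the defaults,
#   e.g. source_ip='any', dest_port='any', protocol='any', policy='allow',
#   is_enable=True, enable_logging=False.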
def fw_rules_to_dict(rules):
fw_rules = list()
for rule in rules:
fw_rules.append(
dict(
dest_port=rule.get_DestinationPortRange().lower(),
                dest_ip=rule.get_DestinationIp().lower(),
source_port=rule.get_SourcePortRange().lower(),
source_ip=rule.get_SourceIp().lower(),
protocol=protocol_to_string(rule.get_Protocols()).lower(),
policy=rule.get_Policy().lower(),
is_enable=rule.get_IsEnabled(),
enable_logging=rule.get_EnableLogging(),
description=rule.get_Description()
)
)
return fw_rules
def create_fw_rule(is_enable, description, policy, protocol, dest_port,
dest_ip, source_port, source_ip, enable_logging):
return FirewallRuleType(IsEnabled=is_enable,
Description=description,
Policy=policy,
Protocols=protocol_to_type(protocol),
DestinationPortRange=dest_port,
DestinationIp=dest_ip,
SourcePortRange=source_port,
SourceIp=source_ip,
EnableLogging=enable_logging)
def main():
argument_spec = vca_argument_spec()
argument_spec.update(
dict(
fw_rules=dict(required=True, type='list'),
gateway_name=dict(default='gateway'),
state=dict(default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec, supports_check_mode=True)
fw_rules = module.params.get('fw_rules')
gateway_name = module.params.get('gateway_name')
vdc_name = module.params['vdc_name']
vca = vca_login(module)
gateway = vca.get_gateway(vdc_name, gateway_name)
if not gateway:
module.fail_json(msg="Not able to find the gateway %s, please check "
"the gateway_name param" % gateway_name)
fwservice = gateway._getFirewallService()
rules = gateway.get_fw_rules()
current_rules = fw_rules_to_dict(rules)
try:
desired_rules = validate_fw_rules(fw_rules)
except VcaError as e:
module.fail_json(msg=e.message)
result = dict(changed=False)
result['current_rules'] = current_rules
result['desired_rules'] = desired_rules
updates = list()
additions = list()
deletions = list()
for (index, rule) in enumerate(desired_rules):
try:
if rule != current_rules[index]:
updates.append((index, rule))
except IndexError:
additions.append(rule)
    eol = len(current_rules) - len(desired_rules)
    if eol > 0:
        # any current rules beyond the desired list are removed
        for rule in current_rules[len(desired_rules):]:
deletions.append(rule)
for rule in additions:
if not module.check_mode:
rule['protocol'] = rule['protocol'].capitalize()
gateway.add_fw_rule(**rule)
result['changed'] = True
for index, rule in updates:
if not module.check_mode:
rule = create_fw_rule(**rule)
fwservice.replace_FirewallRule_at(index, rule)
result['changed'] = True
keys = ['protocol', 'dest_port', 'dest_ip', 'source_port', 'source_ip']
for rule in deletions:
if not module.check_mode:
kwargs = dict([(k, v) for k, v in rule.items() if k in keys])
            # current_rules already stores protocol as a lowercase string
            kwargs['protocol'] = kwargs['protocol'].capitalize()
gateway.delete_fw_rule(**kwargs)
result['changed'] = True
if not module.check_mode and result['changed'] is True:
task = gateway.save_services_configuration()
if task:
vca.block_until_completed(task)
result['rules_updated'] = len(updates)
result['rules_added'] = len(additions)
result['rules_deleted'] = len(deletions)
return module.exit_json(**result)
if __name__ == '__main__':
main()
|
upsuper/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/service-workers/service-worker/resources/import-scripts-version.py
|
48
|
import datetime
import time
epoch = datetime.datetime(1970, 1, 1)
def main(req, res):
# Artificially delay response time in order to ensure uniqueness of
# computed value
time.sleep(0.1)
now = (datetime.datetime.now() - epoch).total_seconds()
return ([
('Cache-Control', 'no-cache, must-revalidate'),
('Pragma', 'no-cache'),
('Content-Type', 'application/javascript')],
'version = "%s";\n' % now)
|
cyberark-bizdev/ansible
|
refs/heads/devel
|
test/units/modules/monitoring/test_circonus_annotation.py
|
57
|
# -*- coding: utf-8 -*-
import io
import json
import re
import uuid
from urllib3.response import HTTPResponse
from ansible.compat.tests.mock import patch
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
from ansible.modules.monitoring import circonus_annotation
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase, set_module_args
class TestCirconusAnnotation(ModuleTestCase):
def setUp(self):
super(TestCirconusAnnotation, self).setUp()
self.module = circonus_annotation
def tearDown(self):
super(TestCirconusAnnotation, self).tearDown()
def test_without_required_parameters(self):
"""Failure must occurs when all parameters are missing"""
with self.assertRaises(AnsibleFailJson):
set_module_args({})
self.module.main()
def test_add_annotation(self):
"""Check that result is changed"""
set_module_args({
'category': 'test category',
'description': 'test description',
'title': 'test title',
'api_key': str(uuid.uuid4()),
})
cid = '/annotation/100000'
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
data = {
'_cid': cid,
'_created': 1502146995,
'_last_modified': 1502146995,
'_last_modified_by': '/user/1000',
'category': 'test category',
'description': 'test description',
'rel_metrics': [],
'start': 1502145480,
'stop': None,
'title': 'test title',
}
raw = to_bytes(json.dumps(data))
resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
resp.status = 200
resp.reason = 'OK'
resp.headers = {'X-Circonus-API-Version': '2.00'}
return self.build_response(request, resp)
with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
with self.assertRaises(AnsibleExitJson) as result:
self.module.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid)
self.assertEqual(send.call_count, 1)
def test_add_annotation_unicode(self):
"""Check that result is changed.
Note: it seems there is a bug which prevent to create an annotation
with a non-ASCII category if this category already exists, in such
case an Internal Server Error (500) occurs."""
set_module_args({
'category': 'new catégorÿ',
'description': 'test description',
'title': 'test title',
'api_key': str(uuid.uuid4()),
})
cid = '/annotation/100000'
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
data = {
'_cid': '/annotation/100000',
'_created': 1502236928,
'_last_modified': 1502236928,
'_last_modified_by': '/user/1000',
# use res['annotation']['category'].encode('latin1').decode('utf8')
'category': u'new cat\xc3\xa9gor\xc3\xbf',
'description': 'test description',
'rel_metrics': [],
'start': 1502236927,
'stop': 1502236927,
'title': 'test title',
}
raw = to_bytes(json.dumps(data), encoding='latin1')
resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
resp.status = 200
resp.reason = 'OK'
resp.headers = {'X-Circonus-API-Version': '2.00'}
return self.build_response(request, resp)
with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
with self.assertRaises(AnsibleExitJson) as result:
self.module.main()
self.assertTrue(result.exception.args[0]['changed'])
self.assertEqual(result.exception.args[0]['annotation']['_cid'], cid)
self.assertEqual(send.call_count, 1)
def test_auth_failure(self):
"""Check that an error is raised when authentication failed"""
set_module_args({
'category': 'test category',
'description': 'test description',
'title': 'test title',
'api_key': str(uuid.uuid4()),
})
cid = '/annotation/100000'
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
data = {
'_cid': cid,
'_created': 1502146995,
'_last_modified': 1502146995,
'_last_modified_by': '/user/1000',
'category': 'test category',
'description': 'test description',
'rel_metrics': [],
'start': 1502145480,
'stop': None,
'title': 'test title',
}
raw = to_bytes(json.dumps(data))
resp = HTTPResponse(body=io.BytesIO(raw), preload_content=False)
resp.status = 403
resp.reason = 'Forbidden'
resp.headers = {'X-Circonus-API-Version': '2.00'}
return self.build_response(request, resp)
with patch('requests.adapters.HTTPAdapter.send', autospec=True, side_effect=send) as send:
with self.assertRaises(AnsibleFailJson) as result:
self.module.main()
self.assertTrue(result.exception.args[0]['failed'])
self.assertTrue(re.match(r'\b403\b', result.exception.args[0]['reason']))
self.assertEqual(send.call_count, 1)
|
Orochimarufan/youtube-dl
|
refs/heads/master
|
youtube_dl/extractor/douyutv.py
|
51
|
# coding: utf-8
from __future__ import unicode_literals
import time
import hashlib
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
unescapeHTML,
unified_strdate,
urljoin,
)
class DouyuTVIE(InfoExtractor):
IE_DESC = '斗鱼'
_VALID_URL = r'https?://(?:www\.)?douyu(?:tv)?\.com/(?:[^/]+/)*(?P<id>[A-Za-z0-9]+)'
_TESTS = [{
'url': 'http://www.douyutv.com/iseven',
'info_dict': {
'id': '17732',
'display_id': 'iseven',
'ext': 'flv',
'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r're:.*m7show@163\.com.*',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '7师傅',
'is_live': True,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyutv.com/85982',
'info_dict': {
'id': '85982',
'display_id': '85982',
'ext': 'flv',
'title': 're:^小漠从零单排记!——CSOL2躲猫猫 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:746a2f7a253966a06755a912f0acc0d2',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': 'douyu小漠',
'is_live': True,
},
'params': {
'skip_download': True,
},
'skip': 'Room not found',
}, {
'url': 'http://www.douyutv.com/17732',
'info_dict': {
'id': '17732',
'display_id': '17732',
'ext': 'flv',
'title': 're:^清晨醒脑!根本停不下来! [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': r're:.*m7show@163\.com.*',
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '7师傅',
'is_live': True,
},
'params': {
'skip_download': True,
},
}, {
'url': 'http://www.douyu.com/xiaocang',
'only_matching': True,
}, {
# \"room_id\"
'url': 'http://www.douyu.com/t/lpl',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
if video_id.isdigit():
room_id = video_id
else:
page = self._download_webpage(url, video_id)
room_id = self._html_search_regex(
r'"room_id\\?"\s*:\s*(\d+),', page, 'room id')
# Grab metadata from mobile API
room = self._download_json(
'http://m.douyu.com/html5/live?roomId=%s' % room_id, video_id,
note='Downloading room info')['data']
# 1 = live, 2 = offline
if room.get('show_status') == '2':
raise ExtractorError('Live stream is offline', expected=True)
# Grab the URL from PC client API
# The m3u8 url from mobile API requires re-authentication every 5 minutes
tt = int(time.time())
signContent = 'lapi/live/thirdPart/getPlay/%s?aid=pcclient&rate=0&time=%d9TUk5fjjUjg9qIMH3sdnh' % (room_id, tt)
sign = hashlib.md5(signContent.encode('ascii')).hexdigest()
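        # The 'auth' header below is the md5 of path + query + timestamp + a
        # hard-coded secret suffix ('9TUk5fjjUjg9qIMH3sdnh'); the same tt value
        # is sent in the 'time' header so the server can recompute the hash.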
video_url = self._download_json(
'http://coapi.douyucdn.cn/lapi/live/thirdPart/getPlay/' + room_id,
video_id, note='Downloading video URL info',
query={'rate': 0}, headers={
'auth': sign,
'time': str(tt),
'aid': 'pcclient'
})['data']['live_url']
title = self._live_title(unescapeHTML(room['room_name']))
description = room.get('show_details')
thumbnail = room.get('room_src')
uploader = room.get('nickname')
return {
'id': room_id,
'display_id': video_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'is_live': True,
}
class DouyuShowIE(InfoExtractor):
_VALID_URL = r'https?://v(?:mobile)?\.douyu\.com/show/(?P<id>[0-9a-zA-Z]+)'
_TESTS = [{
'url': 'https://v.douyu.com/show/rjNBdvnVXNzvE2yw',
'md5': '0c2cfd068ee2afe657801269b2d86214',
'info_dict': {
'id': 'rjNBdvnVXNzvE2yw',
'ext': 'mp4',
'title': '陈一发儿:砒霜 我有个室友系列!04-01 22点场',
'duration': 7150.08,
'thumbnail': r're:^https?://.*\.jpg$',
'uploader': '陈一发儿',
'uploader_id': 'XrZwYelr5wbK',
'uploader_url': 'https://v.douyu.com/author/XrZwYelr5wbK',
'upload_date': '20170402',
},
}, {
'url': 'https://vmobile.douyu.com/show/rjNBdvnVXNzvE2yw',
'only_matching': True,
}]
def _real_extract(self, url):
url = url.replace('vmobile.', 'v.')
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
room_info = self._parse_json(self._search_regex(
r'var\s+\$ROOM\s*=\s*({.+});', webpage, 'room info'), video_id)
video_info = None
for trial in range(5):
            # Sometimes Douyu rejects the request; retry a few times.
try:
video_info = self._download_json(
'https://vmobile.douyu.com/video/getInfo', video_id,
query={'vid': video_id},
headers={
'Referer': url,
'x-requested-with': 'XMLHttpRequest',
})
break
except ExtractorError:
self._sleep(1, video_id)
if not video_info:
raise ExtractorError('Can\'t fetch video info')
formats = self._extract_m3u8_formats(
video_info['data']['video_url'], video_id,
entry_protocol='m3u8_native', ext='mp4')
upload_date = unified_strdate(self._html_search_regex(
r'<em>上传时间:</em><span>([^<]+)</span>', webpage,
'upload date', fatal=False))
uploader = uploader_id = uploader_url = None
mobj = re.search(
r'(?m)<a[^>]+href="/author/([0-9a-zA-Z]+)".+?<strong[^>]+title="([^"]+)"',
webpage)
if mobj:
uploader_id, uploader = mobj.groups()
uploader_url = urljoin(url, '/author/' + uploader_id)
return {
'id': video_id,
'title': room_info['name'],
'formats': formats,
'duration': room_info.get('duration'),
'thumbnail': room_info.get('pic'),
'upload_date': upload_date,
'uploader': uploader,
'uploader_id': uploader_id,
'uploader_url': uploader_url,
}
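# Hedged usage sketch (illustrative, not part of this module): both extractors
# are exercised through youtube-dl's normal entry point, e.g.
#   from youtube_dl import YoutubeDL
#   with YoutubeDL({'skip_download': True}) as ydl:
#       ydl.extract_info('http://www.douyutv.com/iseven', download=False)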
|
adhoc-dev/purchase-workflow
|
refs/heads/8.0
|
purchase_requisition_bid_selection/tests/__init__.py
|
20
|
# -*- coding: utf-8 -*-
from . import test_cancel_purchase_requisition
from . import test_generate_po
from . import test_purchase_requisition_line
|
kerneltask/micropython
|
refs/heads/master
|
examples/bluetooth/ble_temperature_central.py
|
2
|
# This example finds and connects to a BLE temperature sensor (e.g. the one in ble_temperature.py).
import bluetooth
import random
import struct
import time
import micropython
from ble_advertising import decode_services, decode_name
from micropython import const
_IRQ_CENTRAL_CONNECT = const(1)
_IRQ_CENTRAL_DISCONNECT = const(2)
_IRQ_GATTS_WRITE = const(3)
_IRQ_GATTS_READ_REQUEST = const(4)
_IRQ_SCAN_RESULT = const(5)
_IRQ_SCAN_DONE = const(6)
_IRQ_PERIPHERAL_CONNECT = const(7)
_IRQ_PERIPHERAL_DISCONNECT = const(8)
_IRQ_GATTC_SERVICE_RESULT = const(9)
_IRQ_GATTC_SERVICE_DONE = const(10)
_IRQ_GATTC_CHARACTERISTIC_RESULT = const(11)
_IRQ_GATTC_CHARACTERISTIC_DONE = const(12)
_IRQ_GATTC_DESCRIPTOR_RESULT = const(13)
_IRQ_GATTC_DESCRIPTOR_DONE = const(14)
_IRQ_GATTC_READ_RESULT = const(15)
_IRQ_GATTC_READ_DONE = const(16)
_IRQ_GATTC_WRITE_DONE = const(17)
_IRQ_GATTC_NOTIFY = const(18)
_IRQ_GATTC_INDICATE = const(19)
_ADV_IND = const(0x00)
_ADV_DIRECT_IND = const(0x01)
_ADV_SCAN_IND = const(0x02)
_ADV_NONCONN_IND = const(0x03)
# org.bluetooth.service.environmental_sensing
_ENV_SENSE_UUID = bluetooth.UUID(0x181A)
# org.bluetooth.characteristic.temperature
_TEMP_UUID = bluetooth.UUID(0x2A6E)
_TEMP_CHAR = (
_TEMP_UUID,
bluetooth.FLAG_READ | bluetooth.FLAG_NOTIFY,
)
_ENV_SENSE_SERVICE = (
_ENV_SENSE_UUID,
(_TEMP_CHAR,),
)
# org.bluetooth.characteristic.gap.appearance.xml
_ADV_APPEARANCE_GENERIC_THERMOMETER = const(768)
class BLETemperatureCentral:
def __init__(self, ble):
self._ble = ble
self._ble.active(True)
self._ble.irq(handler=self._irq)
self._reset()
def _reset(self):
# Cached name and address from a successful scan.
self._name = None
self._addr_type = None
self._addr = None
# Cached value (if we have one)
self._value = None
# Callbacks for completion of various operations.
# These reset back to None after being invoked.
self._scan_callback = None
self._conn_callback = None
self._read_callback = None
# Persistent callback for when new data is notified from the device.
self._notify_callback = None
# Connected device.
self._conn_handle = None
self._start_handle = None
self._end_handle = None
self._value_handle = None
def _irq(self, event, data):
if event == _IRQ_SCAN_RESULT:
addr_type, addr, adv_type, rssi, adv_data = data
if adv_type in (_ADV_IND, _ADV_DIRECT_IND,) and _ENV_SENSE_UUID in decode_services(
adv_data
):
# Found a potential device, remember it and stop scanning.
self._addr_type = addr_type
self._addr = bytes(
addr
) # Note: addr buffer is owned by caller so need to copy it.
self._name = decode_name(adv_data) or "?"
self._ble.gap_scan(None)
elif event == _IRQ_SCAN_DONE:
if self._scan_callback:
if self._addr:
# Found a device during the scan (and the scan was explicitly stopped).
self._scan_callback(self._addr_type, self._addr, self._name)
self._scan_callback = None
else:
# Scan timed out.
self._scan_callback(None, None, None)
elif event == _IRQ_PERIPHERAL_CONNECT:
# Connect successful.
conn_handle, addr_type, addr, = data
if addr_type == self._addr_type and addr == self._addr:
self._conn_handle = conn_handle
self._ble.gattc_discover_services(self._conn_handle)
elif event == _IRQ_PERIPHERAL_DISCONNECT:
# Disconnect (either initiated by us or the remote end).
conn_handle, _, _, = data
if conn_handle == self._conn_handle:
# If it was initiated by us, it'll already be reset.
self._reset()
elif event == _IRQ_GATTC_SERVICE_RESULT:
# Connected device returned a service.
conn_handle, start_handle, end_handle, uuid = data
if conn_handle == self._conn_handle and uuid == _ENV_SENSE_UUID:
self._start_handle, self._end_handle = start_handle, end_handle
elif event == _IRQ_GATTC_SERVICE_DONE:
# Service query complete.
if self._start_handle and self._end_handle:
self._ble.gattc_discover_characteristics(
self._conn_handle, self._start_handle, self._end_handle
)
else:
print("Failed to find environmental sensing service.")
elif event == _IRQ_GATTC_CHARACTERISTIC_RESULT:
# Connected device returned a characteristic.
conn_handle, def_handle, value_handle, properties, uuid = data
if conn_handle == self._conn_handle and uuid == _TEMP_UUID:
self._value_handle = value_handle
elif event == _IRQ_GATTC_CHARACTERISTIC_DONE:
# Characteristic query complete.
if self._value_handle:
                # We've finished connecting and discovering the device; fire the connect callback.
if self._conn_callback:
self._conn_callback()
else:
print("Failed to find temperature characteristic.")
elif event == _IRQ_GATTC_READ_RESULT:
# A read completed successfully.
conn_handle, value_handle, char_data = data
if conn_handle == self._conn_handle and value_handle == self._value_handle:
self._update_value(char_data)
if self._read_callback:
self._read_callback(self._value)
self._read_callback = None
elif event == _IRQ_GATTC_READ_DONE:
# Read completed (no-op).
conn_handle, value_handle, status = data
elif event == _IRQ_GATTC_NOTIFY:
# The ble_temperature.py demo periodically notifies its value.
conn_handle, value_handle, notify_data = data
if conn_handle == self._conn_handle and value_handle == self._value_handle:
self._update_value(notify_data)
if self._notify_callback:
self._notify_callback(self._value)
# Returns true if we've successfully connected and discovered characteristics.
def is_connected(self):
return self._conn_handle is not None and self._value_handle is not None
# Find a device advertising the environmental sensor service.
def scan(self, callback=None):
self._addr_type = None
self._addr = None
self._scan_callback = callback
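        # gap_scan(duration_ms, interval_us, window_us): scan for 2 s with a
        # 30 ms interval and a 30 ms window (window == interval, so the radio
        # listens continuously for the whole scan).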
self._ble.gap_scan(2000, 30000, 30000)
# Connect to the specified device (otherwise use cached address from a scan).
def connect(self, addr_type=None, addr=None, callback=None):
self._addr_type = addr_type or self._addr_type
self._addr = addr or self._addr
self._conn_callback = callback
if self._addr_type is None or self._addr is None:
return False
self._ble.gap_connect(self._addr_type, self._addr)
return True
# Disconnect from current device.
def disconnect(self):
if not self._conn_handle:
return
self._ble.gap_disconnect(self._conn_handle)
self._reset()
# Issues an (asynchronous) read, will invoke callback with data.
def read(self, callback):
if not self.is_connected():
return
self._read_callback = callback
self._ble.gattc_read(self._conn_handle, self._value_handle)
# Sets a callback to be invoked when the device notifies us.
def on_notify(self, callback):
self._notify_callback = callback
def _update_value(self, data):
# Data is sint16 in degrees Celsius with a resolution of 0.01 degrees Celsius.
self._value = struct.unpack("<h", data)[0] / 100
return self._value
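    # Worked example (illustrative): the little-endian bytes b"\x0a\x09"
    # decode as struct.unpack("<h", b"\x0a\x09")[0] / 100 == 23.14 deg C.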
def value(self):
return self._value
def demo():
ble = bluetooth.BLE()
central = BLETemperatureCentral(ble)
not_found = False
def on_scan(addr_type, addr, name):
if addr_type is not None:
print("Found sensor:", addr_type, addr, name)
central.connect()
else:
nonlocal not_found
not_found = True
print("No sensor found.")
central.scan(callback=on_scan)
# Wait for connection...
while not central.is_connected():
time.sleep_ms(100)
if not_found:
return
print("Connected")
# Explicitly issue reads, using "print" as the callback.
while central.is_connected():
central.read(callback=print)
time.sleep_ms(2000)
# Alternative to the above, just show the most recently notified value.
# while central.is_connected():
# print(central.value())
# time.sleep_ms(2000)
print("Disconnected")
if __name__ == "__main__":
demo()
|
ORTI3D/ORTI3D_code
|
refs/heads/master
|
iliblast/Opgeo.py
|
1
|
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 26 21:52:19 2015
@author: olive
"""
from geometry import *
# shape() and arange() are used below; import them explicitly in case they
# are not re-exported by geometry's star import.
from numpy import arange, shape
import os
class Opgeo:
def __init__(self, core):
self.core = core
def buildMesh(self,nlay=1):
if self.core.dicval['OpgeoFlow']['domn.1'][0]==0:
nx,ny,xv,yv = getXYvects(self.core)
self.nel = nx*ny
self.nnod = (nx+1)*(ny+1)
            return None # regular rectangular grid, no gmsh mesh needed
dct = self.core.diczone['OpgeoFlow'].dic
dicD = dct['domn.4']
        if 'flow.5' in dct: dicK = dct['flow.5'] # hydraulic conductivity (K)
else : dicK = {'name':[]}
s = gmeshString(dicD,dicK)
os.chdir(self.core.fileDir)
f1 = open('gm_in.txt','w');f1.write(s);f1.close()
bindir = self.core.baseDir+os.sep+'bin'+os.sep
os.system(bindir+'gmsh gm_in.txt -2 -o gm_out.msh')
#os.chdir(self.core.fileDir)
f1 = open('gm_out.msh','r');s = f1.read();f1.close()
nodes,elements = readGmshOut(s)
self.nodes,self.nnod = nodes,len(nodes)
s1 = self.arr2string(nodes)
self.nodestring = s1.replace('. ',' ')
nel,nc = shape(elements)
elements[:,0]=arange(nel)
elements[:,1]=elements[:,2] # this is the material number, which starts from 1 in gmsh and 0 in opgeo
self.nel = nel
self.elements = elements
        elements[:,2] = -100 # placeholder, replaced by ' -1 tri' before the node numbers
s = self.arr2string(elements)
s = s.replace('-100',' -1 tri')
self.elementstring = s
self = createTriangul(self)
return
def getCenters(self):
if self.core.dicval['OpgeoFlow']['domn.1'][0]==0:
return getXYmeshCenters(self.core,'Z',0)
else :
return self.elcenters
def getNumber(self):
return self.nel
def arr2string(self,arr):
s=''
nr,nc = shape(arr)
for i in range(nr):
s += str(int(arr[i,0]))+' '
for j in range(1,nc):
s += str(arr[i,j])+' '
s += '\n'
return s
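# Hedged usage sketch (illustrative; assumes a fully initialised `core` object
# carrying the dictionaries referenced above):
#   og = Opgeo(core)
#   og.buildMesh()
#   centers = og.getCenters()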
|
dtaht/ns-3-dev
|
refs/heads/master
|
src/core/examples/sample-rng-plot.py
|
188
|
# -*- Mode:Python; -*-
# /*
# * This program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License version 2 as
# * published by the Free Software Foundation
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# */
# Demonstrate use of ns-3 as a random number generator integrated with
# plotting tools; adapted from Gustavo Carneiro's ns-3 tutorial
import numpy as np
import matplotlib.pyplot as plt
import ns.core
# mu, var = 100, 225
rng = ns.core.NormalVariable(100.0, 225.0)
x = [rng.GetValue() for t in range(10000)]
# the histogram of the data
n, bins, patches = plt.hist(x, 50, normed=1, facecolor='g', alpha=0.75)
plt.title('ns-3 histogram')
plt.text(60, .025, r'$\mu=100,\ \sigma=15$')
plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()
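# Aside (illustrative): ns-3's NormalVariable takes the *variance* (225),
# whereas numpy takes the standard deviation (15), so the equivalent draw is
#   x_np = np.random.normal(100.0, 15.0, 10000)
# On newer matplotlib, `normed=1` above is spelled `density=True`.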
|
chaosmaker/pyload
|
refs/heads/stable
|
module/lib/thrift/server/TNonblockingServer.py
|
83
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Implementation of non-blocking server.
The main idea of the server is reciving and sending requests
only from main thread.
It also makes thread pool server in tasks terms, not connections.
"""
import threading
import socket
import Queue
import select
import struct
import logging
from thrift.transport import TTransport
from thrift.protocol.TBinaryProtocol import TBinaryProtocolFactory
__all__ = ['TNonblockingServer']
class Worker(threading.Thread):
"""Worker is a small helper to process incoming connection."""
def __init__(self, queue):
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Process queries from task queue, stop if processor is None."""
while True:
try:
processor, iprot, oprot, otrans, callback = self.queue.get()
if processor is None:
break
processor.process(iprot, oprot)
callback(True, otrans.getvalue())
except Exception:
logging.exception("Exception while processing request")
callback(False, '')
WAIT_LEN = 0
WAIT_MESSAGE = 1
WAIT_PROCESS = 2
SEND_ANSWER = 3
CLOSED = 4
def locked(func):
"Decorator which locks self.lock."
def nested(self, *args, **kwargs):
self.lock.acquire()
try:
return func(self, *args, **kwargs)
finally:
self.lock.release()
return nested
def socket_exception(func):
"Decorator close object on socket.error."
def read(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
except socket.error:
self.close()
return read
class Connection:
"""Basic class is represented connection.
It can be in state:
WAIT_LEN --- connection is reading request len.
WAIT_MESSAGE --- connection is reading request.
    WAIT_PROCESS --- connection has just read the whole request and
    is waiting for the ready routine to be called.
SEND_ANSWER --- connection is sending answer string (including length
of answer).
CLOSED --- socket was closed and connection should be deleted.
"""
def __init__(self, new_socket, wake_up):
self.socket = new_socket
self.socket.setblocking(False)
self.status = WAIT_LEN
self.len = 0
self.message = ''
self.lock = threading.Lock()
self.wake_up = wake_up
def _read_len(self):
"""Reads length of request.
It's really paranoic routine and it may be replaced by
self.socket.recv(4)."""
read = self.socket.recv(4 - len(self.message))
if len(read) == 0:
            # if we read 0 bytes and self.message is empty, the client
            # closed the connection
if len(self.message) != 0:
logging.error("can't read frame size from socket")
self.close()
return
self.message += read
if len(self.message) == 4:
self.len, = struct.unpack('!i', self.message)
if self.len < 0:
logging.error("negative frame size, it seems client"\
" doesn't use FramedTransport")
self.close()
elif self.len == 0:
logging.error("empty frame, it's really strange")
self.close()
else:
self.message = ''
self.status = WAIT_MESSAGE
@socket_exception
def read(self):
"""Reads data from stream and switch state."""
assert self.status in (WAIT_LEN, WAIT_MESSAGE)
if self.status == WAIT_LEN:
self._read_len()
# go back to the main loop here for simplicity instead of
# falling through, even though there is a good chance that
# the message is already available
elif self.status == WAIT_MESSAGE:
read = self.socket.recv(self.len - len(self.message))
if len(read) == 0:
logging.error("can't read frame from socket (get %d of %d bytes)" %
(len(self.message), self.len))
self.close()
return
self.message += read
if len(self.message) == self.len:
self.status = WAIT_PROCESS
@socket_exception
def write(self):
"""Writes data from socket and switch state."""
assert self.status == SEND_ANSWER
sent = self.socket.send(self.message)
if sent == len(self.message):
self.status = WAIT_LEN
self.message = ''
self.len = 0
else:
self.message = self.message[sent:]
@locked
def ready(self, all_ok, message):
"""Callback function for switching state and waking up main thread.
This function is the only function witch can be called asynchronous.
The ready can switch Connection to three states:
WAIT_LEN if request was oneway.
SEND_ANSWER if request was processed in normal way.
CLOSED if request throws unexpected exception.
The one wakes up main thread.
"""
assert self.status == WAIT_PROCESS
if not all_ok:
self.close()
self.wake_up()
return
        self.len = 0
if len(message) == 0:
# it was a oneway request, do not write answer
self.message = ''
self.status = WAIT_LEN
else:
self.message = struct.pack('!i', len(message)) + message
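            # e.g. for message b'abc' this queues b'\x00\x00\x00\x03abc'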
self.status = SEND_ANSWER
self.wake_up()
@locked
def is_writeable(self):
"Returns True if connection should be added to write list of select."
return self.status == SEND_ANSWER
    # strictly speaking the lock is not necessary here, but it keeps the accessors uniform
@locked
def is_readable(self):
"Returns True if connection should be added to read list of select."
return self.status in (WAIT_LEN, WAIT_MESSAGE)
@locked
def is_closed(self):
"Returns True if connection is closed."
return self.status == CLOSED
def fileno(self):
"Returns the file descriptor of the associated socket."
return self.socket.fileno()
def close(self):
"Closes connection"
self.status = CLOSED
self.socket.close()
class TNonblockingServer:
"""Non-blocking server."""
def __init__(self, processor, lsocket, inputProtocolFactory=None,
outputProtocolFactory=None, threads=10):
self.processor = processor
self.socket = lsocket
self.in_protocol = inputProtocolFactory or TBinaryProtocolFactory()
self.out_protocol = outputProtocolFactory or self.in_protocol
self.threads = int(threads)
self.clients = {}
self.tasks = Queue.Queue()
self._read, self._write = socket.socketpair()
self.prepared = False
def setNumThreads(self, num):
"""Set the number of worker threads that should be created."""
# implement ThreadPool interface
assert not self.prepared, "You can't change number of threads for working server"
self.threads = num
def prepare(self):
"""Prepares server for serve requests."""
self.socket.listen()
for _ in xrange(self.threads):
thread = Worker(self.tasks)
thread.setDaemon(True)
thread.start()
self.prepared = True
def wake_up(self):
"""Wake up main thread.
The server usualy waits in select call in we should terminate one.
The simplest way is using socketpair.
Select always wait to read from the first socket of socketpair.
In this case, we can just write anything to the second socket from
socketpair."""
self._write.send('1')
def _select(self):
"""Does select on open connections."""
readable = [self.socket.handle.fileno(), self._read.fileno()]
writable = []
for i, connection in self.clients.items():
if connection.is_readable():
readable.append(connection.fileno())
if connection.is_writeable():
writable.append(connection.fileno())
if connection.is_closed():
del self.clients[i]
return select.select(readable, writable, readable)
def handle(self):
"""Handle requests.
WARNING! You must call prepare BEFORE calling handle.
"""
assert self.prepared, "You have to call prepare before handle"
rset, wset, xset = self._select()
for readable in rset:
if readable == self._read.fileno():
                # we don't care about the data, just clear the readable flag
self._read.recv(1024)
elif readable == self.socket.handle.fileno():
client = self.socket.accept().handle
self.clients[client.fileno()] = Connection(client, self.wake_up)
else:
connection = self.clients[readable]
connection.read()
if connection.status == WAIT_PROCESS:
itransport = TTransport.TMemoryBuffer(connection.message)
otransport = TTransport.TMemoryBuffer()
iprot = self.in_protocol.getProtocol(itransport)
oprot = self.out_protocol.getProtocol(otransport)
self.tasks.put([self.processor, iprot, oprot,
otransport, connection.ready])
for writeable in wset:
self.clients[writeable].write()
for oob in xset:
self.clients[oob].close()
del self.clients[oob]
def close(self):
"""Closes the server."""
for _ in xrange(self.threads):
self.tasks.put([None, None, None, None, None])
self.socket.close()
self.prepared = False
def serve(self):
"""Serve forever."""
self.prepare()
while True:
self.handle()
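# Hedged usage sketch (illustrative; `processor` comes from Thrift-generated
# code for a concrete service, not shown here):
#   from thrift.transport import TSocket
#   server = TNonblockingServer(processor, TSocket.TServerSocket(port=9090))
#   server.serve()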
|